Merge branch 'cloudflare:master' into tunnel-health

Mads Jon Nielsen 2024-05-21 09:09:53 +02:00 committed by GitHub
commit e759716ce7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
453 changed files with 29346 additions and 18424 deletions


@ -4,7 +4,7 @@ jobs:
check:
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:


@ -3,6 +3,6 @@
cd /tmp
git clone -q https://github.com/cloudflare/go
cd go/src
# https://github.com/cloudflare/go/tree/34129e47042e214121b6bbff0ded4712debed18e is version go1.21.5-devel-cf
git checkout -q 34129e47042e214121b6bbff0ded4712debed18e
# https://github.com/cloudflare/go/tree/ec0a014545f180b0c74dfd687698657a9e86e310 is version go1.22.2-devel-cf
git checkout -q ec0a014545f180b0c74dfd687698657a9e86e310
./make.bash


@ -9,8 +9,8 @@ Set-Location "$Env:Temp"
git clone -q https://github.com/cloudflare/go
Write-Output "Building go..."
cd go/src
# https://github.com/cloudflare/go/tree/34129e47042e214121b6bbff0ded4712debed18e is version go1.21.5-devel-cf
git checkout -q 34129e47042e214121b6bbff0ded4712debed18e
# https://github.com/cloudflare/go/tree/ec0a014545f180b0c74dfd687698657a9e86e310 is version go1.22.2-devel-cf
git checkout -q ec0a014545f180b0c74dfd687698657a9e86e310
& ./make.bat
Write-Output "Installed"


@ -1,6 +1,6 @@
$ErrorActionPreference = "Stop"
$ProgressPreference = "SilentlyContinue"
$GoMsiVersion = "go1.21.5.windows-amd64.msi"
$GoMsiVersion = "go1.22.2.windows-amd64.msi"
Write-Output "Downloading go installer..."


@ -1,7 +1,7 @@
# use a builder image for building cloudflared
ARG TARGET_GOOS
ARG TARGET_GOARCH
FROM golang:1.21.5 as builder
FROM golang:1.22.2 as builder
ENV GO111MODULE=on \
CGO_ENABLED=0 \
TARGET_GOOS=${TARGET_GOOS} \


@ -1,5 +1,5 @@
# use a builder image for building cloudflared
FROM golang:1.21.5 as builder
FROM golang:1.22.2 as builder
ENV GO111MODULE=on \
CGO_ENABLED=0


@ -1,5 +1,5 @@
# use a builder image for building cloudflared
FROM golang:1.21.5 as builder
FROM golang:1.22.2 as builder
ENV GO111MODULE=on \
CGO_ENABLED=0


@ -251,17 +251,11 @@ github-windows-upload:
python3 github_release.py --path built_artifacts/cloudflared-windows-386.exe --release-version $(VERSION) --name cloudflared-windows-386.exe
python3 github_release.py --path built_artifacts/cloudflared-windows-386.msi --release-version $(VERSION) --name cloudflared-windows-386.msi
.PHONY: tunnelrpc-deps
tunnelrpc-deps:
.PHONY: capnp
capnp:
which capnp # https://capnproto.org/install.html
which capnpc-go # go install zombiezen.com/go/capnproto2/capnpc-go@latest
capnp compile -ogo tunnelrpc/tunnelrpc.capnp
.PHONY: quic-deps
quic-deps:
which capnp
which capnpc-go
capnp compile -ogo quic/schema/quic_metadata_protocol.capnp
capnp compile -ogo tunnelrpc/proto/tunnelrpc.capnp tunnelrpc/proto/quic_metadata_protocol.capnp
.PHONY: vet
vet:
@ -269,4 +263,4 @@ vet:
.PHONY: fmt
fmt:
goimports -l -w -local github.com/cloudflare/cloudflared $$(go list -mod=vendor -f '{{.Dir}}' -a ./... | fgrep -v tunnelrpc)
goimports -l -w -local github.com/cloudflare/cloudflared $$(go list -mod=vendor -f '{{.Dir}}' -a ./... | fgrep -v tunnelrpc/proto)


@ -1,3 +1,13 @@
2024.5.0
- 2024-05-07 TUN-8407: Upgrade go to version 1.22.2
2024.4.1
- 2024-04-22 TUN-8380: Add sleep before requesting quick tunnel as temporary fix for component tests
- 2024-04-19 TUN-8374: Close UDP socket if registration fails
- 2024-04-18 TUN-8371: Bump quic-go to v0.42.0
- 2024-04-03 TUN-8333: Bump go-jose dependency to v4
- 2024-04-02 TUN-8331: Add unit testing for AccessJWTValidator middleware
2024.4.0
- 2024-04-02 feat: provide short version (#1206)
- 2024-04-02 Format code


@ -1,4 +1,4 @@
pinned_go: &pinned_go go-boring=1.21.5-1
pinned_go: &pinned_go go-boring=1.22.2-1
build_dir: &build_dir /cfsetup_build
default-flavor: bullseye


@ -78,8 +78,8 @@ const (
// hostKeyPath is the path of the dir to save SSH host keys to
hostKeyPath = "host-key-path"
// udpUnregisterSessionTimeout is how long we wait before we stop trying to unregister a UDP session from the edge
udpUnregisterSessionTimeoutFlag = "udp-unregister-session-timeout"
// rpcTimeout is how long to wait for a Capnp RPC request to the edge
rpcTimeout = "rpc-timeout"
// writeStreamTimeout sets if we should have a timeout when writing data to a stream towards the destination (edge/origin).
writeStreamTimeout = "write-stream-timeout"
@ -696,7 +696,7 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
Hidden: true,
}),
altsrc.NewDurationFlag(&cli.DurationFlag{
Name: udpUnregisterSessionTimeoutFlag,
Name: rpcTimeout,
Value: 5 * time.Second,
Hidden: true,
}),


@ -246,7 +246,7 @@ func prepareTunnelConfig(
EdgeTLSConfigs: edgeTLSConfigs,
FeatureSelector: featureSelector,
MaxEdgeAddrRetries: uint8(c.Int("max-edge-addr-retries")),
UDPUnregisterSessionTimeout: c.Duration(udpUnregisterSessionTimeoutFlag),
RPCTimeout: c.Duration(rpcTimeout),
WriteStreamTimeout: c.Duration(writeStreamTimeout),
DisableQUICPathMTUDiscovery: c.Bool(quicDisablePathMTUDiscovery),
}


@ -28,9 +28,11 @@ import (
"github.com/cloudflare/cloudflared/ingress"
"github.com/cloudflare/cloudflared/management"
"github.com/cloudflare/cloudflared/packet"
quicpogs "github.com/cloudflare/cloudflared/quic"
cfdquic "github.com/cloudflare/cloudflared/quic"
"github.com/cloudflare/cloudflared/tracing"
"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
rpcquic "github.com/cloudflare/cloudflared/tunnelrpc/quic"
)
const (
@ -59,14 +61,14 @@ type QUICConnection struct {
// sessionManager tracks active sessions. It receives datagrams from the quic connection via datagramMuxer
sessionManager datagramsession.Manager
// datagramMuxer muxes/demuxes datagrams from the quic connection
datagramMuxer *quicpogs.DatagramMuxerV2
datagramMuxer *cfdquic.DatagramMuxerV2
packetRouter *ingress.PacketRouter
controlStreamHandler ControlStreamHandler
connOptions *tunnelpogs.ConnectionOptions
connIndex uint8
udpUnregisterTimeout time.Duration
streamWriteTimeout time.Duration
rpcTimeout time.Duration
streamWriteTimeout time.Duration
}
// NewQUICConnection returns a new instance of QUICConnection.
@ -82,7 +84,7 @@ func NewQUICConnection(
controlStreamHandler ControlStreamHandler,
logger *zerolog.Logger,
packetRouterConfig *ingress.GlobalRouterConfig,
udpUnregisterTimeout time.Duration,
rpcTimeout time.Duration,
streamWriteTimeout time.Duration,
) (*QUICConnection, error) {
udpConn, err := createUDPConnForConnIndex(connIndex, localAddr, logger)
@ -104,7 +106,7 @@ func NewQUICConnection(
}
sessionDemuxChan := make(chan *packet.Session, demuxChanCapacity)
datagramMuxer := quicpogs.NewDatagramMuxerV2(session, logger, sessionDemuxChan)
datagramMuxer := cfdquic.NewDatagramMuxerV2(session, logger, sessionDemuxChan)
sessionManager := datagramsession.NewManager(logger, datagramMuxer.SendToSession, sessionDemuxChan)
packetRouter := ingress.NewPacketRouter(packetRouterConfig, datagramMuxer, logger)
@ -118,7 +120,7 @@ func NewQUICConnection(
controlStreamHandler: controlStreamHandler,
connOptions: connOptions,
connIndex: connIndex,
udpUnregisterTimeout: udpUnregisterTimeout,
rpcTimeout: rpcTimeout,
streamWriteTimeout: streamWriteTimeout,
}, nil
}
@ -198,7 +200,7 @@ func (q *QUICConnection) acceptStream(ctx context.Context) error {
func (q *QUICConnection) runStream(quicStream quic.Stream) {
ctx := quicStream.Context()
stream := quicpogs.NewSafeStreamCloser(quicStream, q.streamWriteTimeout, q.logger)
stream := cfdquic.NewSafeStreamCloser(quicStream, q.streamWriteTimeout, q.logger)
defer stream.Close()
// we are going to fuse readers/writers from stream <- cloudflared -> origin, and we want to guarantee that
@ -206,7 +208,8 @@ func (q *QUICConnection) runStream(quicStream quic.Stream) {
// So, we wrap the stream with a no-op write closer and only this method can actually close write side of the stream.
// A call to close will simulate a close to the read-side, which will fail subsequent reads.
noCloseStream := &nopCloserReadWriter{ReadWriteCloser: stream}
if err := q.handleStream(ctx, noCloseStream); err != nil {
ss := rpcquic.NewCloudflaredServer(q.handleDataStream, q, q, q.rpcTimeout)
if err := ss.Serve(ctx, noCloseStream); err != nil {
q.logger.Debug().Err(err).Msg("Failed to handle QUIC stream")
// if we received an error at this level, then close write side of stream with an error, which will result in
@ -215,30 +218,7 @@ func (q *QUICConnection) runStream(quicStream quic.Stream) {
}
}
func (q *QUICConnection) handleStream(ctx context.Context, stream io.ReadWriteCloser) error {
signature, err := quicpogs.DetermineProtocol(stream)
if err != nil {
return err
}
switch signature {
case quicpogs.DataStreamProtocolSignature:
reqServerStream, err := quicpogs.NewRequestServerStream(stream, signature)
if err != nil {
return err
}
return q.handleDataStream(ctx, reqServerStream)
case quicpogs.RPCStreamProtocolSignature:
rpcStream, err := quicpogs.NewRPCServerStream(stream, signature)
if err != nil {
return err
}
return q.handleRPCStream(rpcStream)
default:
return fmt.Errorf("unknown protocol %v", signature)
}
}
func (q *QUICConnection) handleDataStream(ctx context.Context, stream *quicpogs.RequestServerStream) error {
func (q *QUICConnection) handleDataStream(ctx context.Context, stream *rpcquic.RequestServerStream) error {
request, err := stream.ReadConnectRequestData()
if err != nil {
return err
@ -264,22 +244,22 @@ func (q *QUICConnection) handleDataStream(ctx context.Context, stream *quicpogs.
// dispatchRequest will dispatch the request depending on the type and returns an error if it occurs.
// More importantly, it also tells whether the ConnectResponse metadata was sent downstream while processing the request.
// This is important since it informs the caller whether an error response still needs to be written to the stream.
func (q *QUICConnection) dispatchRequest(ctx context.Context, stream *quicpogs.RequestServerStream, err error, request *quicpogs.ConnectRequest) (error, bool) {
func (q *QUICConnection) dispatchRequest(ctx context.Context, stream *rpcquic.RequestServerStream, err error, request *pogs.ConnectRequest) (error, bool) {
originProxy, err := q.orchestrator.GetOriginProxy()
if err != nil {
return err, false
}
switch request.Type {
case quicpogs.ConnectionTypeHTTP, quicpogs.ConnectionTypeWebsocket:
case pogs.ConnectionTypeHTTP, pogs.ConnectionTypeWebsocket:
tracedReq, err := buildHTTPRequest(ctx, request, stream, q.connIndex, q.logger)
if err != nil {
return err, false
}
w := newHTTPResponseAdapter(stream)
return originProxy.ProxyHTTP(&w, tracedReq, request.Type == quicpogs.ConnectionTypeWebsocket), w.connectResponseSent
return originProxy.ProxyHTTP(&w, tracedReq, request.Type == pogs.ConnectionTypeWebsocket), w.connectResponseSent
case quicpogs.ConnectionTypeTCP:
case pogs.ConnectionTypeTCP:
rwa := &streamReadWriteAcker{RequestServerStream: stream}
metadata := request.MetadataMap()
return originProxy.ProxyTCP(ctx, rwa, &TCPRequest{
@ -293,14 +273,6 @@ func (q *QUICConnection) dispatchRequest(ctx context.Context, stream *quicpogs.R
}
}
func (q *QUICConnection) handleRPCStream(rpcStream *quicpogs.RPCServerStream) error {
if err := rpcStream.Serve(q, q, q.logger); err != nil {
q.logger.Err(err).Msg("failed handling RPC stream")
}
return nil
}
// RegisterUdpSession is the RPC method invoked by edge to register and run a session
func (q *QUICConnection) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeAfterIdleHint time.Duration, traceContext string) (*tunnelpogs.RegisterUdpSessionResponse, error) {
traceCtx := tracing.NewTracedContext(ctx, traceContext, q.logger)
@ -377,9 +349,9 @@ func (q *QUICConnection) closeUDPSession(ctx context.Context, sessionID uuid.UUI
return
}
stream := quicpogs.NewSafeStreamCloser(quicStream, q.streamWriteTimeout, q.logger)
stream := cfdquic.NewSafeStreamCloser(quicStream, q.streamWriteTimeout, q.logger)
defer stream.Close()
rpcClientStream, err := quicpogs.NewRPCClientStream(ctx, stream, q.udpUnregisterTimeout, q.logger)
rpcClientStream, err := rpcquic.NewSessionClient(ctx, stream, q.rpcTimeout)
if err != nil {
// Log this at debug because this is not an error if session was closed due to lost connection
// with edge
@ -408,16 +380,16 @@ func (q *QUICConnection) UpdateConfiguration(ctx context.Context, version int32,
// streamReadWriteAcker is a light wrapper over QUIC streams with a callback to send response back to
// the client.
type streamReadWriteAcker struct {
*quicpogs.RequestServerStream
*rpcquic.RequestServerStream
connectResponseSent bool
}
// AckConnection acks response back to the proxy.
func (s *streamReadWriteAcker) AckConnection(tracePropagation string) error {
metadata := []quicpogs.Metadata{}
metadata := []pogs.Metadata{}
// Only add tracing if provided by origintunneld
if tracePropagation != "" {
metadata = append(metadata, quicpogs.Metadata{
metadata = append(metadata, pogs.Metadata{
Key: tracing.CanonicalCloudflaredTracingHeader,
Val: tracePropagation,
})
@ -428,12 +400,12 @@ func (s *streamReadWriteAcker) AckConnection(tracePropagation string) error {
// httpResponseAdapter translates responses written by the HTTP Proxy into ones that can be used in QUIC.
type httpResponseAdapter struct {
*quicpogs.RequestServerStream
*rpcquic.RequestServerStream
headers http.Header
connectResponseSent bool
}
func newHTTPResponseAdapter(s *quicpogs.RequestServerStream) httpResponseAdapter {
func newHTTPResponseAdapter(s *rpcquic.RequestServerStream) httpResponseAdapter {
return httpResponseAdapter{RequestServerStream: s, headers: make(http.Header)}
}
@ -442,12 +414,12 @@ func (hrw *httpResponseAdapter) AddTrailer(trailerName, trailerValue string) {
}
func (hrw *httpResponseAdapter) WriteRespHeaders(status int, header http.Header) error {
metadata := make([]quicpogs.Metadata, 0)
metadata = append(metadata, quicpogs.Metadata{Key: "HttpStatus", Val: strconv.Itoa(status)})
metadata := make([]pogs.Metadata, 0)
metadata = append(metadata, pogs.Metadata{Key: "HttpStatus", Val: strconv.Itoa(status)})
for k, vv := range header {
for _, v := range vv {
httpHeaderKey := fmt.Sprintf("%s:%s", HTTPHeaderKey, k)
metadata = append(metadata, quicpogs.Metadata{Key: httpHeaderKey, Val: v})
metadata = append(metadata, pogs.Metadata{Key: httpHeaderKey, Val: v})
}
}
@ -483,17 +455,17 @@ func (hrw *httpResponseAdapter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
}
func (hrw *httpResponseAdapter) WriteErrorResponse(err error) {
hrw.WriteConnectResponseData(err, quicpogs.Metadata{Key: "HttpStatus", Val: strconv.Itoa(http.StatusBadGateway)})
hrw.WriteConnectResponseData(err, pogs.Metadata{Key: "HttpStatus", Val: strconv.Itoa(http.StatusBadGateway)})
}
func (hrw *httpResponseAdapter) WriteConnectResponseData(respErr error, metadata ...quicpogs.Metadata) error {
func (hrw *httpResponseAdapter) WriteConnectResponseData(respErr error, metadata ...pogs.Metadata) error {
hrw.connectResponseSent = true
return hrw.RequestServerStream.WriteConnectResponseData(respErr, metadata...)
}
func buildHTTPRequest(
ctx context.Context,
connectRequest *quicpogs.ConnectRequest,
connectRequest *pogs.ConnectRequest,
body io.ReadCloser,
connIndex uint8,
log *zerolog.Logger,
@ -502,7 +474,7 @@ func buildHTTPRequest(
dest := connectRequest.Dest
method := metadata[HTTPMethodKey]
host := metadata[HTTPHostKey]
isWebsocket := connectRequest.Type == quicpogs.ConnectionTypeWebsocket
isWebsocket := connectRequest.Type == pogs.ConnectionTypeWebsocket
req, err := http.NewRequestWithContext(ctx, method, dest, body)
if err != nil {
@ -597,11 +569,11 @@ func (np *nopCloserReadWriter) Close() error {
// muxerWrapper wraps DatagramMuxerV2 to satisfy the packet.FunnelUniPipe interface
type muxerWrapper struct {
muxer *quicpogs.DatagramMuxerV2
muxer *cfdquic.DatagramMuxerV2
}
func (rp *muxerWrapper) SendPacket(dst netip.Addr, pk packet.RawPacket) error {
return rp.muxer.SendPacket(quicpogs.RawPacket(pk))
return rp.muxer.SendPacket(cfdquic.RawPacket(pk))
}
func (rp *muxerWrapper) ReceivePacket(ctx context.Context) (packet.RawPacket, error) {
@ -609,7 +581,7 @@ func (rp *muxerWrapper) ReceivePacket(ctx context.Context) (packet.RawPacket, er
if err != nil {
return packet.RawPacket{}, err
}
rawPacket, ok := pk.(quicpogs.RawPacket)
rawPacket, ok := pk.(cfdquic.RawPacket)
if ok {
return packet.RawPacket(rawPacket), nil
}
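For context on the hunks above: the old UDP-unregister-only deadline becomes a general rpcTimeout that is handed to rpcquic.NewSessionClient, so every RPC towards the edge shares one deadline. The following is an illustrative sketch, not the tunnelrpc/quic implementation, of how such a client could bound each call; it mirrors the context.WithTimeout pattern of the RPCClientStream that this commit deletes further down, and sessionClientSketch with its package name are hypothetical.

package sketch

import (
	"context"
	"time"

	"github.com/google/uuid"

	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)

// sessionClientSketch is illustrative only: it applies one shared rpcTimeout
// to every RPC instead of a dedicated timeout for UnregisterUdpSession.
type sessionClientSketch struct {
	client  tunnelpogs.CloudflaredServer_PogsClient
	timeout time.Duration
}

func (s *sessionClientSketch) UnregisterUdpSession(ctx context.Context, sessionID uuid.UUID, message string) error {
	ctx, cancel := context.WithTimeout(ctx, s.timeout)
	defer cancel()
	return s.client.UnregisterUdpSession(ctx, sessionID, message)
}

func (s *sessionClientSketch) UpdateConfiguration(ctx context.Context, version int32, config []byte) (*tunnelpogs.UpdateConfigurationResponse, error) {
	ctx, cancel := context.WithTimeout(ctx, s.timeout)
	defer cancel()
	return s.client.UpdateConfiguration(ctx, version, config)
}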


@ -3,9 +3,14 @@ package connection
import (
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
@ -23,14 +28,15 @@ import (
"github.com/stretchr/testify/require"
"github.com/cloudflare/cloudflared/datagramsession"
quicpogs "github.com/cloudflare/cloudflared/quic"
cfdquic "github.com/cloudflare/cloudflared/quic"
"github.com/cloudflare/cloudflared/tracing"
"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
rpcquic "github.com/cloudflare/cloudflared/tunnelrpc/quic"
)
var (
testTLSServerConfig = quicpogs.GenerateTLSConfig()
testTLSServerConfig = GenerateTLSConfig()
testQUICConfig = &quic.Config{
KeepAlivePeriod: 5 * time.Second,
EnableDatagrams: true,
@ -50,16 +56,16 @@ func TestQUICServer(t *testing.T) {
var tests = []struct {
desc string
dest string
connectionType quicpogs.ConnectionType
metadata []quicpogs.Metadata
connectionType pogs.ConnectionType
metadata []pogs.Metadata
message []byte
expectedResponse []byte
}{
{
desc: "test http proxy",
dest: "/ok",
connectionType: quicpogs.ConnectionTypeHTTP,
metadata: []quicpogs.Metadata{
connectionType: pogs.ConnectionTypeHTTP,
metadata: []pogs.Metadata{
{
Key: "HttpHeader:Cf-Ray",
Val: "123123123",
@ -78,8 +84,8 @@ func TestQUICServer(t *testing.T) {
{
desc: "test http body request streaming",
dest: "/slow_echo_body",
connectionType: quicpogs.ConnectionTypeHTTP,
metadata: []quicpogs.Metadata{
connectionType: pogs.ConnectionTypeHTTP,
metadata: []pogs.Metadata{
{
Key: "HttpHeader:Cf-Ray",
Val: "123123123",
@ -103,8 +109,8 @@ func TestQUICServer(t *testing.T) {
{
desc: "test ws proxy",
dest: "/ws/echo",
connectionType: quicpogs.ConnectionTypeWebsocket,
metadata: []quicpogs.Metadata{
connectionType: pogs.ConnectionTypeWebsocket,
metadata: []pogs.Metadata{
{
Key: "HttpHeader:Cf-Cloudflared-Proxy-Connection-Upgrade",
Val: "Websocket",
@ -127,8 +133,8 @@ func TestQUICServer(t *testing.T) {
},
{
desc: "test tcp proxy",
connectionType: quicpogs.ConnectionTypeTCP,
metadata: []quicpogs.Metadata{},
connectionType: pogs.ConnectionTypeTCP,
metadata: []pogs.Metadata{},
message: []byte("Here is some tcp data"),
expectedResponse: []byte("Here is some tcp data"),
},
@ -175,7 +181,7 @@ type fakeControlStream struct {
ControlStreamHandler
}
func (fakeControlStream) ServeControlStream(ctx context.Context, rw io.ReadWriteCloser, connOptions *tunnelpogs.ConnectionOptions, tunnelConfigGetter TunnelConfigJSONGetter) error {
func (fakeControlStream) ServeControlStream(ctx context.Context, rw io.ReadWriteCloser, connOptions *pogs.ConnectionOptions, tunnelConfigGetter TunnelConfigJSONGetter) error {
<-ctx.Done()
return nil
}
@ -188,8 +194,8 @@ func quicServer(
t *testing.T,
listener *quic.Listener,
dest string,
connectionType quicpogs.ConnectionType,
metadata []quicpogs.Metadata,
connectionType pogs.ConnectionType,
metadata []pogs.Metadata,
message []byte,
expectedResponse []byte,
) {
@ -198,9 +204,9 @@ func quicServer(
quicStream, err := session.OpenStreamSync(context.Background())
require.NoError(t, err)
stream := quicpogs.NewSafeStreamCloser(quicStream, defaultQUICTimeout, &log)
stream := cfdquic.NewSafeStreamCloser(quicStream, defaultQUICTimeout, &log)
reqClientStream := quicpogs.RequestClientStream{ReadWriteCloser: stream}
reqClientStream := rpcquic.RequestClientStream{ReadWriteCloser: stream}
err = reqClientStream.WriteConnectRequestData(dest, connectionType, metadata...)
require.NoError(t, err)
@ -265,15 +271,15 @@ func (moc *mockOriginProxyWithRequest) ProxyHTTP(w ResponseWriter, tr *tracing.T
func TestBuildHTTPRequest(t *testing.T) {
var tests = []struct {
name string
connectRequest *quicpogs.ConnectRequest
connectRequest *pogs.ConnectRequest
body io.ReadCloser
req *http.Request
}{
{
name: "check if http.Request is built correctly with content length",
connectRequest: &quicpogs.ConnectRequest{
connectRequest: &pogs.ConnectRequest{
Dest: "http://test.com",
Metadata: []quicpogs.Metadata{
Metadata: []pogs.Metadata{
{
Key: "HttpHeader:Cf-Cloudflared-Proxy-Connection-Upgrade",
Val: "Websocket",
@ -317,9 +323,9 @@ func TestBuildHTTPRequest(t *testing.T) {
},
{
name: "if content length isn't part of request headers, then it's not set",
connectRequest: &quicpogs.ConnectRequest{
connectRequest: &pogs.ConnectRequest{
Dest: "http://test.com",
Metadata: []quicpogs.Metadata{
Metadata: []pogs.Metadata{
{
Key: "HttpHeader:Cf-Cloudflared-Proxy-Connection-Upgrade",
Val: "Websocket",
@ -358,9 +364,9 @@ func TestBuildHTTPRequest(t *testing.T) {
},
{
name: "if content length is 0, but transfer-encoding is chunked, body is not nil",
connectRequest: &quicpogs.ConnectRequest{
connectRequest: &pogs.ConnectRequest{
Dest: "http://test.com",
Metadata: []quicpogs.Metadata{
Metadata: []pogs.Metadata{
{
Key: "HttpHeader:Another-Header",
Val: "Misc",
@ -400,9 +406,9 @@ func TestBuildHTTPRequest(t *testing.T) {
},
{
name: "if content length is 0, but transfer-encoding is gzip,chunked, body is not nil",
connectRequest: &quicpogs.ConnectRequest{
connectRequest: &pogs.ConnectRequest{
Dest: "http://test.com",
Metadata: []quicpogs.Metadata{
Metadata: []pogs.Metadata{
{
Key: "HttpHeader:Another-Header",
Val: "Misc",
@ -442,10 +448,10 @@ func TestBuildHTTPRequest(t *testing.T) {
},
{
name: "if content length is 0, and connect request is a websocket, body is not nil",
connectRequest: &quicpogs.ConnectRequest{
Type: quicpogs.ConnectionTypeWebsocket,
connectRequest: &pogs.ConnectRequest{
Type: pogs.ConnectionTypeWebsocket,
Dest: "http://test.com",
Metadata: []quicpogs.Metadata{
Metadata: []pogs.Metadata{
{
Key: "HttpHeader:Another-Header",
Val: "Misc",
@ -617,9 +623,9 @@ func serveSession(ctx context.Context, qc *QUICConnection, edgeQUICSession quic.
}()
// Send a message to the quic session on the edge side; it should be demuxed to this datagram v2 session
muxedPayload, err := quicpogs.SuffixSessionID(sessionID, payload)
muxedPayload, err := cfdquic.SuffixSessionID(sessionID, payload)
require.NoError(t, err)
muxedPayload, err = quicpogs.SuffixType(muxedPayload, quicpogs.DatagramTypeUDP)
muxedPayload, err = cfdquic.SuffixType(muxedPayload, cfdquic.DatagramTypeUDP)
require.NoError(t, err)
err = edgeQUICSession.SendDatagram(muxedPayload)
@ -665,7 +671,7 @@ const (
closedByTimeout
)
func runRPCServer(ctx context.Context, session quic.Connection, sessionRPCServer tunnelpogs.SessionManager, configRPCServer tunnelpogs.ConfigurationManager, t *testing.T) {
func runRPCServer(ctx context.Context, session quic.Connection, sessionRPCServer pogs.SessionManager, configRPCServer pogs.ConfigurationManager, t *testing.T) {
stream, err := session.AcceptStream(ctx)
require.NoError(t, err)
@ -674,13 +680,15 @@ func runRPCServer(ctx context.Context, session quic.Connection, sessionRPCServer
stream, err = session.AcceptStream(ctx)
require.NoError(t, err)
}
protocol, err := quicpogs.DetermineProtocol(stream)
assert.NoError(t, err)
rpcServerStream, err := quicpogs.NewRPCServerStream(stream, protocol)
assert.NoError(t, err)
log := zerolog.New(os.Stdout)
err = rpcServerStream.Serve(sessionRPCServer, configRPCServer, &log)
ss := rpcquic.NewCloudflaredServer(
func(_ context.Context, _ *rpcquic.RequestServerStream) error {
return nil
},
sessionRPCServer,
configRPCServer,
10*time.Second,
)
err = ss.Serve(ctx, stream)
assert.NoError(t, err)
}
@ -726,7 +734,7 @@ func testQUICConnection(udpListenerAddr net.Addr, t *testing.T, index uint8) *QU
fakeControlStream{},
&log,
nil,
5*time.Second,
15*time.Second,
0*time.Second,
)
require.NoError(t, err)
@ -744,3 +752,27 @@ func (m *mockReaderNoopWriter) Write(p []byte) (n int, err error) {
func (m *mockReaderNoopWriter) Close() error {
return nil
}
// GenerateTLSConfig sets up a bare-bones TLS config for a QUIC server
func GenerateTLSConfig() *tls.Config {
key, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
panic(err)
}
template := x509.Certificate{SerialNumber: big.NewInt(1)}
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
if err != nil {
panic(err)
}
keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
if err != nil {
panic(err)
}
return &tls.Config{
Certificates: []tls.Certificate{tlsCert},
NextProtos: []string{"argotunnel"},
}
}
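As a usage note, a helper like GenerateTLSConfig above is typically paired with a quic-go listener in these tests. A minimal sketch follows, relying on the quic, testing and require imports this test file already has; the helper name and the loopback address are illustrative assumptions, not code from this commit.

// newTestQUICListener is a hypothetical helper showing how the TLS config
// above would back a test listener; 127.0.0.1:0 picks a free local port.
func newTestQUICListener(t *testing.T) *quic.Listener {
	listener, err := quic.ListenAddr("127.0.0.1:0", GenerateTLSConfig(), testQUICConfig)
	require.NoError(t, err)
	return listener
}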


@ -9,7 +9,6 @@ import (
"github.com/rs/zerolog"
"zombiezen.com/go/capnproto2/rpc"
"github.com/cloudflare/cloudflared/tunnelrpc"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
@ -25,11 +24,8 @@ func NewTunnelServerClient(
stream io.ReadWriteCloser,
log *zerolog.Logger,
) *tunnelServerClient {
transport := tunnelrpc.NewTransportLogger(log, rpc.StreamTransport(stream))
conn := rpc.NewConn(
transport,
tunnelrpc.ConnLog(log),
)
transport := rpc.StreamTransport(stream)
conn := rpc.NewConn(transport)
registrationClient := tunnelpogs.RegistrationServer_PogsClient{Client: conn.Bootstrap(ctx), Conn: conn}
return &tunnelServerClient{
client: tunnelpogs.TunnelServer_PogsClient{RegistrationServer_PogsClient: registrationClient, Client: conn.Bootstrap(ctx), Conn: conn},
@ -79,11 +75,8 @@ func newRegistrationRPCClient(
stream io.ReadWriteCloser,
log *zerolog.Logger,
) NamedTunnelRPCClient {
transport := tunnelrpc.NewTransportLogger(log, rpc.StreamTransport(stream))
conn := rpc.NewConn(
transport,
tunnelrpc.ConnLog(log),
)
transport := rpc.StreamTransport(stream)
conn := rpc.NewConn(transport)
return &registrationServerClient{
client: tunnelpogs.RegistrationServer_PogsClient{Client: conn.Bootstrap(ctx), Conn: conn},
transport: transport,
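Dropping tunnelrpc.NewTransportLogger and tunnelrpc.ConnLog leaves the plain zombiezen capnp wiring. A compact sketch of the resulting lifecycle, relying on this file's existing imports; the helper name and the close ordering are assumptions based on the client/transport fields kept above, not code from this hunk.

// newRegistrationClientSketch is hypothetical; it shows the simplified wiring
// without the logging middleware. Callers are assumed to close the pogs client
// first and then the transport, matching the fields retained above.
func newRegistrationClientSketch(ctx context.Context, stream io.ReadWriteCloser) (tunnelpogs.RegistrationServer_PogsClient, rpc.Transport) {
	transport := rpc.StreamTransport(stream) // raw capnp transport, no logging wrapper
	conn := rpc.NewConn(transport)           // no tunnelrpc.ConnLog middleware
	client := tunnelpogs.RegistrationServer_PogsClient{Client: conn.Bootstrap(ctx), Conn: conn}
	return client, transport
}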


@ -1,4 +1,4 @@
FROM golang:1.21.5 as builder
FROM golang:1.22.2 as builder
ENV GO111MODULE=on \
CGO_ENABLED=0
WORKDIR /go/src/github.com/cloudflare/cloudflared/

go.mod

@ -1,6 +1,6 @@
module github.com/cloudflare/cloudflared
go 1.21
go 1.22
require (
github.com/coredns/coredns v1.10.0
@ -16,7 +16,7 @@ require (
github.com/gobwas/ws v1.0.4
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
github.com/google/gopacket v1.1.19
github.com/google/uuid v1.3.1
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.4.2
github.com/json-iterator/go v1.1.12
github.com/mattn/go-colorable v0.1.13
@ -27,21 +27,21 @@ require (
github.com/prometheus/client_model v0.2.0
github.com/quic-go/quic-go v0.42.0
github.com/rs/zerolog v1.20.0
github.com/stretchr/testify v1.8.4
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.3.0
go.opentelemetry.io/contrib/propagators v0.22.0
go.opentelemetry.io/otel v1.21.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0
go.opentelemetry.io/otel/sdk v1.21.0
go.opentelemetry.io/otel/trace v1.21.0
go.opentelemetry.io/proto/otlp v1.0.0
go.opentelemetry.io/otel v1.26.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0
go.opentelemetry.io/otel/sdk v1.26.0
go.opentelemetry.io/otel/trace v1.26.0
go.opentelemetry.io/proto/otlp v1.2.0
go.uber.org/automaxprocs v1.4.0
golang.org/x/crypto v0.21.0
golang.org/x/net v0.21.0
golang.org/x/sync v0.4.0
golang.org/x/sys v0.18.0
golang.org/x/term v0.18.0
google.golang.org/protobuf v1.31.0
golang.org/x/crypto v0.23.0
golang.org/x/net v0.25.0
golang.org/x/sync v0.6.0
golang.org/x/sys v0.20.0
golang.org/x/term v0.20.0
google.golang.org/protobuf v1.34.1
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v3 v3.0.1
nhooyr.io/websocket v1.8.7
@ -61,14 +61,14 @@ require (
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58 // indirect
github.com/gobwas/pool v0.2.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/klauspost/compress v1.15.11 // indirect
github.com/kr/text v0.2.0 // indirect
@ -83,17 +83,17 @@ require (
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.26.0 // indirect
go.uber.org/mock v0.4.0 // indirect
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/oauth2 v0.13.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/oauth2 v0.17.0 // indirect
golang.org/x/text v0.15.0 // indirect
golang.org/x/tools v0.9.1 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/grpc v1.60.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/grpc v1.63.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

go.sum

@ -119,8 +119,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
@ -152,8 +152,6 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4=
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -180,8 +178,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@ -212,15 +210,15 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJY
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@ -344,8 +342,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ=
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
@ -366,19 +364,19 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/contrib/propagators v0.22.0 h1:KGdv58M2//veiYLIhb31mofaI2LgkIPXXAZVeYVyfd8=
go.opentelemetry.io/contrib/propagators v0.22.0/go.mod h1:xGOuXr6lLIF9BXipA4pm6UuOSI0M98U6tsI3khbOiwU=
go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs=
go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs=
go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30=
go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4=
go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8=
go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs=
go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA=
go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0=
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0=
go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
@ -390,8 +388,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -463,8 +461,8 @@ golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -472,8 +470,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -485,8 +483,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -533,12 +531,12 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -547,8 +545,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -656,12 +654,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk=
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU=
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -674,8 +672,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.60.0 h1:6FQAR0kM31P6MRdeluor2w2gPaS4SVNrD/DNTxrQ15k=
google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8=
google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -688,8 +686,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

quic/constants.go (new file)

@ -0,0 +1,12 @@
package quic
import "time"
const (
HandshakeIdleTimeout = 5 * time.Second
MaxIdleTimeout = 5 * time.Second
MaxIdlePingPeriod = 1 * time.Second
// MaxIncomingStreams is 2^60, which is the maximum supported value by Quic-Go
MaxIncomingStreams = 1 << 60
)
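For orientation: these are the QUIC tuning constants previously declared in the quic protocol file deleted later in this commit, and they are consumed when building the quic-go configuration for edge connections. A hedged sketch of that wiring follows; the field names come from quic-go v0.42 (the version pinned in go.mod in this commit), but the surrounding function and call site are assumptions, not code from this file.

package sketch

import (
	"github.com/quic-go/quic-go"

	cfdquic "github.com/cloudflare/cloudflared/quic"
)

// buildQUICConfig is a hypothetical helper showing how the constants above
// could be plugged into a quic-go client configuration.
func buildQUICConfig() *quic.Config {
	return &quic.Config{
		HandshakeIdleTimeout: cfdquic.HandshakeIdleTimeout,
		MaxIdleTimeout:       cfdquic.MaxIdleTimeout,
		KeepAlivePeriod:      cfdquic.MaxIdlePingPeriod,
		MaxIncomingStreams:   cfdquic.MaxIncomingStreams,
		EnableDatagrams:      true,
	}
}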


@ -1,274 +0,0 @@
package quic
import (
"context"
"fmt"
"io"
"net"
"time"
capnp "zombiezen.com/go/capnproto2"
"zombiezen.com/go/capnproto2/rpc"
"github.com/google/uuid"
"github.com/rs/zerolog"
"github.com/cloudflare/cloudflared/tunnelrpc"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
// ProtocolSignature defines the first 6 bytes of the stream, which is used to distinguish the type of stream. It
// ensures whoever performs a handshake does not write data before writing the metadata.
type ProtocolSignature [6]byte
var (
// DataStreamProtocolSignature is a custom protocol signature for data stream
DataStreamProtocolSignature = ProtocolSignature{0x0A, 0x36, 0xCD, 0x12, 0xA1, 0x3E}
// RPCStreamProtocolSignature is a custom protocol signature for RPC stream
RPCStreamProtocolSignature = ProtocolSignature{0x52, 0xBB, 0x82, 0x5C, 0xDB, 0x65}
)
type protocolVersion string
const (
protocolV1 protocolVersion = "01"
protocolVersionLength = 2
HandshakeIdleTimeout = 5 * time.Second
MaxIdleTimeout = 5 * time.Second
MaxIdlePingPeriod = 1 * time.Second
// MaxIncomingStreams is 2^60, which is the maximum supported value by Quic-Go
MaxIncomingStreams = 1 << 60
)
// RequestServerStream is a stream to serve requests
type RequestServerStream struct {
io.ReadWriteCloser
}
func NewRequestServerStream(stream io.ReadWriteCloser, signature ProtocolSignature) (*RequestServerStream, error) {
if signature != DataStreamProtocolSignature {
return nil, fmt.Errorf("RequestClientStream can only be created from data stream")
}
return &RequestServerStream{stream}, nil
}
// ReadConnectRequestData reads the handshake data from a QUIC stream.
func (rss *RequestServerStream) ReadConnectRequestData() (*ConnectRequest, error) {
// This is a NO-OP for now. We could cause a branching if we wanted to use multiple versions.
if _, err := readVersion(rss); err != nil {
return nil, err
}
msg, err := capnp.NewDecoder(rss).Decode()
if err != nil {
return nil, err
}
r := &ConnectRequest{}
if err := r.fromPogs(msg); err != nil {
return nil, err
}
return r, nil
}
// WriteConnectResponseData writes response to a QUIC stream.
func (rss *RequestServerStream) WriteConnectResponseData(respErr error, metadata ...Metadata) error {
var connectResponse *ConnectResponse
if respErr != nil {
connectResponse = &ConnectResponse{
Error: respErr.Error(),
}
} else {
connectResponse = &ConnectResponse{
Metadata: metadata,
}
}
msg, err := connectResponse.toPogs()
if err != nil {
return err
}
if err := writeDataStreamPreamble(rss); err != nil {
return err
}
return capnp.NewEncoder(rss).Encode(msg)
}
type RequestClientStream struct {
io.ReadWriteCloser
}
// WriteConnectRequestData writes requestMeta to a stream.
func (rcs *RequestClientStream) WriteConnectRequestData(dest string, connectionType ConnectionType, metadata ...Metadata) error {
connectRequest := &ConnectRequest{
Dest: dest,
Type: connectionType,
Metadata: metadata,
}
msg, err := connectRequest.toPogs()
if err != nil {
return err
}
if err := writeDataStreamPreamble(rcs); err != nil {
return err
}
return capnp.NewEncoder(rcs).Encode(msg)
}
// ReadConnectResponseData reads the response to a RequestMeta in a stream.
func (rcs *RequestClientStream) ReadConnectResponseData() (*ConnectResponse, error) {
signature, err := DetermineProtocol(rcs)
if err != nil {
return nil, err
}
if signature != DataStreamProtocolSignature {
return nil, fmt.Errorf("wrong protocol signature %v", signature)
}
// This is a NO-OP for now. We could cause a branching if we wanted to use multiple versions.
if _, err := readVersion(rcs); err != nil {
return nil, err
}
msg, err := capnp.NewDecoder(rcs).Decode()
if err != nil {
return nil, err
}
r := &ConnectResponse{}
if err := r.fromPogs(msg); err != nil {
return nil, err
}
return r, nil
}
// RPCServerStream is a stream to serve RPCs. It is closed when the RPC client is done
type RPCServerStream struct {
io.ReadWriteCloser
}
func NewRPCServerStream(stream io.ReadWriteCloser, protocol ProtocolSignature) (*RPCServerStream, error) {
if protocol != RPCStreamProtocolSignature {
return nil, fmt.Errorf("RPCStream can only be created from rpc stream")
}
return &RPCServerStream{stream}, nil
}
func (s *RPCServerStream) Serve(sessionManager tunnelpogs.SessionManager, configManager tunnelpogs.ConfigurationManager, logger *zerolog.Logger) error {
// RPC logs are very robust, create a new logger that only logs error to reduce noise
rpcLogger := logger.Level(zerolog.ErrorLevel)
rpcTransport := tunnelrpc.NewTransportLogger(&rpcLogger, rpc.StreamTransport(s))
defer rpcTransport.Close()
main := tunnelpogs.CloudflaredServer_ServerToClient(sessionManager, configManager)
rpcConn := rpc.NewConn(
rpcTransport,
rpc.MainInterface(main.Client),
tunnelrpc.ConnLog(&rpcLogger),
)
defer rpcConn.Close()
return rpcConn.Wait()
}
func DetermineProtocol(stream io.Reader) (ProtocolSignature, error) {
signature, err := readSignature(stream)
if err != nil {
return ProtocolSignature{}, err
}
switch signature {
case DataStreamProtocolSignature:
return DataStreamProtocolSignature, nil
case RPCStreamProtocolSignature:
return RPCStreamProtocolSignature, nil
default:
return ProtocolSignature{}, fmt.Errorf("unknown signature %v", signature)
}
}
func writeDataStreamPreamble(stream io.Writer) error {
if err := writeSignature(stream, DataStreamProtocolSignature); err != nil {
return err
}
return writeVersion(stream)
}
func writeVersion(stream io.Writer) error {
_, err := stream.Write([]byte(protocolV1)[:protocolVersionLength])
return err
}
func readVersion(stream io.Reader) (string, error) {
version := make([]byte, protocolVersionLength)
_, err := stream.Read(version)
return string(version), err
}
func readSignature(stream io.Reader) (ProtocolSignature, error) {
var signature ProtocolSignature
if _, err := io.ReadFull(stream, signature[:]); err != nil {
return ProtocolSignature{}, err
}
return signature, nil
}
func writeSignature(stream io.Writer, signature ProtocolSignature) error {
_, err := stream.Write(signature[:])
return err
}
// RPCClientStream is a stream to call methods of SessionManager
type RPCClientStream struct {
client tunnelpogs.CloudflaredServer_PogsClient
transport rpc.Transport
// Time we wait for the server to respond to a request before we close the connection.
rpcUnregisterUDPSessionDeadline time.Duration
}
func NewRPCClientStream(ctx context.Context, stream io.ReadWriteCloser, rpcUnregisterUDPSessionDeadline time.Duration, logger *zerolog.Logger) (*RPCClientStream, error) {
n, err := stream.Write(RPCStreamProtocolSignature[:])
if err != nil {
return nil, err
}
if n != len(RPCStreamProtocolSignature) {
return nil, fmt.Errorf("expect to write %d bytes for RPC stream protocol signature, wrote %d", len(RPCStreamProtocolSignature), n)
}
transport := tunnelrpc.NewTransportLogger(logger, rpc.StreamTransport(stream))
conn := rpc.NewConn(
transport,
tunnelrpc.ConnLog(logger),
)
return &RPCClientStream{
client: tunnelpogs.NewCloudflaredServer_PogsClient(conn.Bootstrap(ctx), conn),
transport: transport,
rpcUnregisterUDPSessionDeadline: rpcUnregisterUDPSessionDeadline,
}, nil
}
func (rcs *RPCClientStream) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeIdleAfterHint time.Duration, traceContext string) (*tunnelpogs.RegisterUdpSessionResponse, error) {
return rcs.client.RegisterUdpSession(ctx, sessionID, dstIP, dstPort, closeIdleAfterHint, traceContext)
}
func (rcs *RPCClientStream) UnregisterUdpSession(ctx context.Context, sessionID uuid.UUID, message string) error {
ctx, cancel := context.WithTimeout(ctx, rcs.rpcUnregisterUDPSessionDeadline)
defer cancel()
return rcs.client.UnregisterUdpSession(ctx, sessionID, message)
}
func (rcs *RPCClientStream) UpdateConfiguration(ctx context.Context, version int32, config []byte) (*tunnelpogs.UpdateConfigurationResponse, error) {
return rcs.client.UpdateConfiguration(ctx, version, config)
}
func (rcs *RPCClientStream) Close() {
_ = rcs.client.Close()
_ = rcs.transport.Close()
}
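To show the client side in context, a hedged usage sketch (not part of this change): given an already-established stream that will carry RPCs, register and later unregister a UDP session. The destination address, port, timeouts, and function name are illustrative placeholders.
func registerSessionExample(ctx context.Context, stream io.ReadWriteCloser, logger *zerolog.Logger) error {
	rpcClient, err := NewRPCClientStream(ctx, stream, 5*time.Second, logger)
	if err != nil {
		return err
	}
	defer rpcClient.Close()

	sessionID := uuid.New()
	resp, err := rpcClient.RegisterUdpSession(ctx, sessionID, net.ParseIP("192.0.2.10"), 53, 30*time.Second, "")
	if err != nil {
		return err
	}
	if resp.Err != nil {
		return resp.Err
	}
	// ... proxy datagrams for the session here ...
	return rpcClient.UnregisterUdpSession(ctx, sessionID, "session finished")
}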

View File

@ -2,8 +2,13 @@ package quic
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"io"
"math/big"
"net"
"sync"
"testing"
@ -147,3 +152,27 @@ func serverRoundTrip(t *testing.T, stream io.ReadWriteCloser, mustWork bool) {
}
require.NoError(t, err)
}
// GenerateTLSConfig sets up a bare-bones TLS config for a QUIC server
func GenerateTLSConfig() *tls.Config {
key, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
panic(err)
}
template := x509.Certificate{SerialNumber: big.NewInt(1)}
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
if err != nil {
panic(err)
}
keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
if err != nil {
panic(err)
}
return &tls.Config{
Certificates: []tls.Certificate{tlsCert},
NextProtos: []string{"argotunnel"},
}
}
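As a usage note (editorial, not from the diff), the helper above can be handed straight to a QUIC listener in a test. The sketch below assumes the quic-go package already used by these tests and a dialer that offers the matching ALPN "argotunnel"; names and addresses are illustrative.
func listenWithTestTLS(ctx context.Context) error {
	listener, err := quic.ListenAddr("127.0.0.1:0", GenerateTLSConfig(), &quic.Config{})
	if err != nil {
		return err
	}
	defer listener.Close()

	// Accept blocks until a client dials with a certificate-accepting config and ALPN "argotunnel".
	conn, err := listener.Accept(ctx)
	if err != nil {
		return err
	}
	_ = conn // hand the connection to the code under test
	return nil
}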

View File

@ -1,28 +0,0 @@
using Go = import "/go.capnp";
@0xb29021ef7421cc32;
$Go.package("schema");
$Go.import("schema");
struct ConnectRequest{
dest @0 :Text;
type @1 :ConnectionType;
metadata @2 :List(Metadata);
}
enum ConnectionType{
http @0;
websocket @1;
tcp @2;
}
struct Metadata {
key @0 :Text;
val @1 :Text;
}
struct ConnectResponse{
error @0 :Text;
metadata @1 :List(Metadata);
}

View File

@ -1,34 +0,0 @@
package quic
import (
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"math/big"
)
// GenerateTLSConfig sets up a bare-bones TLS config for a QUIC server
func GenerateTLSConfig() *tls.Config {
key, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
panic(err)
}
template := x509.Certificate{SerialNumber: big.NewInt(1)}
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
if err != nil {
panic(err)
}
keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
if err != nil {
panic(err)
}
return &tls.Config{
Certificates: []tls.Certificate{tlsCert},
NextProtos: []string{"argotunnel"},
}
}

View File

@ -27,8 +27,8 @@ import (
quicpogs "github.com/cloudflare/cloudflared/quic"
"github.com/cloudflare/cloudflared/retry"
"github.com/cloudflare/cloudflared/signal"
"github.com/cloudflare/cloudflared/tunnelrpc"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
"github.com/cloudflare/cloudflared/tunnelrpc/proto"
"github.com/cloudflare/cloudflared/tunnelstate"
)
@ -65,8 +65,8 @@ type TunnelConfig struct {
EdgeTLSConfigs map[connection.Protocol]*tls.Config
PacketConfig *ingress.GlobalRouterConfig
UDPUnregisterSessionTimeout time.Duration
WriteStreamTimeout time.Duration
RPCTimeout time.Duration
WriteStreamTimeout time.Duration
DisableQUICPathMTUDiscovery bool
@ -74,9 +74,9 @@ type TunnelConfig struct {
}
func (c *TunnelConfig) registrationOptions(connectionID uint8, OriginLocalIP string, uuid uuid.UUID) *tunnelpogs.RegistrationOptions {
policy := tunnelrpc.ExistingTunnelPolicy_balance
policy := proto.ExistingTunnelPolicy_balance
if c.HAConnections <= 1 && c.LBPool == "" {
policy = tunnelrpc.ExistingTunnelPolicy_disconnect
policy = proto.ExistingTunnelPolicy_disconnect
}
return &tunnelpogs.RegistrationOptions{
ClientID: c.ClientID,
@ -614,7 +614,7 @@ func (e *EdgeTunnelServer) serveQUIC(
controlStreamHandler,
connLogger.Logger(),
e.config.PacketConfig,
e.config.UDPUnregisterSessionTimeout,
e.config.RPCTimeout,
e.config.WriteStreamTimeout,
)
if err != nil {

View File

@ -1,15 +0,0 @@
# Generate go.capnp.out with:
# capnp compile -o- go.capnp > go.capnp.out
# Must run inside this directory to preserve paths.
@0xd12a1c51fedd6c88;
annotation package(file) :Text;
annotation import(file) :Text;
annotation doc(struct, field, enum) :Text;
annotation tag(enumerant) :Text;
annotation notag(enumerant) :Void;
annotation customtype(field) :Text;
annotation name(struct, field, union, enum, enumerant, interface, method, param, annotation, const, group) :Text;
$package("capnp");

View File

@ -1,43 +0,0 @@
package tunnelrpc
import (
"context"
"github.com/rs/zerolog"
"golang.org/x/net/trace"
"zombiezen.com/go/capnproto2/rpc"
)
// ConnLogger wraps a Zerolog Logger for a connection.
type ConnLogger struct {
Log *zerolog.Logger
}
func (c ConnLogger) Infof(ctx context.Context, format string, args ...interface{}) {
c.Log.Info().Msgf(format, args...)
}
func (c ConnLogger) Errorf(ctx context.Context, format string, args ...interface{}) {
c.Log.Error().Msgf(format, args...)
}
func ConnLog(log *zerolog.Logger) rpc.ConnOption {
return rpc.ConnLog(ConnLogger{log})
}
// ConnTracer wraps a trace.EventLog for a connection.
type ConnTracer struct {
Events trace.EventLog
}
func (c ConnTracer) Infof(ctx context.Context, format string, args ...interface{}) {
c.Events.Printf(format, args...)
}
func (c ConnTracer) Errorf(ctx context.Context, format string, args ...interface{}) {
c.Events.Errorf(format, args...)
}
func ConnTrace(events trace.EventLog) rpc.ConnOption {
return rpc.ConnLog(ConnTracer{events})
}

View File

@ -1,45 +0,0 @@
// Package logtransport provides a transport that logs all of its messages.
package tunnelrpc
import (
"bytes"
"context"
"github.com/rs/zerolog"
"zombiezen.com/go/capnproto2/encoding/text"
"zombiezen.com/go/capnproto2/rpc"
rpccapnp "zombiezen.com/go/capnproto2/std/capnp/rpc"
)
type transport struct {
rpc.Transport
log *zerolog.Logger
}
// NewTransportLogger creates a new logger that proxies messages to and from t and
// logs them to log. If log is nil, then the log package's default
// logger is used.
func NewTransportLogger(log *zerolog.Logger, t rpc.Transport) rpc.Transport {
return &transport{Transport: t, log: log}
}
func (t *transport) SendMessage(ctx context.Context, msg rpccapnp.Message) error {
t.log.Trace().Msgf("rpc tx: %s", formatMsg(msg))
return t.Transport.SendMessage(ctx, msg)
}
func (t *transport) RecvMessage(ctx context.Context) (rpccapnp.Message, error) {
msg, err := t.Transport.RecvMessage(ctx)
if err != nil {
t.log.Debug().Msgf("rpc rx error: %s", err)
return msg, err
}
t.log.Trace().Msgf("rpc rx: %s", formatMsg(msg))
return msg, nil
}
func formatMsg(m rpccapnp.Message) string {
var buf bytes.Buffer
_ = text.NewEncoder(&buf).Encode(0x91b79f1f808db032, m.Struct)
return buf.String()
}

View File

@ -6,10 +6,10 @@ import (
"zombiezen.com/go/capnproto2/pogs"
"zombiezen.com/go/capnproto2/server"
"github.com/cloudflare/cloudflared/tunnelrpc"
"github.com/cloudflare/cloudflared/tunnelrpc/proto"
)
func (i TunnelServer_PogsImpl) Authenticate(p tunnelrpc.TunnelServer_authenticate) error {
func (i TunnelServer_PogsImpl) Authenticate(p proto.TunnelServer_authenticate) error {
originCert, err := p.Params.OriginCert()
if err != nil {
return err
@ -39,13 +39,13 @@ func (i TunnelServer_PogsImpl) Authenticate(p tunnelrpc.TunnelServer_authenticat
return MarshalAuthenticateResponse(result, resp)
}
func MarshalAuthenticateResponse(s tunnelrpc.AuthenticateResponse, p *AuthenticateResponse) error {
return pogs.Insert(tunnelrpc.AuthenticateResponse_TypeID, s.Struct, p)
func MarshalAuthenticateResponse(s proto.AuthenticateResponse, p *AuthenticateResponse) error {
return pogs.Insert(proto.AuthenticateResponse_TypeID, s.Struct, p)
}
func (c TunnelServer_PogsClient) Authenticate(ctx context.Context, originCert []byte, hostname string, options *RegistrationOptions) (*AuthenticateResponse, error) {
client := tunnelrpc.TunnelServer{Client: c.Client}
promise := client.Authenticate(ctx, func(p tunnelrpc.TunnelServer_authenticate_Params) error {
client := proto.TunnelServer{Client: c.Client}
promise := client.Authenticate(ctx, func(p proto.TunnelServer_authenticate_Params) error {
err := p.SetOriginCert(originCert)
if err != nil {
return err
@ -71,8 +71,8 @@ func (c TunnelServer_PogsClient) Authenticate(ctx context.Context, originCert []
return UnmarshalAuthenticateResponse(retval)
}
func UnmarshalAuthenticateResponse(s tunnelrpc.AuthenticateResponse) (*AuthenticateResponse, error) {
func UnmarshalAuthenticateResponse(s proto.AuthenticateResponse) (*AuthenticateResponse, error) {
p := new(AuthenticateResponse)
err := pogs.Extract(p, tunnelrpc.AuthenticateResponse_TypeID, s.Struct)
err := pogs.Extract(p, proto.AuthenticateResponse_TypeID, s.Struct)
return p, err
}

View File

@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/assert"
capnp "zombiezen.com/go/capnproto2"
"github.com/cloudflare/cloudflared/tunnelrpc"
"github.com/cloudflare/cloudflared/tunnelrpc/proto"
)
// Ensure the AuthOutcome sum is correct
@ -119,7 +119,7 @@ func TestSerializeAuthenticationResponse(t *testing.T) {
for i, testCase := range tests {
_, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
assert.NoError(t, err)
capnpEntity, err := tunnelrpc.NewAuthenticateResponse(seg)
capnpEntity, err := proto.NewAuthenticateResponse(seg)
if !assert.NoError(t, err) {
t.Fatal("Couldn't initialize a new message")
}

View File

@ -1,9 +1,10 @@
package pogs
import (
"github.com/cloudflare/cloudflared/tunnelrpc"
capnp "zombiezen.com/go/capnproto2"
"zombiezen.com/go/capnproto2/rpc"
"github.com/cloudflare/cloudflared/tunnelrpc/proto"
)
type CloudflaredServer interface {
@ -16,8 +17,8 @@ type CloudflaredServer_PogsImpl struct {
ConfigurationManager_PogsImpl
}
func CloudflaredServer_ServerToClient(s SessionManager, c ConfigurationManager) tunnelrpc.CloudflaredServer {
return tunnelrpc.CloudflaredServer_ServerToClient(CloudflaredServer_PogsImpl{
func CloudflaredServer_ServerToClient(s SessionManager, c ConfigurationManager) proto.CloudflaredServer {
return proto.CloudflaredServer_ServerToClient(CloudflaredServer_PogsImpl{
SessionManager_PogsImpl: SessionManager_PogsImpl{s},
ConfigurationManager_PogsImpl: ConfigurationManager_PogsImpl{c},
})

View File

@ -4,10 +4,11 @@ import (
"context"
"fmt"
"github.com/cloudflare/cloudflared/tunnelrpc"
capnp "zombiezen.com/go/capnproto2"
"zombiezen.com/go/capnproto2/rpc"
"zombiezen.com/go/capnproto2/server"
"github.com/cloudflare/cloudflared/tunnelrpc/proto"
)
type ConfigurationManager interface {
@ -18,11 +19,11 @@ type ConfigurationManager_PogsImpl struct {
impl ConfigurationManager
}
func ConfigurationManager_ServerToClient(c ConfigurationManager) tunnelrpc.ConfigurationManager {
return tunnelrpc.ConfigurationManager_ServerToClient(ConfigurationManager_PogsImpl{c})
func ConfigurationManager_ServerToClient(c ConfigurationManager) proto.ConfigurationManager {
return proto.ConfigurationManager_ServerToClient(ConfigurationManager_PogsImpl{c})
}
func (i ConfigurationManager_PogsImpl) UpdateConfiguration(p tunnelrpc.ConfigurationManager_updateConfiguration) error {
func (i ConfigurationManager_PogsImpl) UpdateConfiguration(p proto.ConfigurationManager_updateConfiguration) error {
server.Ack(p.Options)
version := p.Params.Version()
@ -51,8 +52,8 @@ func (c ConfigurationManager_PogsClient) Close() error {
}
func (c ConfigurationManager_PogsClient) UpdateConfiguration(ctx context.Context, version int32, config []byte) (*UpdateConfigurationResponse, error) {
client := tunnelrpc.ConfigurationManager{Client: c.Client}
promise := client.UpdateConfiguration(ctx, func(p tunnelrpc.ConfigurationManager_updateConfiguration_Params) error {
client := proto.ConfigurationManager{Client: c.Client}
promise := client.UpdateConfiguration(ctx, func(p proto.ConfigurationManager_updateConfiguration_Params) error {
p.SetVersion(version)
return p.SetConfig(config)
})
@ -74,7 +75,7 @@ type UpdateConfigurationResponse struct {
Err error `json:"err"`
}
func (p *UpdateConfigurationResponse) Marshal(s tunnelrpc.UpdateConfigurationResponse) error {
func (p *UpdateConfigurationResponse) Marshal(s proto.UpdateConfigurationResponse) error {
s.SetLatestAppliedVersion(p.LastAppliedVersion)
if p.Err != nil {
return s.SetErr(p.Err.Error())
@ -82,7 +83,7 @@ func (p *UpdateConfigurationResponse) Marshal(s tunnelrpc.UpdateConfigurationRes
return nil
}
func (p *UpdateConfigurationResponse) Unmarshal(s tunnelrpc.UpdateConfigurationResponse) error {
func (p *UpdateConfigurationResponse) Unmarshal(s proto.UpdateConfigurationResponse) error {
p.LastAppliedVersion = s.LatestAppliedVersion()
respErr, err := s.Err()
if err != nil {

View File

@ -12,7 +12,7 @@ import (
"zombiezen.com/go/capnproto2/rpc"
"zombiezen.com/go/capnproto2/server"
"github.com/cloudflare/cloudflared/tunnelrpc"
"github.com/cloudflare/cloudflared/tunnelrpc/proto"
)
type RegistrationServer interface {
@ -25,11 +25,11 @@ type RegistrationServer_PogsImpl struct {
impl RegistrationServer
}
func RegistrationServer_ServerToClient(s RegistrationServer) tunnelrpc.RegistrationServer {
return tunnelrpc.RegistrationServer_ServerToClient(RegistrationServer_PogsImpl{s})
func RegistrationServer_ServerToClient(s RegistrationServer) proto.RegistrationServer {
return proto.RegistrationServer_ServerToClient(RegistrationServer_PogsImpl{s})
}
func (i RegistrationServer_PogsImpl) RegisterConnection(p tunnelrpc.RegistrationServer_registerConnection) error {
func (i RegistrationServer_PogsImpl) RegisterConnection(p proto.RegistrationServer_registerConnection) error {
server.Ack(p.Options)
auth, err := p.Params.Auth()
@ -82,14 +82,14 @@ func (i RegistrationServer_PogsImpl) RegisterConnection(p tunnelrpc.Registration
}
}
func (i RegistrationServer_PogsImpl) UnregisterConnection(p tunnelrpc.RegistrationServer_unregisterConnection) error {
func (i RegistrationServer_PogsImpl) UnregisterConnection(p proto.RegistrationServer_unregisterConnection) error {
server.Ack(p.Options)
i.impl.UnregisterConnection(p.Ctx)
return nil
}
func (i RegistrationServer_PogsImpl) UpdateLocalConfiguration(c tunnelrpc.RegistrationServer_updateLocalConfiguration) error {
func (i RegistrationServer_PogsImpl) UpdateLocalConfiguration(c proto.RegistrationServer_updateLocalConfiguration) error {
server.Ack(c.Options)
configBytes, err := c.Params.Config()
@ -111,8 +111,8 @@ func (c RegistrationServer_PogsClient) Close() error {
}
func (c RegistrationServer_PogsClient) RegisterConnection(ctx context.Context, auth TunnelAuth, tunnelID uuid.UUID, connIndex byte, options *ConnectionOptions) (*ConnectionDetails, error) {
client := tunnelrpc.TunnelServer{Client: c.Client}
promise := client.RegisterConnection(ctx, func(p tunnelrpc.RegistrationServer_registerConnection_Params) error {
client := proto.TunnelServer{Client: c.Client}
promise := client.RegisterConnection(ctx, func(p proto.RegistrationServer_registerConnection_Params) error {
tunnelAuth, err := p.NewAuth()
if err != nil {
return err
@ -145,7 +145,7 @@ func (c RegistrationServer_PogsClient) RegisterConnection(ctx context.Context, a
}
result := response.Result()
switch result.Which() {
case tunnelrpc.ConnectionResponse_result_Which_error:
case proto.ConnectionResponse_result_Which_error:
resultError, err := result.Error()
if err != nil {
return nil, wrapRPCError(err)
@ -160,7 +160,7 @@ func (c RegistrationServer_PogsClient) RegisterConnection(ctx context.Context, a
}
return nil, err
case tunnelrpc.ConnectionResponse_result_Which_connectionDetails:
case proto.ConnectionResponse_result_Which_connectionDetails:
connDetails, err := result.ConnectionDetails()
if err != nil {
return nil, wrapRPCError(err)
@ -176,8 +176,8 @@ func (c RegistrationServer_PogsClient) RegisterConnection(ctx context.Context, a
}
func (c RegistrationServer_PogsClient) SendLocalConfiguration(ctx context.Context, config []byte) error {
client := tunnelrpc.TunnelServer{Client: c.Client}
promise := client.UpdateLocalConfiguration(ctx, func(p tunnelrpc.RegistrationServer_updateLocalConfiguration_Params) error {
client := proto.TunnelServer{Client: c.Client}
promise := client.UpdateLocalConfiguration(ctx, func(p proto.RegistrationServer_updateLocalConfiguration_Params) error {
if err := p.SetConfig(config); err != nil {
return err
}
@ -194,8 +194,8 @@ func (c RegistrationServer_PogsClient) SendLocalConfiguration(ctx context.Contex
}
func (c RegistrationServer_PogsClient) UnregisterConnection(ctx context.Context) error {
client := tunnelrpc.TunnelServer{Client: c.Client}
promise := client.UnregisterConnection(ctx, func(p tunnelrpc.RegistrationServer_unregisterConnection_Params) error {
client := proto.TunnelServer{Client: c.Client}
promise := client.UnregisterConnection(ctx, func(p proto.RegistrationServer_unregisterConnection_Params) error {
return nil
})
_, err := promise.Struct()
@ -225,20 +225,20 @@ type TunnelAuth struct {
TunnelSecret []byte
}
func (p *ConnectionOptions) MarshalCapnproto(s tunnelrpc.ConnectionOptions) error {
return pogs.Insert(tunnelrpc.ConnectionOptions_TypeID, s.Struct, p)
func (p *ConnectionOptions) MarshalCapnproto(s proto.ConnectionOptions) error {
return pogs.Insert(proto.ConnectionOptions_TypeID, s.Struct, p)
}
func (p *ConnectionOptions) UnmarshalCapnproto(s tunnelrpc.ConnectionOptions) error {
return pogs.Extract(p, tunnelrpc.ConnectionOptions_TypeID, s.Struct)
func (p *ConnectionOptions) UnmarshalCapnproto(s proto.ConnectionOptions) error {
return pogs.Extract(p, proto.ConnectionOptions_TypeID, s.Struct)
}
func (a *TunnelAuth) MarshalCapnproto(s tunnelrpc.TunnelAuth) error {
return pogs.Insert(tunnelrpc.TunnelAuth_TypeID, s.Struct, a)
func (a *TunnelAuth) MarshalCapnproto(s proto.TunnelAuth) error {
return pogs.Insert(proto.TunnelAuth_TypeID, s.Struct, a)
}
func (a *TunnelAuth) UnmarshalCapnproto(s tunnelrpc.TunnelAuth) error {
return pogs.Extract(a, tunnelrpc.TunnelAuth_TypeID, s.Struct)
func (a *TunnelAuth) UnmarshalCapnproto(s proto.TunnelAuth) error {
return pogs.Extract(a, proto.TunnelAuth_TypeID, s.Struct)
}
type ConnectionDetails struct {
@ -247,7 +247,7 @@ type ConnectionDetails struct {
TunnelIsRemotelyManaged bool
}
func (details *ConnectionDetails) MarshalCapnproto(s tunnelrpc.ConnectionDetails) error {
func (details *ConnectionDetails) MarshalCapnproto(s proto.ConnectionDetails) error {
if err := s.SetUuid(details.UUID[:]); err != nil {
return err
}
@ -259,7 +259,7 @@ func (details *ConnectionDetails) MarshalCapnproto(s tunnelrpc.ConnectionDetails
return nil
}
func (details *ConnectionDetails) UnmarshalCapnproto(s tunnelrpc.ConnectionDetails) error {
func (details *ConnectionDetails) UnmarshalCapnproto(s proto.ConnectionDetails) error {
uuidBytes, err := s.Uuid()
if err != nil {
return err
@ -277,7 +277,7 @@ func (details *ConnectionDetails) UnmarshalCapnproto(s tunnelrpc.ConnectionDetai
return err
}
func MarshalError(s tunnelrpc.ConnectionError, err error) error {
func MarshalError(s proto.ConnectionError, err error) error {
if err := s.SetCause(err.Error()); err != nil {
return err
}

View File

@ -13,7 +13,7 @@ import (
capnp "zombiezen.com/go/capnproto2"
"zombiezen.com/go/capnproto2/rpc"
"github.com/cloudflare/cloudflared/tunnelrpc"
"github.com/cloudflare/cloudflared/tunnelrpc/proto"
)
const testAccountTag = "abc123"
@ -34,7 +34,7 @@ func TestMarshalConnectionOptions(t *testing.T) {
_, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
require.NoError(t, err)
capnpOpts, err := tunnelrpc.NewConnectionOptions(seg)
capnpOpts, err := proto.NewConnectionOptions(seg)
require.NoError(t, err)
err = orig.MarshalCapnproto(capnpOpts)

View File

@ -1,4 +1,4 @@
package quic
package pogs
import (
"fmt"
@ -6,7 +6,7 @@ import (
capnp "zombiezen.com/go/capnproto2"
"zombiezen.com/go/capnproto2/pogs"
"github.com/cloudflare/cloudflared/quic/schema"
"github.com/cloudflare/cloudflared/tunnelrpc/proto"
)
// ConnectionType indicates the type of underlying connection proxied within the QUIC stream.
@ -52,26 +52,26 @@ func (r *ConnectRequest) MetadataMap() map[string]string {
return metadataMap
}
func (r *ConnectRequest) fromPogs(msg *capnp.Message) error {
metadata, err := schema.ReadRootConnectRequest(msg)
func (r *ConnectRequest) FromPogs(msg *capnp.Message) error {
metadata, err := proto.ReadRootConnectRequest(msg)
if err != nil {
return err
}
return pogs.Extract(r, schema.ConnectRequest_TypeID, metadata.Struct)
return pogs.Extract(r, proto.ConnectRequest_TypeID, metadata.Struct)
}
func (r *ConnectRequest) toPogs() (*capnp.Message, error) {
func (r *ConnectRequest) ToPogs() (*capnp.Message, error) {
msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
if err != nil {
return nil, err
}
root, err := schema.NewRootConnectRequest(seg)
root, err := proto.NewRootConnectRequest(seg)
if err != nil {
return nil, err
}
if err := pogs.Insert(schema.ConnectRequest_TypeID, root.Struct, r); err != nil {
if err := pogs.Insert(proto.ConnectRequest_TypeID, root.Struct, r); err != nil {
return nil, err
}
@ -84,26 +84,26 @@ type ConnectResponse struct {
Metadata []Metadata `capnp:"metadata"`
}
func (r *ConnectResponse) fromPogs(msg *capnp.Message) error {
metadata, err := schema.ReadRootConnectResponse(msg)
func (r *ConnectResponse) FromPogs(msg *capnp.Message) error {
metadata, err := proto.ReadRootConnectResponse(msg)
if err != nil {
return err
}
return pogs.Extract(r, schema.ConnectResponse_TypeID, metadata.Struct)
return pogs.Extract(r, proto.ConnectResponse_TypeID, metadata.Struct)
}
func (r *ConnectResponse) toPogs() (*capnp.Message, error) {
func (r *ConnectResponse) ToPogs() (*capnp.Message, error) {
msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
if err != nil {
return nil, err
}
root, err := schema.NewRootConnectResponse(seg)
root, err := proto.NewRootConnectResponse(seg)
if err != nil {
return nil, err
}
if err := pogs.Insert(schema.ConnectResponse_TypeID, root.Struct, r); err != nil {
if err := pogs.Insert(proto.ConnectResponse_TypeID, root.Struct, r); err != nil {
return nil, err
}
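To make the renamed ToPogs/FromPogs pair concrete, a hedged round-trip sketch (not part of this change). It assumes the pogs struct exposes the schema's dest field as Dest and reuses the capnp encoder/decoder pattern shown earlier in this diff.
func roundTripConnectRequest() (*ConnectRequest, error) {
	original := &ConnectRequest{Dest: "internal.example.com:443"} // Dest value is illustrative

	// Convert the pogs struct into a capnp message.
	msg, err := original.ToPogs()
	if err != nil {
		return nil, err
	}

	// Ship it through an in-memory buffer, as a stream would.
	var buf bytes.Buffer
	if err := capnp.NewEncoder(&buf).Encode(msg); err != nil {
		return nil, err
	}
	decoded, err := capnp.NewDecoder(&buf).Decode()
	if err != nil {
		return nil, err
	}

	// Rebuild the pogs struct from the decoded message.
	restored := &ConnectRequest{}
	if err := restored.FromPogs(decoded); err != nil {
		return nil, err
	}
	return restored, nil
}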

View File

@ -5,10 +5,10 @@ import (
"zombiezen.com/go/capnproto2/server"
"github.com/cloudflare/cloudflared/tunnelrpc"
"github.com/cloudflare/cloudflared/tunnelrpc/proto"
)
func (i TunnelServer_PogsImpl) ReconnectTunnel(p tunnelrpc.TunnelServer_reconnectTunnel) error {
func (i TunnelServer_PogsImpl) ReconnectTunnel(p proto.TunnelServer_reconnectTunnel) error {
jwt, err := p.Params.Jwt()
if err != nil {
return err
@ -53,8 +53,8 @@ func (c TunnelServer_PogsClient) ReconnectTunnel(
hostname string,
options *RegistrationOptions,
) *TunnelRegistration {
client := tunnelrpc.TunnelServer{Client: c.Client}
promise := client.ReconnectTunnel(ctx, func(p tunnelrpc.TunnelServer_reconnectTunnel_Params) error {
client := proto.TunnelServer{Client: c.Client}
promise := client.ReconnectTunnel(ctx, func(p proto.TunnelServer_reconnectTunnel_Params) error {
err := p.SetJwt(jwt)
if err != nil {
return err

View File

@ -6,11 +6,12 @@ import (
"net"
"time"
"github.com/cloudflare/cloudflared/tunnelrpc"
"github.com/google/uuid"
capnp "zombiezen.com/go/capnproto2"
"zombiezen.com/go/capnproto2/rpc"
"zombiezen.com/go/capnproto2/server"
"github.com/cloudflare/cloudflared/tunnelrpc/proto"
)
type SessionManager interface {
@ -26,11 +27,11 @@ type SessionManager_PogsImpl struct {
impl SessionManager
}
func SessionManager_ServerToClient(s SessionManager) tunnelrpc.SessionManager {
return tunnelrpc.SessionManager_ServerToClient(SessionManager_PogsImpl{s})
func SessionManager_ServerToClient(s SessionManager) proto.SessionManager {
return proto.SessionManager_ServerToClient(SessionManager_PogsImpl{s})
}
func (i SessionManager_PogsImpl) RegisterUdpSession(p tunnelrpc.SessionManager_registerUdpSession) error {
func (i SessionManager_PogsImpl) RegisterUdpSession(p proto.SessionManager_registerUdpSession) error {
server.Ack(p.Options)
sessionIDRaw, err := p.Params.SessionId()
@ -76,7 +77,7 @@ func (i SessionManager_PogsImpl) RegisterUdpSession(p tunnelrpc.SessionManager_r
return resp.Marshal(result)
}
func (i SessionManager_PogsImpl) UnregisterUdpSession(p tunnelrpc.SessionManager_unregisterUdpSession) error {
func (i SessionManager_PogsImpl) UnregisterUdpSession(p proto.SessionManager_unregisterUdpSession) error {
server.Ack(p.Options)
sessionIDRaw, err := p.Params.SessionId()
@ -101,7 +102,7 @@ type RegisterUdpSessionResponse struct {
Spans []byte // Spans in protobuf format
}
func (p *RegisterUdpSessionResponse) Marshal(s tunnelrpc.RegisterUdpSessionResponse) error {
func (p *RegisterUdpSessionResponse) Marshal(s proto.RegisterUdpSessionResponse) error {
if p.Err != nil {
return s.SetErr(p.Err.Error())
}
@ -111,7 +112,7 @@ func (p *RegisterUdpSessionResponse) Marshal(s tunnelrpc.RegisterUdpSessionRespo
return nil
}
func (p *RegisterUdpSessionResponse) Unmarshal(s tunnelrpc.RegisterUdpSessionResponse) error {
func (p *RegisterUdpSessionResponse) Unmarshal(s proto.RegisterUdpSessionResponse) error {
respErr, err := s.Err()
if err != nil {
return err
@ -131,14 +132,21 @@ type SessionManager_PogsClient struct {
Conn *rpc.Conn
}
func NewSessionManager_PogsClient(client capnp.Client, conn *rpc.Conn) SessionManager_PogsClient {
return SessionManager_PogsClient{
Client: client,
Conn: conn,
}
}
func (c SessionManager_PogsClient) Close() error {
c.Client.Close()
return c.Conn.Close()
}
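For context, a brief hedged sketch (not from the diff) of how the new constructor might be wired up from an established stream; the function name is illustrative and the transport setup mirrors the patterns used elsewhere in this change.
func newSessionManagerClient(ctx context.Context, stream io.ReadWriteCloser) SessionManager_PogsClient {
	// Build a capnp RPC connection over the stream and bootstrap the remote capability.
	conn := rpc.NewConn(rpc.StreamTransport(stream))
	return NewSessionManager_PogsClient(conn.Bootstrap(ctx), conn)
}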
func (c SessionManager_PogsClient) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeAfterIdleHint time.Duration, traceContext string) (*RegisterUdpSessionResponse, error) {
client := tunnelrpc.SessionManager{Client: c.Client}
promise := client.RegisterUdpSession(ctx, func(p tunnelrpc.SessionManager_registerUdpSession_Params) error {
client := proto.SessionManager{Client: c.Client}
promise := client.RegisterUdpSession(ctx, func(p proto.SessionManager_registerUdpSession_Params) error {
if err := p.SetSessionId(sessionID[:]); err != nil {
return err
}
@ -164,8 +172,8 @@ func (c SessionManager_PogsClient) RegisterUdpSession(ctx context.Context, sessi
}
func (c SessionManager_PogsClient) UnregisterUdpSession(ctx context.Context, sessionID uuid.UUID, message string) error {
client := tunnelrpc.SessionManager{Client: c.Client}
promise := client.UnregisterUdpSession(ctx, func(p tunnelrpc.SessionManager_unregisterUdpSession_Params) error {
client := proto.SessionManager{Client: c.Client}
promise := client.UnregisterUdpSession(ctx, func(p proto.SessionManager_unregisterUdpSession_Params) error {
if err := p.SetSessionId(sessionID[:]); err != nil {
return err
}

View File

@ -9,7 +9,7 @@ import (
"zombiezen.com/go/capnproto2/rpc"
"zombiezen.com/go/capnproto2/server"
"github.com/cloudflare/cloudflared/tunnelrpc"
"github.com/cloudflare/cloudflared/tunnelrpc/proto"
)
const (
@ -22,13 +22,13 @@ type Authentication struct {
OriginCAKey string
}
func MarshalAuthentication(s tunnelrpc.Authentication, p *Authentication) error {
return pogs.Insert(tunnelrpc.Authentication_TypeID, s.Struct, p)
func MarshalAuthentication(s proto.Authentication, p *Authentication) error {
return pogs.Insert(proto.Authentication_TypeID, s.Struct, p)
}
func UnmarshalAuthentication(s tunnelrpc.Authentication) (*Authentication, error) {
func UnmarshalAuthentication(s proto.Authentication) (*Authentication, error) {
p := new(Authentication)
err := pogs.Extract(p, tunnelrpc.Authentication_TypeID, s.Struct)
err := pogs.Extract(p, proto.Authentication_TypeID, s.Struct)
return p, err
}
@ -144,13 +144,13 @@ func (*RetryableRegistrationError) IsPermanent() bool {
return false
}
func MarshalTunnelRegistration(s tunnelrpc.TunnelRegistration, p *TunnelRegistration) error {
return pogs.Insert(tunnelrpc.TunnelRegistration_TypeID, s.Struct, p)
func MarshalTunnelRegistration(s proto.TunnelRegistration, p *TunnelRegistration) error {
return pogs.Insert(proto.TunnelRegistration_TypeID, s.Struct, p)
}
func UnmarshalTunnelRegistration(s tunnelrpc.TunnelRegistration) (*TunnelRegistration, error) {
func UnmarshalTunnelRegistration(s proto.TunnelRegistration) (*TunnelRegistration, error) {
p := new(TunnelRegistration)
err := pogs.Extract(p, tunnelrpc.TunnelRegistration_TypeID, s.Struct)
err := pogs.Extract(p, proto.TunnelRegistration_TypeID, s.Struct)
return p, err
}
@ -158,7 +158,7 @@ type RegistrationOptions struct {
ClientID string `capnp:"clientId"`
Version string
OS string `capnp:"os"`
ExistingTunnelPolicy tunnelrpc.ExistingTunnelPolicy
ExistingTunnelPolicy proto.ExistingTunnelPolicy
PoolName string `capnp:"poolName"`
Tags []Tag
ConnectionID uint8 `capnp:"connectionId"`
@ -171,13 +171,13 @@ type RegistrationOptions struct {
Features []string
}
func MarshalRegistrationOptions(s tunnelrpc.RegistrationOptions, p *RegistrationOptions) error {
return pogs.Insert(tunnelrpc.RegistrationOptions_TypeID, s.Struct, p)
func MarshalRegistrationOptions(s proto.RegistrationOptions, p *RegistrationOptions) error {
return pogs.Insert(proto.RegistrationOptions_TypeID, s.Struct, p)
}
func UnmarshalRegistrationOptions(s tunnelrpc.RegistrationOptions) (*RegistrationOptions, error) {
func UnmarshalRegistrationOptions(s proto.RegistrationOptions) (*RegistrationOptions, error) {
p := new(RegistrationOptions)
err := pogs.Extract(p, tunnelrpc.RegistrationOptions_TypeID, s.Struct)
err := pogs.Extract(p, proto.RegistrationOptions_TypeID, s.Struct)
return p, err
}
@ -190,13 +190,13 @@ type ServerInfo struct {
LocationName string
}
func MarshalServerInfo(s tunnelrpc.ServerInfo, p *ServerInfo) error {
return pogs.Insert(tunnelrpc.ServerInfo_TypeID, s.Struct, p)
func MarshalServerInfo(s proto.ServerInfo, p *ServerInfo) error {
return pogs.Insert(proto.ServerInfo_TypeID, s.Struct, p)
}
func UnmarshalServerInfo(s tunnelrpc.ServerInfo) (*ServerInfo, error) {
func UnmarshalServerInfo(s proto.ServerInfo) (*ServerInfo, error) {
p := new(ServerInfo)
err := pogs.Extract(p, tunnelrpc.ServerInfo_TypeID, s.Struct)
err := pogs.Extract(p, proto.ServerInfo_TypeID, s.Struct)
return p, err
}
@ -209,8 +209,8 @@ type TunnelServer interface {
ReconnectTunnel(ctx context.Context, jwt, eventDigest, connDigest []byte, hostname string, options *RegistrationOptions) (*TunnelRegistration, error)
}
func TunnelServer_ServerToClient(s TunnelServer) tunnelrpc.TunnelServer {
return tunnelrpc.TunnelServer_ServerToClient(TunnelServer_PogsImpl{RegistrationServer_PogsImpl{s}, s})
func TunnelServer_ServerToClient(s TunnelServer) proto.TunnelServer {
return proto.TunnelServer_ServerToClient(TunnelServer_PogsImpl{RegistrationServer_PogsImpl{s}, s})
}
type TunnelServer_PogsImpl struct {
@ -218,7 +218,7 @@ type TunnelServer_PogsImpl struct {
impl TunnelServer
}
func (i TunnelServer_PogsImpl) RegisterTunnel(p tunnelrpc.TunnelServer_registerTunnel) error {
func (i TunnelServer_PogsImpl) RegisterTunnel(p proto.TunnelServer_registerTunnel) error {
originCert, err := p.Params.OriginCert()
if err != nil {
return err
@ -245,7 +245,7 @@ func (i TunnelServer_PogsImpl) RegisterTunnel(p tunnelrpc.TunnelServer_registerT
return MarshalTunnelRegistration(result, registration)
}
func (i TunnelServer_PogsImpl) GetServerInfo(p tunnelrpc.TunnelServer_getServerInfo) error {
func (i TunnelServer_PogsImpl) GetServerInfo(p proto.TunnelServer_getServerInfo) error {
server.Ack(p.Options)
serverInfo, err := i.impl.GetServerInfo(p.Ctx)
if err != nil {
@ -258,13 +258,13 @@ func (i TunnelServer_PogsImpl) GetServerInfo(p tunnelrpc.TunnelServer_getServerI
return MarshalServerInfo(result, serverInfo)
}
func (i TunnelServer_PogsImpl) UnregisterTunnel(p tunnelrpc.TunnelServer_unregisterTunnel) error {
func (i TunnelServer_PogsImpl) UnregisterTunnel(p proto.TunnelServer_unregisterTunnel) error {
gracePeriodNanoSec := p.Params.GracePeriodNanoSec()
server.Ack(p.Options)
return i.impl.UnregisterTunnel(p.Ctx, gracePeriodNanoSec)
}
func (i TunnelServer_PogsImpl) ObsoleteDeclarativeTunnelConnect(p tunnelrpc.TunnelServer_obsoleteDeclarativeTunnelConnect) error {
func (i TunnelServer_PogsImpl) ObsoleteDeclarativeTunnelConnect(p proto.TunnelServer_obsoleteDeclarativeTunnelConnect) error {
return fmt.Errorf("RPC to create declarative tunnel connection has been deprecated")
}
@ -280,8 +280,8 @@ func (c TunnelServer_PogsClient) Close() error {
}
func (c TunnelServer_PogsClient) RegisterTunnel(ctx context.Context, originCert []byte, hostname string, options *RegistrationOptions) *TunnelRegistration {
client := tunnelrpc.TunnelServer{Client: c.Client}
promise := client.RegisterTunnel(ctx, func(p tunnelrpc.TunnelServer_registerTunnel_Params) error {
client := proto.TunnelServer{Client: c.Client}
promise := client.RegisterTunnel(ctx, func(p proto.TunnelServer_registerTunnel_Params) error {
err := p.SetOriginCert(originCert)
if err != nil {
return err
@ -312,8 +312,8 @@ func (c TunnelServer_PogsClient) RegisterTunnel(ctx context.Context, originCert
}
func (c TunnelServer_PogsClient) GetServerInfo(ctx context.Context) (*ServerInfo, error) {
client := tunnelrpc.TunnelServer{Client: c.Client}
promise := client.GetServerInfo(ctx, func(p tunnelrpc.TunnelServer_getServerInfo_Params) error {
client := proto.TunnelServer{Client: c.Client}
promise := client.GetServerInfo(ctx, func(p proto.TunnelServer_getServerInfo_Params) error {
return nil
})
retval, err := promise.Result().Struct()
@ -324,8 +324,8 @@ func (c TunnelServer_PogsClient) GetServerInfo(ctx context.Context) (*ServerInfo
}
func (c TunnelServer_PogsClient) UnregisterTunnel(ctx context.Context, gracePeriodNanoSec int64) error {
client := tunnelrpc.TunnelServer{Client: c.Client}
promise := client.UnregisterTunnel(ctx, func(p tunnelrpc.TunnelServer_unregisterTunnel_Params) error {
client := proto.TunnelServer{Client: c.Client}
promise := client.UnregisterTunnel(ctx, func(p proto.TunnelServer_unregisterTunnel_Params) error {
p.SetGracePeriodNanoSec(gracePeriodNanoSec)
return nil
})

View File

@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/assert"
capnp "zombiezen.com/go/capnproto2"
"github.com/cloudflare/cloudflared/tunnelrpc"
"github.com/cloudflare/cloudflared/tunnelrpc/proto"
)
const (
@ -39,7 +39,7 @@ func TestTunnelRegistration(t *testing.T) {
for i, testCase := range testCases {
_, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
assert.NoError(t, err)
capnpEntity, err := tunnelrpc.NewTunnelRegistration(seg)
capnpEntity, err := proto.NewTunnelRegistration(seg)
if !assert.NoError(t, err) {
t.Fatal("Couldn't initialize a new message")
}

tunnelrpc/proto/go.capnp Normal file
View File

@ -0,0 +1,31 @@
# Generate go.capnp.out with:
# capnp compile -o- go.capnp > go.capnp.out
# Must run inside this directory to preserve paths.
@0xd12a1c51fedd6c88;
annotation package(file) :Text;
# The Go package name for the generated file.
annotation import(file) :Text;
# The Go import path that the generated file is accessible from.
# Used to generate import statements and check if two types are in the
# same package.
annotation doc(struct, field, enum) :Text;
# Adds a doc comment to the generated code.
annotation tag(enumerant) :Text;
# Changes the string representation of the enum in the generated code.
annotation notag(enumerant) :Void;
# Removes the string representation of the enum in the generated code.
annotation customtype(field) :Text;
# OBSOLETE, not used by code generator.
annotation name(struct, field, union, enum, enumerant, interface, method, param, annotation, const, group) :Text;
# Used to rename the element in the generated code.
$package("capnp");
$import("zombiezen.com/go/capnproto2");

View File

@ -0,0 +1,28 @@
using Go = import "go.capnp";
@0xb29021ef7421cc32;
$Go.package("proto");
$Go.import("github.com/cloudflare/cloudflared/tunnelrpc");
struct ConnectRequest @0xc47116a1045e4061 {
dest @0 :Text;
type @1 :ConnectionType;
metadata @2 :List(Metadata);
}
enum ConnectionType @0xc52e1bac26d379c8 {
http @0;
websocket @1;
tcp @2;
}
struct Metadata @0xe1446b97bfd1cd37 {
key @0 :Text;
val @1 :Text;
}
struct ConnectResponse @0xb1032ec91cef8727 {
error @0 :Text;
metadata @1 :List(Metadata);
}

View File

@ -1,6 +1,6 @@
// Code generated by capnpc-go. DO NOT EDIT.
package schema
package proto
import (
capnp "zombiezen.com/go/capnproto2"
@ -357,34 +357,34 @@ func (p ConnectResponse_Promise) Struct() (ConnectResponse, error) {
return ConnectResponse{s}, err
}
const schema_b29021ef7421cc32 = "x\xda\xb4\x91\xcfk\x13A\x1c\xc5\xdf\x9bI\xba\x1e\xa2" +
"\x9b!\xd5\x8b\x8a\xa4\xf8+ES\xdb(\xa2\xa7\x80\x15" +
"TZ\xcc\x14\xcf\x96u;\x98\x92vw\x92\x9dZ\xf2" +
"\x17x\x15/\xe2\xd1\xbb \x15<\x0b\xa2\xa0\xa2\x07\x11" +
"\xff\x80\xfe\x05=y\xf0\xb42)\xdb@)\x08Bo" +
"\xdfy<\xe6}\xbe\xdfW\xfd\xd5\x16\xb3\xe5\xc7\x04t" +
"\xb5<\x91_x\xbas\xeaKSnA5\x98\xcf}" +
"\xab\xbb\x9d\xfa\xb3\xb7(\x8b\x00\x98}\xf9\x95\xea]\x00" +
"\xa8\xadM0\x8f\xda\x0fK\xafN\xf4?B7\xb8\xdf" +
"\xda\xaa\xf3\x03k7\x18\x00\xb5k|\x03\xe6\x9f\x87?" +
"\xcf\xbf>\xd9\xfc\x04\xd5\x10c3\xd8\xda\xf6\xce?#" +
"\xe7o\xde\x07\xf3\xeb\xdf\x7f\xbc\x7f\xd1\x9b\xdf>\x80\xa0" +
"uT<g\xed\x9c\x1fku\xe1!\xfa\x1b\xab\xf1L" +
"\x16w'\xccz4\xe3\x1f\xcb\xeb\xc6E+\x91\x8b\x96" +
"\xed ui\x9c\xae5\xe3\xc8&\xf6\xe6\xad4IL" +
"\xec\x96Lf\xd3$3@\x87\xd4Gd\x09(\x11P" +
"\x8d9@\x9f\x95\xd4W\x04\x159I/^\xbe\x07\xe8" +
"K\x92\xfa\x8e\xe0\x193\x18\xa4\x03V X\x01\xf3\"" +
"\x06\x00\x8f\x81\x1dIV\xc7\xe8\xa0\x17\xff\x87\xae\xbfa" +
"27b\xab\xec\xb1\xdd\x9e\x06t[R/\x08\x16h" +
"w\xbd6/\xa9;\x82Jp\x92\x02P\x8b\x9ewA" +
"Rw\x05\xc3\x15\x93\xb9\x027tCk\x18\x8e[\x00" +
"\x19\x1e\xda\x16\xabi\xf2`h\xcd\xee\x16#\xb0\xd3\xd3" +
"\xfe3u|\x09\xa0Pj\x0a\x08\xbb\xce\xd9|\xd3<" +
"\xca\xd2\xb8g@\x17\xb8\xd8\xeeE\x95\xff\x19\xb5\xb8\xab" +
"3\xdaW\xe3\xd4A5z\xf1\xa2\xa4\xbe*\x18\xf4\xcc" +
"\xb0\xb8J\xf0$Z+\xe6\xbf\x01\x00\x00\xff\xff\xf5\xed" +
"\xc9\xfe"
const schema_b29021ef7421cc32 = "x\xda\xb4\x911k\x14A\x1c\xc5\xdf\x9b\xcde-\x0e" +
"\xf7\x86KlT\xc2\x05Q\x13\xdc\x8b\xc9\x09\xa2 \x1c" +
"\x18A%\xc1\x9b`mX7\x83\x09w\xee\xce\xed\xce" +
"\x19\xee\x13\xd8\xda\x89\xa5\xbd \x09X\xdb((h!" +
"\x16\xd6\x0a66\xf9\x04\xb22\x0b\x9b\x83\x90B\x04\xbb" +
"\xe1\xcd\x9by\xbf\xff\xff5\xbeu\xc5r\xed\x11\x01\xd5" +
"\xa8M\x17\x17\x9e\x1e\x9c\xf9\xd8\xf6\xf6 C\x16+\x9f" +
"Z\xf6\xa0\xf5l\x1f5\xe1\x03\xcb/~Q\xbe\xf1\x01" +
"\xb9\xb7\x0b\x16Q\xf7\xc1\xd4\xcbS\xc3wP!\x8fZ" +
";-\xfe`\xf3\x06}\xa0y\x8d\xaf\xc1\xe2\xc3\xf8\xeb" +
"\xf9W\xa7\xdb\xef!C11\x83\x9d\x9f\xceI\xf7\xa8" +
"\xf9\x9b\xf7\xc0\xe2\xea\xe7/o\x9f\xf7W\xbf\x1fC\xd0" +
"\x99\x15\xfbl\x86\xa5yA8\x08;J\x12=\xc8\xcc" +
"t\xbcd\xb2\xd4\xa6K\xc3\xd1N\xbc\xf9X\xdbh+" +
"\xb2\xd1f\xa9\xc5\xe9\xa0\x1dG&1\xd7o\xa6I\xa2" +
"c\xbb\xa1s\x13\xa4I\xae{\xa4:\xe1M\x01S\x04" +
"\xe4\xc2\x0a\xa0\xceyT\x97\x05%9C'\x86w\x01" +
"u\xc9\xa3\xba-8\xa7\xb3,\xcdX\x87`\x1d,\xaa" +
"\x14\x00<\x09\xf6<\xb21\xa1\x07\x9d\xf8\xaf\x80\xc3\x91" +
"\xafs\xeb\xf8\xea\x87|\xb7\x16\x01\xd5\xf5\xa8\xd6\x04+" +
"\xbc;N[\xf5\xa8z\x82Rp\x86\x02\x90\xeb\x8ey" +
"\xcd\xa3\xda\x16\x0c\xb6tn+\xe4\xc0\x8e\x8df0)" +
"\x03d\xf0_'\xd9I\x93\xfb\xfe\xd8\x94\x9b\xae\x97p" +
"g\x17\xdd\x87rv\x03\xa0\x90r\x1e\x08\xb6\xad5\xc5" +
"\xae~\x98\xa7q_\x83\xd6\xb7\xb19\x8c\xab\xfdU\xdc" +
"\xba\xb6s\xe5\xc5\x91J\xe7\x8f\xab\xd4\x89\x17=\xaa+" +
"\x82~_\x8f\xab\xed\xf8O\xa2Au\xfe\x13\x00\x00\xff" +
"\xff\x1d\xce\xd1\xb0"
func init() {
schemas.Register(schema_b29021ef7421cc32,

View File

@ -1,15 +1,15 @@
using Go = import "go.capnp";
@0xdb8274f9144abc7e;
$Go.package("tunnelrpc");
$Go.package("proto");
$Go.import("github.com/cloudflare/cloudflared/tunnelrpc");
struct Authentication {
struct Authentication @0xc082ef6e0d42ed1d {
key @0 :Text;
email @1 :Text;
originCAKey @2 :Text;
}
struct TunnelRegistration {
struct TunnelRegistration @0xf41a0f001ad49e46 {
err @0 :Text;
# the url to access the tunnel
url @1 :Text;
@ -27,7 +27,7 @@ struct TunnelRegistration {
connDigest @7 :Data;
}
struct RegistrationOptions {
struct RegistrationOptions @0xc793e50592935b4a {
# The tunnel client's unique identifier, used to verify a reconnection.
clientId @0 :Text;
# Information about the running binary.
@ -56,29 +56,29 @@ struct RegistrationOptions {
features @13 :List(Text);
}
struct Tag {
struct Tag @0xcbd96442ae3bb01a {
name @0 :Text;
value @1 :Text;
}
enum ExistingTunnelPolicy {
enum ExistingTunnelPolicy @0x84cb9536a2cf6d3c {
ignore @0;
disconnect @1;
balance @2;
}
struct ServerInfo {
struct ServerInfo @0xf2c68e2547ec3866 {
locationName @0 :Text;
}
struct AuthenticateResponse {
struct AuthenticateResponse @0x82c325a07ad22a65 {
permanentErr @0 :Text;
retryableErr @1 :Text;
jwt @2 :Data;
hoursUntilRefresh @3 :UInt8;
}
struct ClientInfo {
struct ClientInfo @0x83ced0145b2f114b {
# The tunnel client's unique identifier, used to verify a reconnection.
clientId @0 :Data;
# Set of features this cloudflared knows it supports
@ -89,7 +89,7 @@ struct ClientInfo {
arch @3 :Text;
}
struct ConnectionOptions {
struct ConnectionOptions @0xb4bf9861fe035d04 {
# client details
client @0 :ClientInfo;
# origin LAN IP
@ -102,21 +102,21 @@ struct ConnectionOptions {
numPreviousAttempts @4 :UInt8;
}
struct ConnectionResponse {
struct ConnectionResponse @0xdbaa9d03d52b62dc {
result :union {
error @0 :ConnectionError;
connectionDetails @1 :ConnectionDetails;
}
}
struct ConnectionError {
struct ConnectionError @0xf5f383d2785edb86 {
cause @0 :Text;
# How long this connection should wait before retrying, in nanoseconds
retryAfter @1 :Int64;
shouldRetry @2 :Bool;
}
struct ConnectionDetails {
struct ConnectionDetails @0xb5f39f082b9ac18a {
# identifier of this connection
uuid @0 :Data;
# airport code of the colo where this connection landed
@ -125,18 +125,18 @@ struct ConnectionDetails {
tunnelIsRemotelyManaged @2: Bool;
}
struct TunnelAuth {
struct TunnelAuth @0x9496331ab9cd463f {
accountTag @0 :Text;
tunnelSecret @1 :Data;
}
interface RegistrationServer {
interface RegistrationServer @0xf71695ec7fe85497 {
registerConnection @0 (auth :TunnelAuth, tunnelId :Data, connIndex :UInt8, options :ConnectionOptions) -> (result :ConnectionResponse);
unregisterConnection @1 () -> ();
updateLocalConfiguration @2 (config :Data) -> ();
}
interface TunnelServer extends (RegistrationServer) {
interface TunnelServer @0xea58385c65416035 extends (RegistrationServer) {
registerTunnel @0 (originCert :Data, hostname :Text, options :RegistrationOptions) -> (result :TunnelRegistration);
getServerInfo @1 () -> (result :ServerInfo);
unregisterTunnel @2 (gracePeriodNanoSec :Int64) -> ();
@ -146,18 +146,18 @@ interface TunnelServer extends (RegistrationServer) {
reconnectTunnel @5 (jwt :Data, eventDigest :Data, connDigest :Data, hostname :Text, options :RegistrationOptions) -> (result :TunnelRegistration);
}
struct RegisterUdpSessionResponse {
struct RegisterUdpSessionResponse @0xab6d5210c1f26687 {
err @0 :Text;
spans @1 :Data;
}
interface SessionManager {
interface SessionManager @0x839445a59fb01686 {
# Let the edge decide closeAfterIdle to make sure cloudflared doesn't close the session before the edge closes its side
registerUdpSession @0 (sessionId :Data, dstIp :Data, dstPort :UInt16, closeAfterIdleHint :Int64, traceContext :Text = "") -> (result :RegisterUdpSessionResponse);
unregisterUdpSession @1 (sessionId :Data, message :Text) -> ();
}
struct UpdateConfigurationResponse {
struct UpdateConfigurationResponse @0xdb58ff694ba05cf9 {
# Latest configuration that was applied successfully. The err field might be populated at the same time to indicate
# that cloudflared is using an older configuration because the latest cannot be applied
latestAppliedVersion @0 :Int32;
@ -166,8 +166,8 @@ struct UpdateConfigurationResponse {
}
# ConfigurationManager defines RPC to manage cloudflared configuration remotely
interface ConfigurationManager {
interface ConfigurationManager @0xb48edfbdaa25db04 {
updateConfiguration @0 (version :Int32, config :Data) -> (result: UpdateConfigurationResponse);
}
interface CloudflaredServer extends(SessionManager, ConfigurationManager) {}
interface CloudflaredServer @0xf548cef9dea2a4a1 extends(SessionManager, ConfigurationManager) {}

View File

@ -1,10 +1,11 @@
// Code generated by capnpc-go. DO NOT EDIT.
package tunnelrpc
package proto
import (
context "golang.org/x/net/context"
strconv "strconv"
context "golang.org/x/net/context"
capnp "zombiezen.com/go/capnproto2"
text "zombiezen.com/go/capnproto2/encoding/text"
schemas "zombiezen.com/go/capnproto2/schemas"
@ -1598,7 +1599,7 @@ func (c RegistrationServer) RegisterConnection(ctx context.Context, params func(
Method: capnp.Method{
InterfaceID: 0xf71695ec7fe85497,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:RegistrationServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:RegistrationServer",
MethodName: "registerConnection",
},
Options: capnp.NewCallOptions(opts),
@ -1618,7 +1619,7 @@ func (c RegistrationServer) UnregisterConnection(ctx context.Context, params fun
Method: capnp.Method{
InterfaceID: 0xf71695ec7fe85497,
MethodID: 1,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:RegistrationServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:RegistrationServer",
MethodName: "unregisterConnection",
},
Options: capnp.NewCallOptions(opts),
@ -1638,7 +1639,7 @@ func (c RegistrationServer) UpdateLocalConfiguration(ctx context.Context, params
Method: capnp.Method{
InterfaceID: 0xf71695ec7fe85497,
MethodID: 2,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:RegistrationServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:RegistrationServer",
MethodName: "updateLocalConfiguration",
},
Options: capnp.NewCallOptions(opts),
@ -1674,7 +1675,7 @@ func RegistrationServer_Methods(methods []server.Method, s RegistrationServer_Se
Method: capnp.Method{
InterfaceID: 0xf71695ec7fe85497,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:RegistrationServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:RegistrationServer",
MethodName: "registerConnection",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -1688,7 +1689,7 @@ func RegistrationServer_Methods(methods []server.Method, s RegistrationServer_Se
Method: capnp.Method{
InterfaceID: 0xf71695ec7fe85497,
MethodID: 1,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:RegistrationServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:RegistrationServer",
MethodName: "unregisterConnection",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -1702,7 +1703,7 @@ func RegistrationServer_Methods(methods []server.Method, s RegistrationServer_Se
Method: capnp.Method{
InterfaceID: 0xf71695ec7fe85497,
MethodID: 2,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:RegistrationServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:RegistrationServer",
MethodName: "updateLocalConfiguration",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -2206,7 +2207,7 @@ func (c TunnelServer) RegisterTunnel(ctx context.Context, params func(TunnelServ
Method: capnp.Method{
InterfaceID: 0xea58385c65416035,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:TunnelServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:TunnelServer",
MethodName: "registerTunnel",
},
Options: capnp.NewCallOptions(opts),
@ -2226,7 +2227,7 @@ func (c TunnelServer) GetServerInfo(ctx context.Context, params func(TunnelServe
Method: capnp.Method{
InterfaceID: 0xea58385c65416035,
MethodID: 1,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:TunnelServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:TunnelServer",
MethodName: "getServerInfo",
},
Options: capnp.NewCallOptions(opts),
@ -2246,7 +2247,7 @@ func (c TunnelServer) UnregisterTunnel(ctx context.Context, params func(TunnelSe
Method: capnp.Method{
InterfaceID: 0xea58385c65416035,
MethodID: 2,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:TunnelServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:TunnelServer",
MethodName: "unregisterTunnel",
},
Options: capnp.NewCallOptions(opts),
@ -2266,7 +2267,7 @@ func (c TunnelServer) ObsoleteDeclarativeTunnelConnect(ctx context.Context, para
Method: capnp.Method{
InterfaceID: 0xea58385c65416035,
MethodID: 3,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:TunnelServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:TunnelServer",
MethodName: "obsoleteDeclarativeTunnelConnect",
},
Options: capnp.NewCallOptions(opts),
@ -2288,7 +2289,7 @@ func (c TunnelServer) Authenticate(ctx context.Context, params func(TunnelServer
Method: capnp.Method{
InterfaceID: 0xea58385c65416035,
MethodID: 4,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:TunnelServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:TunnelServer",
MethodName: "authenticate",
},
Options: capnp.NewCallOptions(opts),
@ -2308,7 +2309,7 @@ func (c TunnelServer) ReconnectTunnel(ctx context.Context, params func(TunnelSer
Method: capnp.Method{
InterfaceID: 0xea58385c65416035,
MethodID: 5,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:TunnelServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:TunnelServer",
MethodName: "reconnectTunnel",
},
Options: capnp.NewCallOptions(opts),
@ -2328,7 +2329,7 @@ func (c TunnelServer) RegisterConnection(ctx context.Context, params func(Regist
Method: capnp.Method{
InterfaceID: 0xf71695ec7fe85497,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:RegistrationServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:RegistrationServer",
MethodName: "registerConnection",
},
Options: capnp.NewCallOptions(opts),
@ -2348,7 +2349,7 @@ func (c TunnelServer) UnregisterConnection(ctx context.Context, params func(Regi
Method: capnp.Method{
InterfaceID: 0xf71695ec7fe85497,
MethodID: 1,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:RegistrationServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:RegistrationServer",
MethodName: "unregisterConnection",
},
Options: capnp.NewCallOptions(opts),
@ -2368,7 +2369,7 @@ func (c TunnelServer) UpdateLocalConfiguration(ctx context.Context, params func(
Method: capnp.Method{
InterfaceID: 0xf71695ec7fe85497,
MethodID: 2,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:RegistrationServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:RegistrationServer",
MethodName: "updateLocalConfiguration",
},
Options: capnp.NewCallOptions(opts),
@ -2416,7 +2417,7 @@ func TunnelServer_Methods(methods []server.Method, s TunnelServer_Server) []serv
Method: capnp.Method{
InterfaceID: 0xea58385c65416035,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:TunnelServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:TunnelServer",
MethodName: "registerTunnel",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -2430,7 +2431,7 @@ func TunnelServer_Methods(methods []server.Method, s TunnelServer_Server) []serv
Method: capnp.Method{
InterfaceID: 0xea58385c65416035,
MethodID: 1,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:TunnelServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:TunnelServer",
MethodName: "getServerInfo",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -2444,7 +2445,7 @@ func TunnelServer_Methods(methods []server.Method, s TunnelServer_Server) []serv
Method: capnp.Method{
InterfaceID: 0xea58385c65416035,
MethodID: 2,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:TunnelServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:TunnelServer",
MethodName: "unregisterTunnel",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -2458,7 +2459,7 @@ func TunnelServer_Methods(methods []server.Method, s TunnelServer_Server) []serv
Method: capnp.Method{
InterfaceID: 0xea58385c65416035,
MethodID: 3,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:TunnelServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:TunnelServer",
MethodName: "obsoleteDeclarativeTunnelConnect",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -2472,7 +2473,7 @@ func TunnelServer_Methods(methods []server.Method, s TunnelServer_Server) []serv
Method: capnp.Method{
InterfaceID: 0xea58385c65416035,
MethodID: 4,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:TunnelServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:TunnelServer",
MethodName: "authenticate",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -2486,7 +2487,7 @@ func TunnelServer_Methods(methods []server.Method, s TunnelServer_Server) []serv
Method: capnp.Method{
InterfaceID: 0xea58385c65416035,
MethodID: 5,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:TunnelServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:TunnelServer",
MethodName: "reconnectTunnel",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -2500,7 +2501,7 @@ func TunnelServer_Methods(methods []server.Method, s TunnelServer_Server) []serv
Method: capnp.Method{
InterfaceID: 0xf71695ec7fe85497,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:RegistrationServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:RegistrationServer",
MethodName: "registerConnection",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -2514,7 +2515,7 @@ func TunnelServer_Methods(methods []server.Method, s TunnelServer_Server) []serv
Method: capnp.Method{
InterfaceID: 0xf71695ec7fe85497,
MethodID: 1,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:RegistrationServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:RegistrationServer",
MethodName: "unregisterConnection",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -2528,7 +2529,7 @@ func TunnelServer_Methods(methods []server.Method, s TunnelServer_Server) []serv
Method: capnp.Method{
InterfaceID: 0xf71695ec7fe85497,
MethodID: 2,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:RegistrationServer",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:RegistrationServer",
MethodName: "updateLocalConfiguration",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -3689,7 +3690,7 @@ func (c SessionManager) RegisterUdpSession(ctx context.Context, params func(Sess
Method: capnp.Method{
InterfaceID: 0x839445a59fb01686,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:SessionManager",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:SessionManager",
MethodName: "registerUdpSession",
},
Options: capnp.NewCallOptions(opts),
@ -3709,7 +3710,7 @@ func (c SessionManager) UnregisterUdpSession(ctx context.Context, params func(Se
Method: capnp.Method{
InterfaceID: 0x839445a59fb01686,
MethodID: 1,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:SessionManager",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:SessionManager",
MethodName: "unregisterUdpSession",
},
Options: capnp.NewCallOptions(opts),
@ -3741,7 +3742,7 @@ func SessionManager_Methods(methods []server.Method, s SessionManager_Server) []
Method: capnp.Method{
InterfaceID: 0x839445a59fb01686,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:SessionManager",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:SessionManager",
MethodName: "registerUdpSession",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -3755,7 +3756,7 @@ func SessionManager_Methods(methods []server.Method, s SessionManager_Server) []
Method: capnp.Method{
InterfaceID: 0x839445a59fb01686,
MethodID: 1,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:SessionManager",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:SessionManager",
MethodName: "unregisterUdpSession",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -4225,7 +4226,7 @@ func (c ConfigurationManager) UpdateConfiguration(ctx context.Context, params fu
Method: capnp.Method{
InterfaceID: 0xb48edfbdaa25db04,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:ConfigurationManager",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:ConfigurationManager",
MethodName: "updateConfiguration",
},
Options: capnp.NewCallOptions(opts),
@ -4255,7 +4256,7 @@ func ConfigurationManager_Methods(methods []server.Method, s ConfigurationManage
Method: capnp.Method{
InterfaceID: 0xb48edfbdaa25db04,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:ConfigurationManager",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:ConfigurationManager",
MethodName: "updateConfiguration",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -4451,7 +4452,7 @@ func (c CloudflaredServer) RegisterUdpSession(ctx context.Context, params func(S
Method: capnp.Method{
InterfaceID: 0x839445a59fb01686,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:SessionManager",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:SessionManager",
MethodName: "registerUdpSession",
},
Options: capnp.NewCallOptions(opts),
@ -4471,7 +4472,7 @@ func (c CloudflaredServer) UnregisterUdpSession(ctx context.Context, params func
Method: capnp.Method{
InterfaceID: 0x839445a59fb01686,
MethodID: 1,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:SessionManager",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:SessionManager",
MethodName: "unregisterUdpSession",
},
Options: capnp.NewCallOptions(opts),
@ -4491,7 +4492,7 @@ func (c CloudflaredServer) UpdateConfiguration(ctx context.Context, params func(
Method: capnp.Method{
InterfaceID: 0xb48edfbdaa25db04,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:ConfigurationManager",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:ConfigurationManager",
MethodName: "updateConfiguration",
},
Options: capnp.NewCallOptions(opts),
@ -4525,7 +4526,7 @@ func CloudflaredServer_Methods(methods []server.Method, s CloudflaredServer_Serv
Method: capnp.Method{
InterfaceID: 0x839445a59fb01686,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:SessionManager",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:SessionManager",
MethodName: "registerUdpSession",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -4539,7 +4540,7 @@ func CloudflaredServer_Methods(methods []server.Method, s CloudflaredServer_Serv
Method: capnp.Method{
InterfaceID: 0x839445a59fb01686,
MethodID: 1,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:SessionManager",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:SessionManager",
MethodName: "unregisterUdpSession",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -4553,7 +4554,7 @@ func CloudflaredServer_Methods(methods []server.Method, s CloudflaredServer_Serv
Method: capnp.Method{
InterfaceID: 0xb48edfbdaa25db04,
MethodID: 0,
InterfaceName: "tunnelrpc/tunnelrpc.capnp:ConfigurationManager",
InterfaceName: "tunnelrpc/proto/tunnelrpc.capnp:ConfigurationManager",
MethodName: "updateConfiguration",
},
Impl: func(c context.Context, opts capnp.CallOptions, p, r capnp.Struct) error {
@ -4566,234 +4567,232 @@ func CloudflaredServer_Methods(methods []server.Method, s CloudflaredServer_Serv
return methods
}
const schema_db8274f9144abc7e = "x\xda\xccZ{t\x1c\xe5u\xbfwfW#\x19\xc9" +
"\xaba\x96H\x96#\xab\xd1\xb1Kqb@v\x9d\x82" +
"\x9bF\x92\x91\x1cV\xf8\xa1\xd9\xb5s\xa8\xb1s\x18\xed" +
"~\x92F\x9d\x9dYff\x85\xe5\xe0\xd886\x06\x0e" +
"\x10 6`'n\x8c\x1d\xda\x83\x09)\x0e\xa6)=" +
"\xa4\xc5i\x1c\x12H\x1c\x9c\x03\xa9\x89I\xd3\xc6q[" +
"\xfb\x98R\x0c4\xc7m\xcc\xf4\xdc\x99\x9d\x87v\x17=" +
" \x7f\xe4\xbf\xd5\x9d\xfb=\xee\xef\xfb\xdd\xc7w?]" +
"}\xa0\xae\x8b\xeb\x88oN\x00\xc8\x87\xe35\x0e\x9b\xff" +
"\xd3\x8d\xfb\xe6\xfd\xd3V\x90[\x10\x9d/<\xd7\x97\xbc" +
"`o=\x09q^\x00X\xf4\x98\xb0\x11\xa5g\x05\x01" +
"@zF\xf8\x0f@\xe7\x8e\x8f<\xf5\xb5\xc7zw~" +
"\x11\xc4\x16>T\x06\\\xb4\xbb\xb6\x0f\xa5'kI\xf3" +
"\xf1\xda\x1dR]\x9d\x00\xe0\xdc ^uS\xf2\xe5c" +
"\xa4\x1d\x9d:FS\xbfU;\x1f%$5\xe9b-" +
"M\xfd\xa9\xfcO\xf6\x7fr\xd7K\xdb@l\xe1\xc6M" +
"}\xa6n#J\x17]\xcd\x0bu\xab\x00\x9d\xb7w6" +
"?\xf1\xe8\xb1\x1fl\x07\xf1r\x84\xd2N\xc5\x19?G" +
"@i\xde\x8c\xbf\x01t~\xfc\xeeM\xef\x1c\xfe\xfe\xe2" +
";@\xbc\x82\x14\x90\x14\x8e\xceh\xe7\x00\xa5_\xcc\xe8" +
"\x04t\xce\x9e\xfb\xbf\x1d\x9f\xbfb\xe5\xfd _\x81\x9c" +
"?\xc5\xc5\x19-\x1c\xe0\xa2\xcb.iC@\xa7s\xd9" +
"\x8f\x9fmY\xf4\xd0\xce\xb2\xbds\xa4ym\xfd|\x94" +
"R\xf5\xb4\xa3\xde\xfa[\x01\x9dO\xff\xc9\xd3\xf7\xf5<" +
"\xb4e\x17\x88W\x05\x0b>^\xbf\x96\x16<ZO\x0b" +
"\xfe\xcf\xcc\xaf\x1c+^\xf7\xed\x87J;rg9]" +
"?\x9f\x14.\xba3\xb4\x8f\xce\xbb\xf9\xbbG\x9f~\x18" +
"\xe4\x05\x88\xce\xeb\x03\x1f\x7f\x95\xdf{\xf0$\xacA\x81" +
"6\xb8h}\xc3~2/\xdf@\xba?\xf9\xc4s\x7f" +
"\x7f\xff\xd3;\xbe\x02\xf2\xe5\x88\x00.\x9c\xc7\x1b\xfe\x97" +
"\x14N7\xd0j;O|ge\xfe\x81=\xfb=\x80" +
"\xdc\xef\x0d39\x0eb\xce\xb6\xd4o\xf2k\x0ed\x0e" +
"\x94\xa0\x8b\xd3'\x9cy\x1e\xc9\xee\x99\xae\xdd\x8b\x7f~" +
"z\xd5\x8ao\x0d\xfeudlGb#\x8d\xdd1x" +
"\xfeHc:\xffD5D\x16$\x0e\xa2\xd4\x9b D" +
"\xba\x13\xb4\xc7'\xe7\xdcP\xb7\xe1\xf4\xb2\xa7@\\\xe0" +
"O\xb37\x91\xa6iF_<\xf0\x87\xf3^\xbc\xf5\x10" +
"\xc8Wa\x00\xd6n\xfa\x86\xd23\xee\xd8\xd8\xc9y\x07" +
"\xbf\xf3\xcb\xfb\x0eW\x90Ll\xdc\x88\xd2\xbcFZ\xe5" +
"c\x8d\x9f\x91d\xfa\xe5\xc4\xd6\xf3\xef)\x8f\xfc\xe3\xe1" +
"r\x02{'\xd58\x80\xd2\x0a\xd2[\x94jt\xed\xbb" +
"\xfb\xc8\x9e\x8f\xd7~\xed\xedg\xaa\xaa\xdf\"\x0e\xa0\xb4" +
"]\xa4\x05n\x17\x89I\x97\xa5\xf0\xf5\xe7;b\xdf\x8e" +
"Rm\xde\xa5g\x09\xeak/%\x85\xd67\x966\xe8" +
"on}\xbe\x0c\x14W\xf1\xb5K\xfbPz\xe3R\x9a" +
"\xed\x8c\xab\x1c\xfb\xe4\xc8\x0e\xf1\xd4\xcf\x8ez\xa0x\x96" +
"o\x97F\xc8\xf2G%:\xb8\xbe\x9b\xbe\xfc`\xfc\xf4" +
"\x97_\xa0\xcdE\x9c N~\xb5\xe8\x88d\xa2\xf4\xaa" +
"\xe4\x9e\xb6\xd4\xc4\x03:-O\xfd\xe97\x97\xe6^{" +
"\xa9\xca\x89Hr\xd3yi}\x13\xfd\xfa\xf3&\x02\xf5" +
"\xd4\x82C\x9f?s\xef\xf1WJ\x96\xb8k?\xd3\xe4" +
"\x92\xe6\x87M\xb4\xf6\x85u\xfbnP\x9d\x1bO\x96\x03" +
"\xe3j\x9ei\xfa\x16J\xd8\xecz\xab;]\xc0\xd0j" +
"\xdaJ\xf3\x08JEW\xfb\x96f\x9a\x9b;\xad\xcc\xda" +
"\xf2\xb3O\xbf\x1e!U\xb1\xf9W\x081g\xe5go" +
"\x1a\xa9\xdbt\xeaTt[j\xb3\x0b\xf0&w\xe8?" +
"\xfc\xf3\xc3\xc3\xeb\xbfy\xect\x94H\xcd&\x11\xe9\xbf" +
"\xfe\xea\xec\x97\xce\xe5s\xff\xee\xba\x8c\x7f8\xbb\x9b\x97" +
"\x10\x9c\x87\x9a)\xa44\xb55\xf4\xb6\x9f\xe8?\x1b\xc5" +
"\xfb\xdeYKI\xe1\xb1Y4\xf9\xe2\x9b\xbb\xd9\xbak" +
"n<[\xc1\xb4\xa3\xb3\x96\xa0\xf4\xea,\x17\xebY;" +
"P:\xd3\xd2\x04\xe0\x8c\xfe\xed\x037>\xf1\xbd\x95\xe7" +
"=/v\xf7\xf2Z\xcbB\xda\xcb}_\xe8Yum" +
"\xfb\x91\xf3Q3\x8e\xb7\x9cw]\xb2\x85V\x1a\xbc\xe6" +
"\xdcg\xe6\xdd\xf7\xfd\xf3eG\xe5*\xc6g\xcfG\xe9" +
"\xb2\xd9\x04\x978\xbb\x13\xf0\xcde\x7f\xf9JK\xa2\xe5" +
"\x9d2hkHw\xf1\xec\x11\x94R\xa4\xbb\xa8w\xf6" +
"\x0bD\xe8G\xbf\xbe\xff_.\x1c\xbb\xfe\xdd\x0a\x1b\xae" +
"m%\xee\xb7\xd2\xb4\xa9VAJ\xb5^\x0e\xe0\xdcq" +
"\xf2s\x1b~\xfa\xc5\xb7\xdf-g\x98\xbb\x91\xee\xd64" +
"Jk\xdc\x11r+\x11\xf6\xe1\xd5\xff\xb9\xf9\xdc\xae\x8f" +
"\xfc\xa6b\xee\xb7ZGP\x8a\xcf!M\x9c\xf3\x82\xb4" +
"\x8b~9/\x0b\x07:z6\xbft!rT\x9b\xe6" +
"\xf4\x11<\x0f\x09_=\xb5\xe5\x97\x9f\xfbm\x14\x9e\xb1" +
"9\xbf\"x\xee\x9dC\xf0\xdc\xf6\xe6\xee\xeb\xbf\xb4\xee" +
"\x1b\xefE\x08\xf2\xe4\x9c\xad4\xd4.\xea:\xd3\xccB" +
",{\x95\xff3{eV)\xe8\x85%\xddE{\x98" +
"\xe9\xb6\x9aUl\x96f\x9dV\xc1\xd0-\xd6\x8f(7" +
"\xf21\x80\x18\x02\x88\xca\x08\x80|3\x8f\xb2\xc6\xa1\x88" +
"\x98$\xa2\x88*\x09\x87y\x94m\x0eE\x8eKR\x84" +
"\x15oi\x07\x905\x1e\xe5\x0d\x1c\"\x9fD\x1e@," +
">\x08 o\xe0Q\xde\xc6\xa1S`f^\xd1\x99\x0e" +
"\x09\xbb\xd74\xb1\x1e8\xac\x07tLf\x9bc\xca\x80" +
"\x06\x09\x16\x11\x0b#\xb7\xda\xd8\x00\x1c6\x00:\xc3F" +
"\xd1\xb4\xd6\xe86\xaaZ\x9a\x0d\x9a\xcc\xc2a\xac\x01\x0e" +
"k\x00'2/\xc3,K5\xf4\x15\x8a\xae\x0c1\x13" +
"\x80,\xab\xe5\xe3\x00A\xfaB?\xd1\x89\x1d{\x80\x13" +
"\x17\x08\x18f\x1a\xf4\xc9*~\xec pb\xab\xe0\x98" +
"lH\xb5lf\xe2\x9a\\\xc1\x9d\x9b7\xf4.t\x8a" +
"\xba\xf7\x01\x99\xe9}H\xd0\xaa]\xd8\x8f\xe1\xee\xf8\xca" +
"\xdd]\xa7\xa9L\xb7\x13)}\xd0(\x83\xbc\xaf\x1a\xe4" +
"}%\xc8\xb7E \xbf})\x80|\x1b\x8f\xf2\x9d\x1c" +
"\x8a|\x09\xf3\xed\xf3\x01\xe4-<\xca\xf7p\xe8d\xdd" +
"ER9\x00\x08\xd0\x1cd\x8a]4\x99E\xb2\x99\x80" +
"\xfd<\xba\xa0\xcf\x04\xdc<\xcaL\xda\xbb\x7f\x08\x09\xc5" +
"\xcc\x0e\x07\x075\x01\xd2\xbd\x1bT\xcbV\xf5\xa1\xd5\xae" +
"\xbc\xb3\xdf\xd0\xd4\xec\x18YU\xef\xee\xb3u\x09\x00\xa2" +
"x\xd9Z\x00\xe4Dq)@\xa7:\xa4\x1b&sr" +
"\xaa\x955t\x9d\x01\x9f\xb57\x0f(\x9a\xa2gY\xb0" +
"PM\xe5B\xde\x02\x19f\x8e2\xf3J%B\xdf\xb9" +
"\xfd\x8a\xa9\xf0yK\xae\x0fp\xec]\x0b \xf7\xf0(" +
"\xf7Gp\\A8.\xe7Q\xbe1\x82\xe3\x1a\xc2\xb1" +
"\x9fGy\x1d\x87\x8ea\xaaC\xaa~\x1d\x03\xde\x8c2" +
"\xd0\xb2u%\xcf\x08\xb3\x12\x1e\x9b\x8d\x82\xad\x1a\xba\x85" +
"\x8da\xd6\x01\xc4\xc6\x08R\xc2d\x9c\xbc\xd2\xa7\x94\xcf" +
"(C\x9f\x9bfVQ\xd0lK\x8e\x05\x964,\x01" +
"\x90ky\x94\x93\x1cv\x9a\xcc*j66\x86\xe5\xc4" +
"\xefbU\x1f\xbed\xb0\xe8\xa6t\x84\\>|\xdb\x17" +
"\x86\xe4\xc2\x12zw\x11z\xdbx\x94\xef'\x16\xa2\xc7" +
"\xc2{\xf7\x00\xc8\xf7\xf3(\x7f\x95C1\xc6%1\x86" +
"(\xee\xa6\xb8\xf1\x08\x8f\xf2\xd79t,o\xe5\x14`" +
"\xce\x87\xb9-g\xd9\xa9\x82\xff\xd7\xe6\x9ce\xf7\x1b\xa6" +
"\x8d\x02p(\x00\x91\xd9\xb0X\xf7 9Z*\xa7\xb1" +
"\xebU^\xb71\x0e\x1c\xc6\xc9zS\xc9\xb2\xeb\x0c\x8a" +
".l\x83]:$\x10q\x06\xc0D^\xe8\x11*A" +
"\x91\xd0\x0b\x0f\xbe\xf9W\x10{\xfe\x88G\xf9\x8f#\xe6" +
"w\x90\x01W\xf3(\x7f\x8aCG\xc9f\x8d\xa2n\xaf" +
"\x06^\x19*s\x92\x0c\x83D\xd6d!\x7f\xfcek" +
"\xab\xc4\x01C\x1fT\x87\x8a\xa6bGN\xa8X\xc8)" +
"6\x1b\xf7\xc9%\x86\xc6O\x81\x18A!2mb\xf8" +
"\xa1\xac\x8c\x1a\x09S\xc9[Ql\xd2\xd5\xb0!\x1a|" +
"\x82G\xf9\x9a\xea\x87\xbb9\xcf,K\x19b\x15\xf1$" +
"^\x15\x13\x9de\xc9\xea4\xf3\xb2\xd2\x95&\xb3\x84\xa2" +
"f\xd3.\xea\x1d\xc7\xdb\x06\x91q.\x8f\xf2\xd5\x1c6" +
"\xe0{\x8e\xb7\x8f\x05\x0f\x86g\xd4\xc6L\xd30\xb11" +
"\xcc\xda%H\xb2\xa5\x05\xd0\xd0{\x98\xad\xa8\x1a\x92\x1f" +
"\x07\xa5m\x19p\x93\x05\xa2\x106O<\xb7\x93\xdc)" +
"?\xee\xa4\xc8\x1f\x1ay\x94?\xca\xa13D\\\xedg" +
"&\xaaFn\xa5\xa2\x1b\x19\x9eeC\"\x97V\x9a9" +
"\xddE]~\xd8\x16\x04\xa3&\x1eo\xb2\x12\x08\xa5\xe1" +
"\xfdm\xde\x9e#\x11\xa0=\xcc\xde\xc11\xdf>\x10F" +
"\x80 \x80\xdeE\xcer'\x8f\xf2\xceH\"z\xa0/" +
"\x1a\x02bI\x8c\x01\x88\xbb\x89%;y\x94\xf7q\xe3" +
"s<\x1be\xba\xdd\xa3\x0e\x81\xc0\xacPJ[\xecQ" +
"\x87\x18\xf0\xd6\x87\x0d\xc6\xb5\x93\xe0a\x0cX\x86\xc6l" +
"\xd6\xc3\xb2\x9aB.7\xca\xbc\xef%2\xfa\x87:\x11" +
"o\xd3\x15\xdeC\xfcM\xf8eU\xc4\x83\xdaC\xea\x06" +
"\xd0.X\x18\xba\x95\xc0\xc2Z\xa8\xcd*(\xba5\x95" +
"X\xe2\xad\xef\xc5\x8b\x0a\x9a\x84NU\xa2\x0aZ\xbf\x93" +
"\xb8\xe4\xc2\x82\xe3\xe2\xc3\xd2\xd0\xba\xc0\xb8%\xa1qA" +
"\x9d\x11\x03\x0ec\x80\x9dYw\xc2\x0a\x0bc\x93\xed\xaa" +
"\xd3\xdb\x16a\x1bs\x0b;\xffn\x8c~CA\x14\xf7" +
"\x03'6\x08\x8e\xbfs\xf4\xc7\x0b\x15EZl\xa2@" +
"\xb4\xaa`\xab\x82\xa1[\xb4V\xc4E\x96Ts\x11\xb3" +
"J\x92\xdc\x1a\xf5\x90R\x92|`O\xe8\x0c^\x92\x04" +
"\x10\xf7\xee\x07\x90\xf7\xf1(\x7f\x83\xc3N\xaf~\xc3\xc6" +
"\xb0\x13Tb\xb5W\xa5,7\xa0-\xabha\xcet" +
"LV\xd0\x94,\xeb\xc5RE\x06\x88\xc0!\xba\xae\x94" +
"/\x98\xcc\xb2P5t\xb9\xa8h*o\x8f\x05U\xb4" +
"^\xcc\xf7\x9blTE\xa3hu\xdb6\xcb\x0b\x05\xdb" +
"\x9aJ\x8d\x1d\x02DqTP5\x17\xa0H\x11F\xd5" +
"h\x17\x8f\xf2\xf2\x08@)J\xa3\xd7\xf3(\xaf\x0e\x01" +
"\x92\xbf\x0b \xaf\xe6Q\xbe\x99\xc3D\xb1\xa8\x06y\xc3" +
"\xd1\x8c\xac{\xda\x90X\xa9\xe4\xcb\xd3G\xca\xe2\xd2," +
"o\xd8L\x1b\xf38\x9a\x0b-\x9ej\xf8.\x8b\xa3~" +
"\xbe\xfb}\xaa$'\xbe\xbc\x118P\x86{{5\xdc" +
"\x17F\xec\xf0\xb7\xbcb \xb4C\xf8\x0b6\x16D\x1d" +
"\x96\xa7\xbc\xe8\xc3]2\xa6\x1b\x84\x1bB\x9d\xe9\xc6\"" +
"\xd7\x07\x97\x1bYE+\x0f!\x89\xf2|\x19\xadl\xa6" +
"\x1e\x1e\xa2\x8b\xae*\xb4\xb9\xb0\x120\xd7\xf8\x13Kc" +
"\xd8\x07\x90\xd9\x80<f\xb6a\x88\x8dt;.\x05\xc8" +
"\xdcF\xf2;1\x84G\xda\x8e-\x00\x99-$\xbf\x07" +
"\x83\x9b\xadt\x17\x1e\x04\xc8\xdcC\xe2GH=\xc6\xbb" +
"\xde+\xedr\xa7\xdfI\xf2}$\x8f\xc7\x92\x18\x07\x90" +
"\xf6\xe2|\x80\xcc#$?L\xf2\x1a.\x895\x00\xd2" +
"!\x1c\x01\xc8<E\xf2\xe7H.\xc4\x93t\xb9\x97\x9e" +
"E\x13 \xf3w$\xff\x1e\xc9k\x9b\x93X\x0b \x1d" +
"q\xe5\xcf\x93\xfcG$\xaf\x9b\x95\xc4:\x00\xe9\x87\xb8" +
"\x15 \xf3\x03\x92\xbfB\xf2\x19\x98\xa4\xd2W:\x8e{" +
"\x002\xaf\x90\xfc_I~IM\x12/\x01\x90~\xe1" +
"\xee\xe7\x04\xc9\x7fM\xf2\xfaX\x92\xeae\xe9\xdfp?" +
"@\xe6\xd7$\xffo\x927\x08Il\x00\x90\xdep\xed" +
":G\xf2Z\xae\xecb\xe9\xd3\xb8\xec\xf6\xc8\x1bV\xc0" +
"\x13V\x0aG\xe8\xf9X\xbf\x91\xa0\x1b\"&\xc2\xa65" +
" &\x00\x9d\x82ah+\xc7\xbbG\xc2V\x86,\xff" +
"\xa6\xda\x18\xf6\xec\x00I\x18\x94r\x900\xf4T.\x88" +
"Y\xe5\x01\xd2\xdf\x89ju\x17m\xa3X\x806\xe2b" +
".\x08\x16fQ_f\x1a\xf9\xd5\xc8\xcc\xbc\xaa+\xda" +
"$\x81\xb3\x0e8\xac\x83R\xa4\xf2\xe7\x9e8\x8a\xbe\xff" +
"\xbd;`4W\xce\xe8\xb6\xc2\x92\xd5\xcaPY\xed0" +
"\x7f\x92\xda!\xa1G\xe2d\xdb\xa8\xa2\x15+\x8b\xee\x9a" +
"iV\x87\xe9N\xaf\xba\x9c\xec\xf2\xe1\xb7\xde\xca\xe2W" +
"\x95ZiMe)\x91fV[\xa1J\xb1t0\xbc" +
"Y\xf8\xf6.n\x8f\xdc\xc44\xc5f\x96\xdd]\xc0\x82" +
"\xa6\xb2\xdcg\x99\x99\x88V\x17\xd1Rjj\xa9l\\" +
"\xd1\xe6\x1a\x8c\x91\x07\x062\x9c+\x19<e<\x87\x98" +
"\xed\xfdJ\xe9\x83\x06\x95LB\xb4\x94\x9c\xde\xe84\xb3" +
"\x12S9\x8b\xb0g:y)<\x9dh\x9dfm." +
"\x17&\xbaXV\x99\xafJ\x1d\xea\xdf\x93\"\xcd/\"" +
"\xf7:\x1e\xe5\xe1\x08\xb9\x19\xa5\xda\x1c\x8fr!\xac\x17" +
"\xf2\xe9\xb0\xdd(\xf2\\\xa9\xdfH\xe9\xb7\xc0\xa3|\x1b" +
"\x87\x09\xa5h\x0fcc\xf8\x0e5\x0e\x84\xf1-1\xe2" +
"zJ\xcf1\xc0\x0d\xbe\xbbF\x92r\xf0@\xf2\x81`" +
"|\xdf\xf2\xdb\x82I\x0f0x$([\xf9}\x1b\x19" +
"\x9d\xde\xa2\xc4\xdbf\xb7 \xf6\xdf_\xd0o\xb0\x8b\x87" +
"6\x02'>.`\xf8\xa8\x80\xfe\x1b\x82\xb8\xd7\x04N" +
"\xdc% \x17<\x91\xa1\xff\x14&\xdeu7p\xe2v" +
"\x01\xf9\xe0\x85\x0b\xfd\xa6s\xc7\xd8\x0c\x04N\xdc$`" +
",x[D\xbfe-\xde2\x02\x9c\xa8\x0a\x18\x0f\x1e" +
"\xcf\xd0\x7fK\x11\xd7o\x05N\\\x13\xb6V\xa1\xd3\xb3" +
"\xa3\x0b\x1d\x9f\xf3\xd0\xe6\xb2~|\xa3\xd5\xd3\x02\xe8B" +
"\xc7\xbf\xb7\xf1\xefwqs\xb5\xfc^!$\xb2\x8a\xcd" +
"\xba\xa8P\xf6\x02\x1c\x96\"\x1ct\xa1\x1c\xc3H\xc7>" +
"\xd27\xfa\x80\x8d\x93\x0a?\x99f!\xea\x8f\xff\x801" +
"\x97\xaf\xb6kZ'\xe89G\xe6\xa5\x8a\xbc\x9eG\xb9" +
"\x99\x9b\xa4\xe0\xae\x1a:\xbd\x0d\xfb\xe4O\xd0`\x9a\xff" +
"\x0f\x82\xf9\x8fS\xb8\xfe\x11\x8f\xf2\x89\x88[\xbfJ\xc2" +
"\x97y\x94_\x8f\x94\xa3\xaf\x91\xaf\x9f\xe0Q~'|" +
"Fx\xebn\x00\xf9\x1d\x1e\xd3\x91JK\xbcH\x8a\xbf" +
"\xa5z\xc4\xad\xb3\xd0\xab\xb3\xe2\xf8 @\xa6\x96\xea\x94" +
"\xa4[g\xc5\xbc:K\xc4\x01\x80L#\xc9?\x1a\xad" +
"\xb3f\xe1Z\x80L3\xc9\xe7\xe2\xf8k\xb7P4\xc3" +
"\xf2W3\x86\x96\xabz\xd5\xe4\xed\xbfk\xa0\xbdLQ" +
"\xb5\xa2\xc9\xa0\xfc\x0a\x92\xea\x89\x943\xde\x83\x87\xd7\xc6" +
"\xcc\x10\x09sh\x05-\xceitA&\xcad\x9aQ" +
"\xcc\x0dj\x8a\xc9r\x19f\x0a^@\xe8\xe7\xe3r-" +
"F\xfe\x03\x01 |(\x8e\x90}\xc2\xcc\xd8k\x9a\x06" +
"\x9aeW\x8d\x85\xe1U#\xb8i\xac\x0dox\"\xd7" +
"U\xba\xe2\x0d\x84\x97\xa3\xb6\xacR\xb4X\x05&\xc03" +
"3\xe8\x84Y\xc3FQ\xcb\xa5\x19\x08\xb69Vq\xab" +
"\x8bM\x16}\x13~$\xacw#\xa1\xff\xda\x89\xfe\xa3" +
"\xa6(\xef\x01N\\A\x91\xd0\x7fxC\xff\xd5]\xec" +
">\x08\x9c\xf8g\x14\x09\xfd7g\xf4\x1fR\xc5\x8e\x17" +
"\x81\x13;\"\xefA>>\x15\xefA\xde\x07\xd7\x1f\xe8" +
"C)\xa1r\xe5\x19\x95\"T\xb4\x11\xf1!:;^" +
"B\x8d\x1c\xe7\xb4\x1eQ\xa6\xfc\xf6\x10\xfc\xd3KY\xcc" +
"\xa9\xfb\xb0M6?5\xfe\x7f\x00\x00\x00\xff\xff\xda\xbc" +
"\xea\xa1"
const schema_db8274f9144abc7e = "x\xda\xccZ}t\x14\xd7u\xbfw\xde.#\xc9\x12" +
"\xbb\xe3\xd9\x04!E^G\x07\xda\x9a\x04cAIm" +
"\x9aD\x12\x96\x88\x85\x01k\xb4\x90\xe3\x83q\x8eG\xbb" +
"O\xd2\xa8\xbb3\x9b\x99Y\x19\x11;|\x04\x8c\xf1\xb1" +
"\x1dC\xc06Jh\x08\x8e\xdbS9IM\x8c\x9b\xa6" +
"\xc7nM\x1a\xc7\x89\x1d\x13\xe3cR\x08N\xd3\x94\xd0" +
"6>\xa4\xae\xbf\x9a\xc3i\xea\xe9\xb93;\x1f\xda\x15" +
"H\x82\xf6\x9c\xfe\x07w\xef\xbc\xf7\xee\xef\xfd\xee}\xbf" +
"w\x9f\xae{\xb1\xb6Ch\x8boN\x00(G\xe2\xb3" +
"\x1c\xbe\xe0\xd5M\x07\xe7\xff\xfd6P\xaeFt>\xff" +
"\xcc\xca\xd4y{\xdbi\x883\x11`\xc9\xe3\xe28\xca" +
"\xcf\x8a\"\x80\xfc]\xf1_\x01\x9d{>\xf8\xe4W\x1f" +
"\xef\xde\xfb\x05\x90\xaef\xa13\xe0\x92\x035\x9bP>" +
"\\C\x9e\xdf\xac\xd9)7\xd4\x8a\x00\xce\xcd\xd2\xa2\xdb" +
"R\xaf\x1c#\xef\xe8\xd01\x1a\xfa\xbd\x9a\xf5(\xd7\x92" +
"\x9b\x1c\xaf\xa5\xa1?^\xf8\xc9\xa1\x8f\xed{i;H" +
"W\x0b\x13\x86~\xabv\x1c\xe5\xda:\xd7\xb3\xee\x16@" +
"\xe7\x9d\xbd\x8dO|\xed\xd8\x0fw\x80\xb4\x10\xa1\xbc\xd2" +
"\x96\xba:\x01P^Z\xf7\x97\x80\xce\xcb\xef\xdd\xf6\xee" +
"\x91\x1f,\xbd\x07\xa4E\xe4\x80\xe4p\xa2\xae\x8f\x1c\xde" +
"\xaak\x07t\xde8\xf7_;?w\xcd\x9a\x87@Y" +
"\x84\x82?\x84t\xc5J\x01p\xc9\xc2+\xd2\x08\xe8\xb4" +
"\xafx\xf9\xbbMK\x1e\xde[\xb1v\x81<\x95\xfa\xf5" +
"(\xf3zZ\x91Z\x7f'\xa0\xf3\xc9?z\xea\xc1\xae" +
"\x87\xb7\xec\x03ii0\xe1\xf3\xf5\xf7\xd1\x84g\xebi" +
"\xc2\xff\x9c\xfd\xe5c\xa5\x1b\xbf\xf3pyE\xee(\xf1" +
"\x86\xf5\xe4\xf0\xe1\x06\x1a\xa1ud\xfe\x1d\xdf{\xfe\xa9" +
"G@Y\x82\xe8\xbc\xde\xff\x91\x13\xec\xc0\xf8iX\x87" +
"\"-p\xc9\xd6\x86W\x11P\xde\xe7\xfa\xfe\xe4\xa3\xcf" +
"\xfc\xcdCO\xed\xfc2(\x0b\x11\x01\\8\xcf7," +
"\xa0\xc1\xa4\xd94\xdb\xde\x93\xcf\xae)\xec\x1e;\xe4\x01" +
"\xe4\xfe~\xc3\xec\xc5\x02\xc4\x9c\xed=\xbf-\xac{," +
"\xf3X\x19\xba8\xfd\xd46\xbb\x95\xe2\xee\x99\xed\xc6\xbd" +
"\xf4ggoY\xfd\xed\x81?\x8f|\xcb\x13\xe3\xf4\xed" +
"\xce\x81\xb7\x8f&\xfb\x0aOL\x86\x08O\xfc\x0c\xe5\xad" +
"\x09B\xe4\xee\x04\xad\xf1\x9bW\xdd\\\xbb\xf1\xec\x8a'" +
"AZ\xe2\x0fs*\xb1\x8d\x86\x19y\xf1\xb1\xdf\x9b\xff" +
"\xe2\x9d\x87AY\x8a\xe1\xee\xd0o(\xbf\xe5~\x1b;" +
"=\x7f\xfc\xd9_<x\xa4\x8ad\xab\x93\xe3(\xf3\xa4" +
"\x8b{\xf2S\xf2n\xfa\x97\x13\xbb\x9d\xbd\xaf>\xfaw" +
"G*\x09\xec\xaek4\xb9\x07=\xbf%\x0f$\xdd\xf8" +
"\xee;:\xf6\x91\x9a\xaf\xbe\xf3\xf4\xa4\xeeOK{P" +
"~Y\xa2\x09~$\x11\x93>\xd0\x83\xaf?\xd7\x16\xfb" +
"N\x94j\x85+\x9bh\xad;\xae$\x87\x96\xdf,o" +
"\xd0\xdf\xdc\xf6\\\x05(\xae\xe3|y\x13\xca7\xc84" +
"\xdaR\x99\x9cc\x1f\x1b\xde)\x9d\xf9\xe9\xf3\x1e(^" +
"\xe4\xc7\xe517r\x996n\xe5m_\xda\x13?\xfb" +
"\xa5\x17hq\x91$\x88\xd7\xb8\xfcL\x1dB\xf9\x9a\x94" +
";rj\x0e\x03t\x9a\x9e\xfc\xe3o-\xcf\x9dzi" +
"\xb2\x1dy|N+\xcaO\xcf\xa1\xc9\x0f\xcf!T\xcf" +
",<\xfc\xb9_?p\xfc\xb5r(\xee\xe4\x0d\x8d." +
"k\xe67\xd2\xe4\xe77\x1c\xbcYsn=]\x89\x8c" +
"\xeb\xd9\xdd\xf8\xcf(\xab\x8d4\xdc\xed\x8d4\\@\xd1" +
"\xc9\xbc\x8f6\x8e\xa1|\xca\xf5>\xe1\x8e-\x9cU\xe7" +
"n\xf9\xe9'_\x8f\xb0\xeaT\xe3\x95D\x875\x9f\xbe" +
"m\xb8\xf6\xee3g\xa2\xcb:\xde\xe8\"\xfck\xf7\xd3" +
"\xbf\xfd\x87G\x86n\xff\xd6\xb1\xb3\x11&\xd5\xce=D" +
"\x9f\xfe\xfb\x9f\xbd\xf1\xc5s\x85\xdc\xbf\xb89\xe3\xefN" +
"\xed\xdca7\xa4\xb9TS\xe6\xa4\x1b\xba[O\xf6\xbe" +
"\x11\x05\x1c\x9bLrhi\xa2\xc1\x97\xde\xd1\xc97\\" +
"\x7f\xeb\x1bUT\xfbD\xd30\xcaJ\x13}\xb0\xbai" +
"'\xca\xbcy\x0e\x803\xf2W\xbbo}\xe2\xfbk\xde" +
"\xf6\xd2\xd8]\xcb\xba\xe6~Z\xcb\x83\x9f\xef\xba\xe5\x86" +
"\xd6\xa3oG\xc3P\x9a)\xb1d\xad\x99f\x1a\xb8\xfe" +
"\xdc\xa7\xe6?\xf8\x83\xb7+\xf6\xcau\xdc\xd5\xbc\x1e\xe5" +
"\x03\xcd\x04\xd7~r~s\xc5\x9f\xbe\xd6\x94hz\xb7" +
"\x02\xdaY\xe4\xfbl\xf3\x18\xca'\x9a]\x98\x9a_ " +
"F\x7f\xed\xeb\x87\xfe\xf1\xfc\xb1\x9b\xde\xab\x8a\xe1\xf9\x96" +
"=(\xff\xbc\x85\x86=\xd5\"\xca\xa7Z~\x1f\xc0\xb9" +
"\xe7\xf4g6\xbe\xfa\x85w\xde\xab\xa4\x98\x07|\xcb6" +
"\x94\xcf\xba_\xfc\xb2\x85\x18\xfb\xc8\xda\x7f\xdb|n\xdf" +
"\x07\x7f[5\xf6\xd6\xab\xc6P\xde\x7f\x15y\xee\xbb\xea" +
"\x05\xf9\x9a4\xa5\xe2+\xe2cm]\x9b_:\x1f\xd9" +
"*)\xbd\x89\xe0yX\xfc\xca\x99-\xbf\xf8\xcc\xef&" +
"\x90/}\xa5\xbbSi\x82\xe7\xae7\xf7\xdf\xf4\xc5\x0d" +
"\xdfx?B\x90\xee\xf4\xb7\xe9S\xbb\xa4\xeb<o\x16" +
"\xe3\xd9EE\xd3\xb0\x8dE\xbe!{mV-\xea\xc5" +
"e\x9d%{\x88\xeb\xb6\x96Um\xde\xc7\xadb\xc2\xd0" +
"-\xde\x8b\xa8$Y\x0c \x86\x00\x92:\x0c\xa0\xdc\xc1" +
"P\xc9\x0b(!\xa6\x88.\x92F\xc6!\x86\x8a-\xa0" +
"$\x08)*\xb4\xd2g[\x01\x94<Ce\xa3\x80\xc8" +
"R\xc8\x00\xa4\xd2\x1e\x00e#Ce\xbb\x80N\x91\x9b" +
"\x05U\xe7:$\xecn\xd3\xc4z\x10\xb0\x1e\xd01\xb9" +
"m\x8e\xaa\xfdyH\xf0\x88Y\x1c\xbe\xd3\xc6\x06\x10\xb0" +
"\x01\xd0\x192J\xa6\xb5N\xb7Q\xcb\xf7\xf1\x01\x93[" +
"8\x84\xb3@\xc0Y\x80A\x90\xb1\x0b\x05\x99\xe1\x96\xa5" +
"\x19\xfa\xeavUW\x07\xb9I\xe1\xd5\xb08@p\x94" +
"\xa1\x7f\xe8Imc H\x0bE\x0cO\x1d\xf4y+" +
"}x\x1c\x04\xa9EtL>\xa8Y67q]\xae" +
"\xe8\x0e\xcd\x0c\xbd\x03\x9d\x92\xee\xfd\x80\xdc\xf4~H\xd0" +
"\xa4\x1d\xd8\x8b\xd3X\xe2\x8dy\x8d\xebv\x8f\xce\x06\x8c" +
"\x0a\xf4WN\x86\xfe\xca2\xfa\xdb#\xe8o]\x0e\xa0" +
"\xdc\xc5P\xb9W@\x89\x95\xe1\xdf\xb1\x00@\xd9\xc2P" +
"\xb9_@'\xebM\x92\x03\x80\x00\xd8\x01\xae\xda%\x93" +
"[d\x9b\x0d\xd8\xcb\xd0\xc5\x7f6\xe0\xe6\x11nR\x04" +
"\xfe~$T3;\x14\xec\xd9\x94\xcc\xea\xde\xa8Y\xb6" +
"\xa6\x0f\xaeu\xed\xbdF\"\xafeG)\xb6zw\xb5" +
"-\xcb\x00\x10\xa5\x0f\xac\x07@A\x92\x96\x03\xb4k\x83" +
"\xbaar'\xa7YYC\xd79\xb0\xac\xbd\xb9_\xcd" +
"\xabz\x96\x07\xd3\x89\x17\x9a\xce\x9b&\xc3\xcd\x11n^" +
"\xabFX=\xafW5\xd5\x82\x05\xa0\xd4\x07\xa0v\xaf" +
"\x07P\xba\x18*\xbd\x11PW\x13\xa8\xab\x18*\xb7F" +
"@]G\xa0\xf62T6\x08\xe8\x18\xa66\xa8\xe97" +
"r`f\x94\x99\x96\xad\xab\x05N\x00\x96\xc1\xd9l\x14" +
"m\xcd\xd0-L\x86\x87\x12 &#\xb0\xd5L\xc5U" +
"\x8f\xaa\xd7\xfa\\\xf3\xa9f\xe8\xf3\xfa\xb8U\xca\xdbh" +
")\xb1 \x9e\x86e\x00J\x0dC%%`\xbb\xe9\xfd" +
"\x9e\x0c5\xc7\xff\xde\xdc\x01\x96\xa9`\xee\xbb\xfb\"\xb4" +
"\xf3\xb1\xdc\xb18\xa4\x1d\x96\xa1\xdcEPng\xa8<" +
"D\xfcD\x8f\x9f\x0f\x8c\x01(\x0f1T\xbe\"\xa0\x14" +
"\x13R\x18C\x94\xf6Sqy\x94\xa1\xf2u\x01\x1d\xcb" +
"\x9b\xba\x070\xe7c\x9e\xceYvO\xd1\xff\xdf\xe6\x9c" +
"e\xf7\x1a\xa6\x8d\"\x08(\x02\xd1\xdc\xb0x\xe7\x00%" +
"bO.\xcfo\xd2\x98nc\x1c\x04\x8c\x13\x08\xa6\x9a" +
"\xe57\x1aT\x82\xf8F\xbb\xbcc a\x1d\xc0\xd4Y" +
"\xea\x91\xac\xb3\xc4\xec!\xaf\x88\xf8 \\C\x84\xfa\x03" +
"\x86\xca\x1fF@h\xa30\xaec\xa8|\\@G\xcd" +
"f\x8d\x92n\xaf\x05\xa6\x0eV$Q\x86C\"k\xf2" +
"\x90R\xfe\xb4\xb5\x17\xac\x16\x86>\xa0\x0d\x96L\xd5\x8e" +
"lW\xa9\x98Sm>\xe1\xa72Wh\xc3\xa6\"K" +
" `.\x91,~\xf5\xab\xa2\x0b+XQ\xa0\xfa&" +
"\x03\x8a\x98\xf1Q\x86\xca\xf5\x93\xef\xf7\xe6\x02\xb7,u" +
"\x90W\x15\x9fY\x17\x01H\xe7Y\x82\x80\x0e5:\xd3" +
"\xaeu#E\x9b\xd6R\xef8\xdeb\x88\xa5\xf3\x18*" +
"\xd7\x09\xd8\x80\xef;\xdej\x16\xee\x09\xb7-\xcdM\xd3" +
"01\x19\x9e\xfcex\xb2\xe5\x09\xd0\xd0\xbb\xb8\xadj" +
"y\xa4l\x0f\xf4q\x05\x88\xd3\xabZ!\x84\x9ey^" +
"\xaf\x9a\xa0t\x8b\xee\x1d\xa5K\x92\xa1\xf2!\x01\x9dA" +
"\xa2r/7Q3rkT\xdd\xc80\x9e\x0dy~" +
"yS\xf7\xf1\xb4\xcb\x9c\x19\x8ec\xf220A\x04\xa6" +
"H\x11D\xcaEk\xa8\x07\x02\x02l\xed\x0f\xcbEP" +
"zwQN\xdd\xcbP\xd9\x1b9\xcfv\xaf\x8c\xd6\x8b" +
"X\x0ac\x00\xd2~\xe2\xcf^\x86\xcaAa\xa2j\xe0" +
"#\\\xb7\xbb\xb4A\x10\xb9\x15Zi\x89]\xda \x07" +
"f]n\x19\xaf\x9d\x16*F\xbfe\xe4\xb9\xcd\xbbx" +
"6\xafRf\x8ep\xef\xf72M\xfd\x8d\x9e\x9a\xd7}" +
"U9\xe6\xf1\x9by\xa2-\x92g\xad!\xb5\x03\x98\x17" +
".\x0e\x93O\xe4\xa1\xd2J[EU\xb7\xaa\xca\x8f|" +
"\xf1Ux%\xa6\x8a@a\xea\x05\xd5'\xf8\xfer\xcb" +
"Y\xf9\xf8\x89\xc6\xb9<\x8c3\x08sY\x18f b" +
"b `\x0c\xb0=\xeb\x0eX\x15k|\xbakK\xf8" +
"\x0a2\xe6*H\xffB\x8e~\x17C\x92\x0e\x81 5" +
"\x88\x8e\xbf~\xf4\xbf\x17\xab\xd4`|\xea\xf2u\x8bK" +
"A\xb4h\xc6H\x12-\x9b,\x89\xccI\xce\xdcm\xd1" +
"\x1c*\x9f\xb9\xbb\xc7\xc2t\xf1\xce\\\x00\xe9\xc0!\x00" +
"\xe5 C\xe5\x1b\x02\xb6{B\x11\x93a\x13\xaa\xcc{" +
"O\x01\xad2 \x9dU\xf3\xe1\x11\xec\x98\xbc\x98W\xb3" +
"\xbc\x1b\xcb\xa2\x0f\x10A@t\x93\xadP4\xb9e\xa1" +
"f\xe8JI\xcdk\xcc\x1e\x0d\x94\xbb^*\xf4\x9a|" +
"DC\xa3du\xda6/\x88E\xdb\xaa\xd2\xf5\xd3\x80" +
"\xc9\xaf\xc1\xae\xbe\x0ce\x1e\x89\xdf\x0e\x86\xca\xaa\x08L" +
"=t*\xdf\xc4PY\x1b\xc2\xa4|\x0f@Y\xcbP" +
"\xb9C\xc0D\xa9\xa4\x05'\x8f\x937\xb2\xee\xceCb" +
"\x8dZ\xa8<\x80z,\xa1\x8f\x17\x0c\x9b\xe7G=\xd6" +
"\xe6\xc2\xb8gZ7+\x0a\xbfwn\xfe\x7fR\xac\xb1" +
"\xa9\xae\x90\xed\x1eR\x15[\xd0:\xd9\x16,\x8e\x04\xe3" +
"\xaf{u\x7f\x18\x8c\xf8'|4(N\xbc@[\xeb" +
"#_\x8e\xa8\x13\xc4\x9bC\x9f\xa9\xeb\xf1d%\xcbM" +
"\xd0UFV\xcdWW\x19V\xb8\xa0\xbe\x9ei\x05\x89" +
"NM\xe9,\x1a\xba\xcb\xd3\xeb\xfd\xe1\xe5Q\\\x09\x90" +
"\xd9\x88\x0c3\xdb1\xc4I\xde\x8a\xcb\x012w\x91\xfd" +
"^\x0c\xa1\x92w`\x13@f\x0b\xd9\xef\xc7\xe0\xaa-" +
"\xef\xc2q\x80\xcc\xfdd~\x94\xdcc\xccMmy\x9f" +
";\xfc^\xb2\x1f${<\x96\xc28\x80|\x00\x17\x00" +
"d\x1e%\xfb\x11\xb2\xcf\x12R8\x0b@>\x8c\xc3\x00" +
"\x99'\xc9\xfe\x0c\xd9\xc5x\x0a\xdd.6\x9a\x00\x99\xbf" +
"&\xfb\xf7\xc9^\xd3\x98\xc2\x1a\x00\xf9\xa8k\x7f\x8e\xec" +
"?&{\xed\xdc\x14\xd6\x02\xc8?\xc2m\x00\x99\x1f\x92" +
"\xfd5\xb2\xd7a\x8ad\xb6|\x1c\xc7\x002\xaf\x91\xfd" +
"\x9f\xc8~\xc5\xac\x14^\x01 \xff\xdc]\xcfI\xb2\xff" +
"\x8a\xec\xf5\xb1\x14is\xf9\x97x\x08 \xf3+\xb2\xff" +
"\x07\xd9\x1b\xc4\x146\x00\xc8\xbfq\xe3:G\xf6\x1a\xa1" +
"\xe2z\xeb\xf3\xba\xe2\x0e\xcb\x0c+\xe0\x0c/\xd7*\x9c" +
"pC\xc5D\xd8L\x07\xc4\x04\xa0S4\x8c\xfc\x9a\x89" +
"\xf9\x92\xb0\xd5A\xcb\xbf/'\xc3^\" \x19\x03u" +
"\x08\x09C\xef\xc9\x05\x05\xad\xb2z\xfa+\xd1\xac\xce\x92" +
"m\x94\x8a\x90&F\xe6\x82\x1ab\x96\xf4\x15\xa6QX" +
"\x8b\xdc,h\xba\x9a\x9f\xa2\xaa\xd6\x82\x80\xb5P.`" +
"\xfe\xd8\x17/\xb1\x17\xbe\xfd\x07\xbcf\x17\xe2\xb5\xb8V" +
"\x1d\xac\x10\x1d\x0b\xa6\x10\x1d\x09=RD\xd3#j\xbe" +
"T\xad\xe9/Mh\xf6q+AZc\xaa{\x8e\xdf" +
"\x1d\xac(n\x17\x14\\\xeb\xaaU\x88\xab\xb8D\xbdJ" +
"q\x8d\x87\x97\x18?\xf6\xa5\xad\x91\x1b`^\xb5\xb9e" +
"w\x16\xb1\x98\xd7x\xee\xd3\xdcLD\x85IT\x8f\xcd" +
"\xe4\xe4\x9b\xa0\xff\xdc\xe01\xf2 B \x08\xe5\xe0g" +
"\x88\xf0 \xb7\xbd\x7f\xf5\xe8\x03\x86\xa7\xbc\xd0\xba\xac1" +
"\\9\xc8\xa6\xde\xa3\xb0\xdd;]\xb5=\x93\xeaN\xab" +
"\x10\xa37\x9b\x9a\x19\x8c:\x89\xc8\xf5/i\x91\x96\x1d" +
"\xa5\xc1\x06\x86\xcaP$\x0d8\x9d\xd59\x86J1\x94" +
"\x1d\x85\xbe\xb0_*1\xa1\xdc0\xa5\xf3\xbb\xc8P\xb9" +
"K\xc0\x84Z\xb2\x870\x19\xbe\xa7M\x00db#\x8f" +
"\xf2\xa1G\xcfq\xc0\x8d~zGN\xf5\xe0\xa1g\xba" +
"\xd7\xfb\xe9\x05\xef_\x13\xa7\xdc\xd2\xe0\xc5c\xba\xaa\xc2" +
"\xe7Q\x82\xa6&n7\xbaz\xdb\x7fSB\xff\xcd@" +
":\xbc\x09\x04\xe9/D\x0c\xdfI\xd0\x7f\x16\x91\x0e\x98" +
" H\xfbD\x14\x82g?\xf4\x9f\xf7\xa4]\xf7\x81 " +
"\xed\x10\x91\x05\xafv\xe8\xf7\xd1\xdbF\xeb\x10\x04\xe9n" +
"\x11c\xc1{)\xfa]x\xe9\xb3\xc3 H\x9a\x88\xf1" +
"\xe0A\x10\xfd\xe7!\xe9\xf6m H\xeb\xc2\x161\xb4" +
"{qt\xa0\xe3\xe7\x02\xa4\xddl\x98\xd80\xf6\xbc\x00" +
":\xd0\xf1/\x8b\xecB\xb7E\xd7\xcb\xefpB\"\xab" +
"\xda\xbc\x83\x14\xb8W\x10\xb1\\\x11\xa1\x03\x95\x18F\x1e" +
"!\"\xfd\xad\xcb\xea\xe9T\xe5\xcf%)\\\x7f\x94K" +
"\xac\xd7\x17i\xf5{\xe5\xa6\xdcG\x8f\x8c>\xecv\x80" +
"Qi\x14\xa6P\xf5\x17)\xbb\xde\xe2\xc3\xd4`\x9e\xdc" +
"\xbd:\x98\xe58\x15\xfc\x1f3TNFR\xff\x04\x19" +
"_a\xa8\xbc\x1e\x91\xbb\xa7\xa8\x1e\x9cd\xa8\xbc\x1b\xbe" +
"\x95\xbcu\x1f\x80\xf2.\xc3\xbe\x88z\x93\xfe\x9b\x1c\x7f" +
"G\x1a\xc7\xd5n\xe8i\xb78\xee\x01\xc8\xd4\x90\xf6I" +
"\xb9\xda-\xe6i7\x09\xfb\x012I\xb2\x7f(\xaa\xdd" +
"\xe6\xe2z\x80L#\xd9\xe7\xe1\xc4\xdb\xbfX2Cy" +
"\x9d7\x06Wi\xfa\xa4\x82\xc0\x7f\xbcA{\x85\xaa\xe5" +
"K&\x87\xca\xdbNOWD\"y\xaf:^\x1b6" +
"C\xe4\xcc\xa1\x15\xb4hg\xd0\x98\x99\xfa,\xcc\x1b\xa5" +
"\xdc@^5y\xce\xdd}\xa4r\xd1\xcb\xe2J\x0dF" +
"\xfe\xe6\x02 |\x1a\x8f\xa4\xc24N\xd8n\xd34L" +
"\xa8\xb8\xd6,\x0e\xaf5\xc1\xadf}x\xb1\x94\x84\x8e" +
"\xf2\xcd\xb2?\xbc\x8d\xa5\xb3j\xc9\xe2U\xf8\x00\xe3f" +
"\xd0\xb6\xb3\x86\x8cR>\xd7\xc7A\xb4\xcd\xd1\xaa\xcbd" +
"|\xba\xd5\x9ay5\xb3\xde\xad\x99\xfeS/\xfa/\xba" +
"\x922\x06\x82\xb4\x9aj\xa6\xff\xea\x88\xfe\xdf\x1cH\x9d" +
"\xe3 H\x9f\xa0\x9a\xe9\xbf\xb8\xa3\xff\x8a,\xb5\xbd\x08" +
"\x82\xd4\x16y\x01\xf3Q\xaaz\x01\xf3~H\xd8\x9a\xf7" +
"C\xf90\x16*Oc\xaae\xd1\x8eH\xcd\xe5\xb6\x9c" +
"\xda\xbd\x16\xd1\xe5\xbc\x17M\xfb}%\xf8\xeb\x9f\xff\x9b" +
"\xa6\xa0\x7f\xb6\xfeO\x00\x00\x00\xff\xff\xb4\x0bQ\xfc"
func init() {
schemas.Register(schema_db8274f9144abc7e,

View File

@ -0,0 +1,64 @@
package quic
import (
"context"
"fmt"
"io"
"net"
"time"
"zombiezen.com/go/capnproto2/rpc"
"github.com/google/uuid"
"github.com/cloudflare/cloudflared/tunnelrpc"
"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
// CloudflaredClient calls capnp rpc methods of SessionManager and ConfigurationManager.
type CloudflaredClient struct {
client pogs.CloudflaredServer_PogsClient
transport rpc.Transport
requestTimeout time.Duration
}
func NewCloudflaredClient(ctx context.Context, stream io.ReadWriteCloser, requestTimeout time.Duration) (*CloudflaredClient, error) {
n, err := stream.Write(rpcStreamProtocolSignature[:])
if err != nil {
return nil, err
}
if n != len(rpcStreamProtocolSignature) {
return nil, fmt.Errorf("expect to write %d bytes for RPC stream protocol signature, wrote %d", len(rpcStreamProtocolSignature), n)
}
transport := tunnelrpc.SafeTransport(stream)
conn := rpc.NewConn(transport)
client := pogs.NewCloudflaredServer_PogsClient(conn.Bootstrap(ctx), conn)
return &CloudflaredClient{
client: client,
transport: transport,
requestTimeout: requestTimeout,
}, nil
}
func (c *CloudflaredClient) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeIdleAfterHint time.Duration, traceContext string) (*pogs.RegisterUdpSessionResponse, error) {
ctx, cancel := context.WithTimeout(ctx, c.requestTimeout)
defer cancel()
return c.client.RegisterUdpSession(ctx, sessionID, dstIP, dstPort, closeIdleAfterHint, traceContext)
}
func (c *CloudflaredClient) UnregisterUdpSession(ctx context.Context, sessionID uuid.UUID, message string) error {
ctx, cancel := context.WithTimeout(ctx, c.requestTimeout)
defer cancel()
return c.client.UnregisterUdpSession(ctx, sessionID, message)
}
func (c *CloudflaredClient) UpdateConfiguration(ctx context.Context, version int32, config []byte) (*pogs.UpdateConfigurationResponse, error) {
ctx, cancel := context.WithTimeout(ctx, c.requestTimeout)
defer cancel()
return c.client.UpdateConfiguration(ctx, version, config)
}
func (c *CloudflaredClient) Close() {
_ = c.client.Close()
_ = c.transport.Close()
}
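A minimal caller-side sketch of this client (not part of the diff): the function name, the stream variable, and the destination address are illustrative assumptions; the package is assumed to be imported as quic, with context, io, net, time and github.com/google/uuid also imported.
// Hypothetical usage sketch; stream is an already-opened io.ReadWriteCloser
// (for example a freshly opened QUIC stream). NewCloudflaredClient writes the
// RPC protocol signature itself before issuing capnp calls.
func registerSession(ctx context.Context, stream io.ReadWriteCloser) error {
	client, err := quic.NewCloudflaredClient(ctx, stream, 5*time.Second)
	if err != nil {
		return err
	}
	defer client.Close()
	// 192.0.2.10:53 is a documentation address, used purely for illustration.
	_, err = client.RegisterUdpSession(ctx, uuid.New(), net.ParseIP("192.0.2.10"), 53, 30*time.Second, "")
	return err
}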

View File

@ -0,0 +1,70 @@
package quic
import (
"context"
"fmt"
"io"
"time"
"zombiezen.com/go/capnproto2/rpc"
"github.com/cloudflare/cloudflared/tunnelrpc"
"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
// HandleRequestFunc wraps the proxied request from the upstream and also provides methods on the stream to
// handle the response back.
type HandleRequestFunc = func(ctx context.Context, stream *RequestServerStream) error
// CloudflaredServer provides the handlers for the different types of requests that can be communicated over the
// stream (proxied data requests and capnp RPCs).
type CloudflaredServer struct {
handleRequest HandleRequestFunc
sessionManager pogs.SessionManager
configManager pogs.ConfigurationManager
responseTimeout time.Duration
}
func NewCloudflaredServer(handleRequest HandleRequestFunc, sessionManager pogs.SessionManager, configManager pogs.ConfigurationManager, responseTimeout time.Duration) *CloudflaredServer {
return &CloudflaredServer{
handleRequest: handleRequest,
sessionManager: sessionManager,
configManager: configManager,
responseTimeout: responseTimeout,
}
}
// Serve reads the preamble protocol signature of the provided stream and dispatches it to the matching handler:
// data streams go to the request handler and RPC streams to the capnp RPC servers.
func (s *CloudflaredServer) Serve(ctx context.Context, stream io.ReadWriteCloser) error {
signature, err := determineProtocol(stream)
if err != nil {
return err
}
switch signature {
case dataStreamProtocolSignature:
return s.handleRequest(ctx, &RequestServerStream{stream})
case rpcStreamProtocolSignature:
return s.handleRPC(ctx, stream)
default:
return fmt.Errorf("unknown protocol %v", signature)
}
}
func (s *CloudflaredServer) handleRPC(ctx context.Context, stream io.ReadWriteCloser) error {
ctx, cancel := context.WithTimeout(ctx, s.responseTimeout)
defer cancel()
transport := tunnelrpc.SafeTransport(stream)
defer transport.Close()
main := pogs.CloudflaredServer_ServerToClient(s.sessionManager, s.configManager)
rpcConn := rpc.NewConn(transport, rpc.MainInterface(main.Client))
defer rpcConn.Close()
// We ignore the errors here because if cloudflared fails to handle a request, we will just move on.
select {
case <-rpcConn.Done():
case <-ctx.Done():
}
return nil
}
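A minimal serving sketch (illustrative only, written as if inside this package; the acceptStream callback and the server construction are assumptions, not part of this diff):
// Hypothetical accept loop: every accepted stream is handed to Serve, which
// sniffs the 6-byte preamble and routes the stream to the data or RPC handlers.
func serveStreams(ctx context.Context, server *CloudflaredServer, acceptStream func() (io.ReadWriteCloser, error)) error {
	for {
		stream, err := acceptStream()
		if err != nil {
			return err
		}
		go func(s io.ReadWriteCloser) {
			defer s.Close()
			_ = server.Serve(ctx, s)
		}(stream)
	}
}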

View File

@ -0,0 +1,78 @@
package quic
import (
"fmt"
"io"
)
// protocolSignature defines the first 6 bytes of the stream, which are used to distinguish the type of stream. It
// ensures that whoever performs a handshake does not write data before writing the metadata.
type protocolSignature [6]byte
var (
// dataStreamProtocolSignature is a custom protocol signature for data stream
dataStreamProtocolSignature = protocolSignature{0x0A, 0x36, 0xCD, 0x12, 0xA1, 0x3E}
// rpcStreamProtocolSignature is a custom protocol signature for RPC stream
rpcStreamProtocolSignature = protocolSignature{0x52, 0xBB, 0x82, 0x5C, 0xDB, 0x65}
errDataStreamNotSupported = fmt.Errorf("data protocol not supported")
errRPCStreamNotSupported = fmt.Errorf("rpc protocol not supported")
)
type protocolVersion string
const (
protocolV1 protocolVersion = "01"
protocolVersionLength = 2
)
// determineProtocol reads the first 6 bytes from the stream to determine which protocol is spoken by the client.
// The protocols are magic byte arrays understood by both sides of the stream.
func determineProtocol(stream io.Reader) (protocolSignature, error) {
signature, err := readSignature(stream)
if err != nil {
return protocolSignature{}, err
}
switch signature {
case dataStreamProtocolSignature:
return dataStreamProtocolSignature, nil
case rpcStreamProtocolSignature:
return rpcStreamProtocolSignature, nil
default:
return protocolSignature{}, fmt.Errorf("unknown signature %v", signature)
}
}
func writeDataStreamPreamble(stream io.Writer) error {
if err := writeSignature(stream, dataStreamProtocolSignature); err != nil {
return err
}
return writeVersion(stream)
}
func writeVersion(stream io.Writer) error {
_, err := stream.Write([]byte(protocolV1)[:protocolVersionLength])
return err
}
func readVersion(stream io.Reader) (string, error) {
version := make([]byte, protocolVersionLength)
_, err := stream.Read(version)
return string(version), err
}
func readSignature(stream io.Reader) (protocolSignature, error) {
var signature protocolSignature
if _, err := io.ReadFull(stream, signature[:]); err != nil {
return protocolSignature{}, err
}
return signature, nil
}
func writeSignature(stream io.Writer, signature protocolSignature) error {
_, err := stream.Write(signature[:])
return err
}
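A small in-package sketch of the preamble handshake (illustrative; determineProtocol is unexported, so this only compiles inside this package, the net package is an assumed import, and the net.Pipe pair stands in for a real QUIC stream):
func exampleHandshake() (protocolSignature, error) {
	clientEnd, serverEnd := net.Pipe()
	defer clientEnd.Close()
	defer serverEnd.Close()
	go func() {
		// The initiating side announces an RPC stream by writing its 6-byte signature first.
		_, _ = clientEnd.Write(rpcStreamProtocolSignature[:])
	}()
	// The receiving side reads those 6 bytes to decide how to handle the stream.
	return determineProtocol(serverEnd)
}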

View File

@ -0,0 +1,61 @@
package quic
import (
"fmt"
"io"
capnp "zombiezen.com/go/capnproto2"
"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
// RequestClientStream is a stream to provide requests to the server. This operation is typically driven by the edge service.
type RequestClientStream struct {
io.ReadWriteCloser
}
// WriteConnectRequestData writes requestMeta to a stream.
func (rcs *RequestClientStream) WriteConnectRequestData(dest string, connectionType pogs.ConnectionType, metadata ...pogs.Metadata) error {
connectRequest := &pogs.ConnectRequest{
Dest: dest,
Type: connectionType,
Metadata: metadata,
}
msg, err := connectRequest.ToPogs()
if err != nil {
return err
}
if err := writeDataStreamPreamble(rcs); err != nil {
return err
}
return capnp.NewEncoder(rcs).Encode(msg)
}
// ReadConnectResponseData reads the response from the rpc stream to a ConnectResponse.
func (rcs *RequestClientStream) ReadConnectResponseData() (*pogs.ConnectResponse, error) {
signature, err := determineProtocol(rcs)
if err != nil {
return nil, err
}
if signature != dataStreamProtocolSignature {
return nil, fmt.Errorf("wrong protocol signature %v", signature)
}
// This is a no-op for now. We could branch here if we wanted to support multiple protocol versions.
if _, err := readVersion(rcs); err != nil {
return nil, err
}
msg, err := capnp.NewDecoder(rcs).Decode()
if err != nil {
return nil, err
}
r := &pogs.ConnectResponse{}
if err := r.FromPogs(msg); err != nil {
return nil, err
}
return r, nil
}

View File

@ -0,0 +1,57 @@
package quic
import (
"io"
capnp "zombiezen.com/go/capnproto2"
"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
// RequestServerStream is a stream to serve requests
type RequestServerStream struct {
io.ReadWriteCloser
}
// ReadConnectRequestData reads the handshake data from a QUIC stream.
func (rss *RequestServerStream) ReadConnectRequestData() (*pogs.ConnectRequest, error) {
// This is a no-op for now. We could branch here if we wanted to support multiple protocol versions.
if _, err := readVersion(rss); err != nil {
return nil, err
}
msg, err := capnp.NewDecoder(rss).Decode()
if err != nil {
return nil, err
}
r := &pogs.ConnectRequest{}
if err := r.FromPogs(msg); err != nil {
return nil, err
}
return r, nil
}
// WriteConnectResponseData writes response to a QUIC stream.
func (rss *RequestServerStream) WriteConnectResponseData(respErr error, metadata ...pogs.Metadata) error {
var connectResponse *pogs.ConnectResponse
if respErr != nil {
connectResponse = &pogs.ConnectResponse{
Error: respErr.Error(),
}
} else {
connectResponse = &pogs.ConnectResponse{
Metadata: metadata,
}
}
msg, err := connectResponse.ToPogs()
if err != nil {
return err
}
if err := writeDataStreamPreamble(rss); err != nil {
return err
}
return capnp.NewEncoder(rss).Encode(msg)
}

View File

@ -11,11 +11,10 @@ import (
"time"
"github.com/google/uuid"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
const (
@ -26,14 +25,14 @@ func TestConnectRequestData(t *testing.T) {
var tests = []struct {
name string
hostname string
connectionType ConnectionType
metadata []Metadata
connectionType pogs.ConnectionType
metadata []pogs.Metadata
}{
{
name: "Signature verified and request metadata is unmarshaled and read correctly",
hostname: "tunnel.com",
connectionType: ConnectionTypeHTTP,
metadata: []Metadata{
connectionType: pogs.ConnectionTypeHTTP,
metadata: []pogs.Metadata{
{
Key: "key",
Val: "1234",
@ -47,10 +46,10 @@ func TestConnectRequestData(t *testing.T) {
reqClientStream := RequestClientStream{noopCloser{b}}
err := reqClientStream.WriteConnectRequestData(test.hostname, test.connectionType, test.metadata...)
require.NoError(t, err)
protocol, err := DetermineProtocol(b)
require.NoError(t, err)
reqServerStream, err := NewRequestServerStream(noopCloser{b}, protocol)
protocol, err := determineProtocol(b)
require.NoError(t, err)
require.Equal(t, dataStreamProtocolSignature, protocol)
reqServerStream := RequestServerStream{&noopCloser{b}}
reqMeta, err := reqServerStream.ReadConnectRequestData()
require.NoError(t, err)
@ -66,11 +65,11 @@ func TestConnectResponseMeta(t *testing.T) {
var tests = []struct {
name string
err error
metadata []Metadata
metadata []pogs.Metadata
}{
{
name: "Signature verified and response metadata is unmarshaled and read correctly",
metadata: []Metadata{
metadata: []pogs.Metadata{
{
Key: "key",
Val: "1234",
@ -80,7 +79,7 @@ func TestConnectResponseMeta(t *testing.T) {
{
name: "If error is not empty, other fields should be blank",
err: errors.New("something happened"),
metadata: []Metadata{
metadata: []pogs.Metadata{
{
Key: "key",
Val: "1234",
@ -142,22 +141,18 @@ func TestRegisterUdpSession(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
logger := zerolog.Nop()
clientStream, serverStream := newMockRPCStreams()
sessionRegisteredChan := make(chan struct{})
go func() {
protocol, err := DetermineProtocol(serverStream)
assert.NoError(t, err)
rpcServerStream, err := NewRPCServerStream(serverStream, protocol)
assert.NoError(t, err)
err = rpcServerStream.Serve(test.sessionRPCServer, nil, &logger)
ss := NewCloudflaredServer(nil, test.sessionRPCServer, nil, 10*time.Second)
err := ss.Serve(context.Background(), serverStream)
assert.NoError(t, err)
serverStream.Close()
close(sessionRegisteredChan)
}()
rpcClientStream, err := NewRPCClientStream(context.Background(), clientStream, 5*time.Second, &logger)
rpcClientStream, err := NewCloudflaredClient(context.Background(), clientStream, 5*time.Second)
assert.NoError(t, err)
reg, err := rpcClientStream.RegisterUdpSession(context.Background(), test.sessionRPCServer.sessionID, test.sessionRPCServer.dstIP, test.sessionRPCServer.dstPort, testCloseIdleAfterHint, test.sessionRPCServer.traceContext)
@ -192,14 +187,10 @@ func TestManageConfiguration(t *testing.T) {
config: config,
}
logger := zerolog.Nop()
updatedChan := make(chan struct{})
go func() {
protocol, err := DetermineProtocol(serverStream)
assert.NoError(t, err)
rpcServerStream, err := NewRPCServerStream(serverStream, protocol)
assert.NoError(t, err)
err = rpcServerStream.Serve(nil, configRPCServer, &logger)
server := NewCloudflaredServer(nil, nil, configRPCServer, 10*time.Second)
err := server.Serve(context.Background(), serverStream)
assert.NoError(t, err)
serverStream.Close()
@ -208,7 +199,7 @@ func TestManageConfiguration(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
rpcClientStream, err := NewRPCClientStream(ctx, clientStream, 5*time.Second, &logger)
rpcClientStream, err := NewCloudflaredClient(ctx, clientStream, 5*time.Second)
assert.NoError(t, err)
result, err := rpcClientStream.UpdateConfiguration(ctx, version, config)
@ -230,7 +221,7 @@ type mockSessionRPCServer struct {
traceContext string
}
func (s mockSessionRPCServer) RegisterUdpSession(_ context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeIdleAfter time.Duration, traceContext string) (*tunnelpogs.RegisterUdpSessionResponse, error) {
func (s mockSessionRPCServer) RegisterUdpSession(_ context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeIdleAfter time.Duration, traceContext string) (*pogs.RegisterUdpSessionResponse, error) {
if s.sessionID != sessionID {
return nil, fmt.Errorf("expect session ID %s, got %s", s.sessionID, sessionID)
}
@ -246,7 +237,7 @@ func (s mockSessionRPCServer) RegisterUdpSession(_ context.Context, sessionID uu
if s.traceContext != traceContext {
return nil, fmt.Errorf("expect traceContext %s, got %s", s.traceContext, traceContext)
}
return &tunnelpogs.RegisterUdpSessionResponse{}, nil
return &pogs.RegisterUdpSessionResponse{}, nil
}
func (s mockSessionRPCServer) UnregisterUdpSession(_ context.Context, sessionID uuid.UUID, message string) error {
@ -264,18 +255,18 @@ type mockConfigRPCServer struct {
config []byte
}
func (s mockConfigRPCServer) UpdateConfiguration(_ context.Context, version int32, config []byte) *tunnelpogs.UpdateConfigurationResponse {
func (s mockConfigRPCServer) UpdateConfiguration(_ context.Context, version int32, config []byte) *pogs.UpdateConfigurationResponse {
if s.version != version {
return &tunnelpogs.UpdateConfigurationResponse{
return &pogs.UpdateConfigurationResponse{
Err: fmt.Errorf("expect version %d, got %d", s.version, version),
}
}
if !bytes.Equal(s.config, config) {
return &tunnelpogs.UpdateConfigurationResponse{
return &pogs.UpdateConfigurationResponse{
Err: fmt.Errorf("expect config %v, got %v", s.config, config),
}
}
return &tunnelpogs.UpdateConfigurationResponse{LastAppliedVersion: version}
return &pogs.UpdateConfigurationResponse{LastAppliedVersion: version}
}
type mockRPCStream struct {

View File

@ -0,0 +1,56 @@
package quic
import (
"context"
"fmt"
"io"
"net"
"time"
"github.com/google/uuid"
"zombiezen.com/go/capnproto2/rpc"
"github.com/cloudflare/cloudflared/tunnelrpc"
"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
// SessionClient calls capnp rpc methods of SessionManager.
type SessionClient struct {
client pogs.SessionManager_PogsClient
transport rpc.Transport
requestTimeout time.Duration
}
func NewSessionClient(ctx context.Context, stream io.ReadWriteCloser, requestTimeout time.Duration) (*SessionClient, error) {
n, err := stream.Write(rpcStreamProtocolSignature[:])
if err != nil {
return nil, err
}
if n != len(rpcStreamProtocolSignature) {
return nil, fmt.Errorf("expect to write %d bytes for RPC stream protocol signature, wrote %d", len(rpcStreamProtocolSignature), n)
}
transport := tunnelrpc.SafeTransport(stream)
conn := rpc.NewConn(transport)
return &SessionClient{
client: pogs.NewSessionManager_PogsClient(conn.Bootstrap(ctx), conn),
transport: transport,
requestTimeout: requestTimeout,
}, nil
}
func (c *SessionClient) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeIdleAfterHint time.Duration, traceContext string) (*pogs.RegisterUdpSessionResponse, error) {
ctx, cancel := context.WithTimeout(ctx, c.requestTimeout)
defer cancel()
return c.client.RegisterUdpSession(ctx, sessionID, dstIP, dstPort, closeIdleAfterHint, traceContext)
}
func (c *SessionClient) UnregisterUdpSession(ctx context.Context, sessionID uuid.UUID, message string) error {
ctx, cancel := context.WithTimeout(ctx, c.requestTimeout)
defer cancel()
return c.client.UnregisterUdpSession(ctx, sessionID, message)
}
func (c *SessionClient) Close() {
_ = c.client.Close()
_ = c.transport.Close()
}

View File

@ -0,0 +1,60 @@
package quic
import (
"context"
"fmt"
"io"
"time"
"zombiezen.com/go/capnproto2/rpc"
"github.com/cloudflare/cloudflared/tunnelrpc"
"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
// SessionManagerServer handles streams with the SessionManager RPCs.
type SessionManagerServer struct {
sessionManager pogs.SessionManager
responseTimeout time.Duration
}
func NewSessionManagerServer(sessionManager pogs.SessionManager, responseTimeout time.Duration) *SessionManagerServer {
return &SessionManagerServer{
sessionManager: sessionManager,
responseTimeout: responseTimeout,
}
}
func (s *SessionManagerServer) Serve(ctx context.Context, stream io.ReadWriteCloser) error {
signature, err := determineProtocol(stream)
if err != nil {
return err
}
switch signature {
case rpcStreamProtocolSignature:
break
case dataStreamProtocolSignature:
return errDataStreamNotSupported
default:
return fmt.Errorf("unknown protocol %v", signature)
}
// Every new quic.Stream corresponds to a new RPC request, which is why there is a timeout on the server side
// of the RPC request.
ctx, cancel := context.WithTimeout(ctx, s.responseTimeout)
defer cancel()
transport := tunnelrpc.SafeTransport(stream)
defer transport.Close()
main := pogs.SessionManager_ServerToClient(s.sessionManager)
rpcConn := rpc.NewConn(transport, rpc.MainInterface(main.Client))
defer rpcConn.Close()
select {
case <-rpcConn.Done():
return rpcConn.Err()
case <-ctx.Done():
return ctx.Err()
}
}

69
tunnelrpc/utils.go Normal file
View File

@ -0,0 +1,69 @@
package tunnelrpc
import (
"io"
"time"
"github.com/pkg/errors"
"zombiezen.com/go/capnproto2/rpc"
)
const (
// These default values give the underlying connection/stream some time to recover
// in the face of what we believe to be temporary errors.
// We don't want to be too aggressive: returning a final (non-temporary) error
// results in the connection being dropped.
// In turn, the other side will probably reconnect, which puts even more pressure on the overall system.
// So the best approach is to allow some conservative time to recover.
defaultSleepBetweenTemporaryError = 500 * time.Millisecond
defaultMaxRetries = 3
)
type readWriterSafeTemporaryErrorCloser struct {
io.ReadWriteCloser
retries int
sleepBetweenRetries time.Duration
maxRetries int
}
func (r *readWriterSafeTemporaryErrorCloser) Read(p []byte) (n int, err error) {
n, err = r.ReadWriteCloser.Read(p)
// If the read failed with a temporary error, sleep briefly and return it so the caller can retry;
// after maxRetries consecutive temporary errors, return a wrapped non-temporary error instead.
if n == 0 && err != nil && isTemporaryError(err) {
if r.retries >= r.maxRetries {
return 0, errors.Wrap(err, "failed read from capnproto ReadWriter after multiple temporary errors")
} else {
r.retries += 1
// sleep for some time to prevent quick read loops that cause exhaustion of CPU resources
time.Sleep(r.sleepBetweenRetries)
}
}
if err == nil {
r.retries = 0
}
return n, err
}
func SafeTransport(rw io.ReadWriteCloser) rpc.Transport {
return rpc.StreamTransport(&readWriterSafeTemporaryErrorCloser{
ReadWriteCloser: rw,
maxRetries: defaultMaxRetries,
sleepBetweenRetries: defaultSleepBetweenTemporaryError,
})
}
// isTemporaryError reports whether e has a Temporary() method that
// returns true.
func isTemporaryError(e error) bool {
type temp interface {
Temporary() bool
}
t, ok := e.(temp)
return ok && t.Temporary()
}
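A short sketch of how SafeTransport is meant to be consumed (this mirrors the quic package above; the helper and its argument are illustrative, with io, zombiezen.com/go/capnproto2/rpc and this package assumed as imports):
// Hypothetical helper: wrap any io.ReadWriteCloser (a net.Conn, a QUIC stream, ...)
// so that reads failing with a Temporary() error are tolerated a few times before
// the capnp connection is torn down.
func newRPCConn(conn io.ReadWriteCloser) *rpc.Conn {
	return rpc.NewConn(tunnelrpc.SafeTransport(conn))
}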

View File

@ -91,11 +91,12 @@ logr design but also left out some parts and changed others:
| Adding a name to a logger | `WithName` | no API |
| Modify verbosity of log entries in a call chain | `V` | no API |
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
The high-level slog API is explicitly meant to be one of many different APIs
that can be layered on top of a shared `slog.Handler`. logr is one such
alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
package.
alternative API, with [interoperability](#slog-interoperability) provided by
some conversion functions.
### Inspiration
@ -145,24 +146,24 @@ There are implementations for the following logging libraries:
## slog interoperability
Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
slog API. `slogr` itself leaves that to the caller.
slog API.
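For illustration (this example is not part of the upstream README), a round trip with these helpers might look like:
// slog.Handler -> logr.Logger
logrLogger := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil))
logrLogger.Info("hello", "count", 1)
// logr.Logger -> slog.Handler -> slog.Logger
slogLogger := slog.New(logr.ToSlogHandler(logrLogger))
slogLogger.Info("hello again", "count", 2)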
## Using a `logr.Sink` as backend for slog
### Using a `logr.LogSink` as backend for slog
Ideally, a logr sink implementation should support both logr and slog by
implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
implementing both the normal logr interface(s) and `SlogSink`. Because
of a conflict in the parameters of the common `Enabled` method, it is [not
possible to implement both slog.Handler and logr.Sink in the same
type](https://github.com/golang/go/issues/59110).
If both are supported, log calls can go from the high-level APIs to the backend
without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
convert back and forth without adding additional wrappers, with one exception:
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
log calls.
Such an implementation should also support values that implement specific
@ -187,13 +188,13 @@ Not supporting slog has several drawbacks:
These drawbacks are severe enough that applications using a mixture of slog and
logr should switch to a different backend.
## Using a `slog.Handler` as backend for logr
### Using a `slog.Handler` as backend for logr
Using a plain `slog.Handler` without support for logr works better than the
other direction:
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
by negating them.
- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
- Stack unwinding is done by the `SlogSink` and the resulting program
counter is passed to the `slog.Handler`.
- Names added via `Logger.WithName` are gathered and recorded in an additional
attribute with `logger` as key and the names separated by slash as value.
@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
with logr implementations without slog support is not important, then
`slog.Valuer` is sufficient.
## Context support for slog
### Context support for slog
Storing a logger in a `context.Context` is not supported by
slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
to fill this gap:
slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
used to fill this gap. They store and retrieve a `slog.Logger` pointer
under the same context key that is also used by `NewContext` and
`FromContext` for a `logr.Logger` value.
func HandlerFromContext(ctx context.Context) slog.Handler {
logger, err := logr.FromContext(ctx)
if err == nil {
return slogr.NewSlogHandler(logger)
}
return slog.Default().Handler()
}
When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
automatically convert the `slog.Logger` to a
`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
return logr.NewContext(ctx, slogr.NewLogr(handler))
}
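An illustrative sketch of the new helpers (not part of the upstream text):
// Store a *slog.Logger once; retrieve it through either API.
ctx := logr.NewContextWithSlogLogger(context.Background(), slog.Default())
if logrLogger, err := logr.FromContext(ctx); err == nil {
	logrLogger.Info("retrieved as logr.Logger")
}
if slogLogger := logr.FromContextAsSlogLogger(ctx); slogLogger != nil {
	slogLogger.Info("retrieved as *slog.Logger")
}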
With this approach, binaries which use either slog or logr are as efficient as
possible with no unnecessary allocations. This is also why the API stores a
`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
on retrieval would need to allocate one.
The downside is that storing and retrieving a `slog.Handler` needs more
allocations compared to using a `logr.Logger`. Therefore the recommendation is
to use the `logr.Logger` API in code which uses contextual logging.
The downside is that switching back and forth needs more allocations. Because
logr is the API that is already in use by different packages, in particular
Kubernetes, the recommendation is to use the `logr.Logger` API in code which
uses contextual logging.
An alternative to adding values to a logger and storing that logger in the
context is to store the values in the context and to configure a logging
backend to extract those values when emitting log entries. This only works when
log calls are passed the context, which is not supported by the logr API.
With the slog API, it is possible, but not
required. https://github.com/veqryn/slog-context is a package for slog which
provides additional support code for this approach. It also contains wrappers
for the context functions in logr, so developers who prefer to not use the logr
APIs directly can use those instead and the resulting code will still be
interoperable with logr.
## FAQ

33
vendor/github.com/go-logr/logr/context.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
// the value is always a Logger value. With Go >= 1.21, the value can be a
// Logger value or a slog.Logger pointer.
type contextKey struct{}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}

49
vendor/github.com/go-logr/logr/context_noslog.go generated vendored Normal file
View File

@ -0,0 +1,49 @@
//go:build !go1.21
// +build !go1.21
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
)
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}

83
vendor/github.com/go-logr/logr/context_slog.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"fmt"
"log/slog"
)
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
v := ctx.Value(contextKey{})
if v == nil {
return Logger{}, notFoundError{}
}
switch v := v.(type) {
case Logger:
return v, nil
case *slog.Logger:
return FromSlogHandler(v.Handler()), nil
default:
// Not reached.
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
}
}
// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
v := ctx.Value(contextKey{})
if v == nil {
return nil
}
switch v := v.(type) {
case Logger:
return slog.New(ToSlogHandler(v))
case *slog.Logger:
return v
default:
// Not reached.
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
}
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if logger, err := FromContext(ctx); err == nil {
return logger
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
// provided slog.Logger.
func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
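Editor's note: with Go >= 1.21 the same context key can carry either flavor. A short sketch of round-tripping between the two APIs; the handler choice and key/value pairs are illustrative.

package main

import (
	"context"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	slogger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// Store a *slog.Logger; callers may ask for either API.
	ctx := logr.NewContextWithSlogLogger(context.Background(), slogger)

	if lg, err := logr.FromContext(ctx); err == nil {
		lg.Info("via logr", "key", "value") // a Logger built from slogger.Handler()
	}
	if sl := logr.FromContextAsSlogLogger(ctx); sl != nil {
		sl.Info("via slog", "key", "value") // the original *slog.Logger
	}
}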

View File

@ -100,6 +100,11 @@ type Options struct {
// details, see docs for Go's time.Layout.
TimestampFormat string
// LogInfoLevel tells funcr what key to use to log the info level.
// If not specified, the info level will be logged as "level".
// If this is set to "", the info level will not be logged at all.
LogInfoLevel *string
// Verbosity tells funcr which V logs to produce. Higher values enable
// more logs. Info logs at or below this level will be written, while logs
// above this level will be discarded.
@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
if opts.MaxLogDepth == 0 {
opts.MaxLogDepth = defaultMaxLogDepth
}
if opts.LogInfoLevel == nil {
opts.LogInfoLevel = new(string)
*opts.LogInfoLevel = "level"
}
f := Formatter{
outputFormat: outfmt,
prefix: "",
@ -227,12 +236,15 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
outputFormat outputFormat
prefix string
values []any
valuesStr string
depth int
opts *Options
outputFormat outputFormat
prefix string
values []any
valuesStr string
parentValuesStr string
depth int
opts *Options
group string // for slog groups
groupDepth int
}
// outputFormat indicates which outputFormat to use.
@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON {
buf.WriteByte('{')
buf.WriteByte('{') // for the whole line
}
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
if len(f.valuesStr) > 0 {
if f.parentValuesStr != "" {
if continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
} else {
buf.WriteByte(' ')
}
buf.WriteByte(f.comma())
}
buf.WriteString(f.parentValuesStr)
continuing = true
buf.WriteString(f.valuesStr)
}
groupDepth := f.groupDepth
if f.group != "" {
if f.valuesStr != "" || len(args) != 0 {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
} else {
// The group was empty
groupDepth--
}
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
continuing = true
}
vals = args
if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, continuing, true) // escape user-provided keys
if f.outputFormat == outputJSON {
buf.WriteByte('}')
for i := 0; i < groupDepth; i++ {
buf.WriteByte('}') // for the groups
}
if f.outputFormat == outputJSON {
buf.WriteByte('}') // for the whole line
}
return buf.String()
}
@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
copied := false
for i := 0; i < len(kvList); i += 2 {
k, ok := kvList[i].(string)
if !ok {
if !copied {
newList := make([]any, len(kvList))
copy(newList, kvList)
kvList = newList
copied = true
}
k = f.nonStringKey(kvList[i])
kvList[i] = k
}
@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
if i > 0 || continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
buf.WriteByte(f.comma())
} else {
// In theory the format could be something we don't understand. In
// practice, we control it, so it won't be.
@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
}
}
if escapeKeys {
buf.WriteString(prettyString(k))
} else {
// this is faster
buf.WriteByte('"')
buf.WriteString(k)
buf.WriteByte('"')
}
if f.outputFormat == outputJSON {
buf.WriteByte(':')
} else {
buf.WriteByte('=')
}
buf.WriteString(f.quoted(k, escapeKeys))
buf.WriteByte(f.colon())
buf.WriteString(f.pretty(v))
}
return kvList
}
func (f Formatter) quoted(str string, escape bool) string {
if escape {
return prettyString(str)
}
// this is faster
return `"` + str + `"`
}
func (f Formatter) comma() byte {
if f.outputFormat == outputJSON {
return ','
}
return ' '
}
func (f Formatter) colon() byte {
if f.outputFormat == outputJSON {
return ':'
}
return '='
}
func (f Formatter) pretty(value any) string {
return f.prettyWithFlags(value, 0, 0)
}
@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
}
for i := 0; i < len(v); i += 2 {
if i > 0 {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
k, _ := v[i].(string) // sanitize() above means no need to check success
// arbitrary keys might need escaping
buf.WriteString(prettyString(k))
buf.WriteByte(':')
buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
}
if flags&flagRawStruct == 0 {
@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
continue
}
if printComma {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
printComma = true // if we got here, we are rendering a field
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
name = fld.Name
}
// field names can't contain characters which need escaping
buf.WriteByte('"')
buf.WriteString(name)
buf.WriteByte('"')
buf.WriteByte(':')
buf.WriteString(f.quoted(name, false))
buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
}
if flags&flagRawStruct == 0 {
@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
buf.WriteByte('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
e := v.Index(i)
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
i := 0
for it.Next() {
if i > 0 {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
// If a map key supports TextMarshaler, use it.
keystr := ""
@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
}
}
buf.WriteString(keystr)
buf.WriteByte(':')
buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
i++
}
@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any {
return kvList
}
// startGroup opens a new group scope (basically a sub-struct), which locks all
// the current saved values and starts them anew. This is needed to satisfy
// slog.
func (f *Formatter) startGroup(group string) {
// Unnamed groups are just inlined.
if group == "" {
return
}
// Any saved values can no longer be changed.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
continuing := false
if f.parentValuesStr != "" {
buf.WriteString(f.parentValuesStr)
continuing = true
}
if f.group != "" && f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
}
// NOTE: We don't close the scope here - that's done later, when a log line
// is actually rendered (because we have N scopes to close).
f.parentValuesStr = buf.String()
// Start collecting new values.
f.group = group
f.groupDepth++
f.valuesStr = ""
f.values = nil
}
// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args
if policy := f.opts.LogCaller; policy == All || policy == Info {
args = append(args, "caller", f.caller())
}
args = append(args, "level", level, "msg", msg)
if key := *f.opts.LogInfoLevel; key != "" {
args = append(args, key, level)
}
args = append(args, "msg", msg)
return prefix, f.render(args, kvList)
}
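Editor's note: a short sketch of the new LogInfoLevel knob in practice. The key name "severity" and the verbosity setting are arbitrary choices for illustration, not defaults of this package.

package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	severity := "severity" // replaces the default "level" key; "" would drop the key entirely
	logger := funcr.New(func(prefix, args string) { fmt.Println(prefix, args) }, funcr.Options{
		LogInfoLevel: &severity,
		Verbosity:    1, // allow V(1) lines through
	})

	logger.V(1).Info("cache warmed", "entries", 1024)
	// roughly: "severity"=1 "msg"="cache warmed" "entries"=1024
}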

105
vendor/github.com/go-logr/logr/funcr/slogsink.go generated vendored Normal file
View File

@ -0,0 +1,105 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package funcr
import (
"context"
"log/slog"
"github.com/go-logr/logr"
)
var _ logr.SlogSink = &fnlogger{}
const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
kvList := make([]any, 0, 2*record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
kvList = attrToKVs(attr, kvList)
return true
})
if record.Level >= slog.LevelError {
l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
} else {
level := l.levelFromSlog(record.Level)
l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
}
return nil
}
func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
kvList := make([]any, 0, 2*len(attrs))
for _, attr := range attrs {
kvList = attrToKVs(attr, kvList)
}
l.AddValues(kvList)
return &l
}
func (l fnlogger) WithGroup(name string) logr.SlogSink {
l.startGroup(name)
return &l
}
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, kvList []any) []any {
attrVal := attr.Value.Resolve()
if attrVal.Kind() == slog.KindGroup {
groupVal := attrVal.Group()
grpKVs := make([]any, 0, 2*len(groupVal))
for _, attr := range groupVal {
grpKVs = attrToKVs(attr, grpKVs)
}
if attr.Key == "" {
// slog says we have to inline these
kvList = append(kvList, grpKVs...)
} else {
kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
}
} else if attr.Key != "" {
kvList = append(kvList, attr.Key, attrVal.Any())
}
return kvList
}
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
// logrV0 := getMyLogger()
// logrV2 := logrV0.V(2)
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l fnlogger) levelFromSlog(level slog.Level) int {
result := -level
if result < 0 {
result = 0 // because LogSink doesn't expect negative V levels
}
return int(result)
}
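Editor's note: because fnlogger now implements logr.SlogSink, a funcr logger can back a slog.Logger directly, including WithGroup. A sketch under that assumption; the attribute names are illustrative.

package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	fl := funcr.New(func(prefix, args string) { fmt.Println(prefix, args) }, funcr.Options{})

	sl := slog.New(logr.ToSlogHandler(fl)) // Handle/WithAttrs/WithGroup above are used directly
	sl.With("request_id", "abc123").WithGroup("http").Info("done", "status", 200)
	// the "http" group is rendered by startGroup/render as a nested scope
}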

View File

@ -207,10 +207,6 @@ limitations under the License.
// those.
package logr
import (
"context"
)
// New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users. Passing a nil sink will create
// a Logger which discards all log lines.
@ -410,45 +406,6 @@ func (l Logger) IsZero() bool {
return l.sink == nil
}
// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know.
type RuntimeInfo struct {

192
vendor/github.com/go-logr/logr/sloghandler.go generated vendored Normal file
View File

@ -0,0 +1,192 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
)
type slogHandler struct {
// May be nil, in which case all logs get discarded.
sink LogSink
// Non-nil if sink is non-nil and implements SlogSink.
slogSink SlogSink
// groupPrefix collects values from WithGroup calls. It gets added as
// prefix to value keys when handling a log record.
groupPrefix string
// levelBias can be set when constructing the handler to influence the
// slog.Level of log records. A positive levelBias reduces the
// slog.Level value. slog has no API to influence this value after the
// handler got created, so it can only be set indirectly through
// Logger.V.
levelBias slog.Level
}
var _ slog.Handler = &slogHandler{}
// groupSeparator is used to concatenate WithGroup names and attribute keys.
const groupSeparator = "."
// GetLevel is used for black box unit testing.
func (l *slogHandler) GetLevel() slog.Level {
return l.levelBias
}
func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
}
func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
if l.slogSink != nil {
// Only adjust verbosity level of log entries < slog.LevelError.
if record.Level < slog.LevelError {
record.Level -= l.levelBias
}
return l.slogSink.Handle(ctx, record)
}
// No need to check for nil sink here because Handle will only be called
// when Enabled returned true.
kvList := make([]any, 0, 2*record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
kvList = attrToKVs(attr, l.groupPrefix, kvList)
return true
})
if record.Level >= slog.LevelError {
l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
} else {
level := l.levelFromSlog(record.Level)
l.sinkWithCallDepth().Info(level, record.Message, kvList...)
}
return nil
}
// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
// are called by Handle, code in slog gets skipped.
//
// This offset currently (Go 1.21.0) works for calls through
// slog.New(ToSlogHandler(...)). There's no guarantee that the call
// chain won't change. Wrapping the handler will also break unwinding. It's
// still better than not adjusting at all....
//
// This cannot be done when constructing the handler because FromSlogHandler needs
// access to the original sink without this adjustment. A second copy would
// work, but then WithAttrs would have to be called for both of them.
func (l *slogHandler) sinkWithCallDepth() LogSink {
if sink, ok := l.sink.(CallDepthLogSink); ok {
return sink.WithCallDepth(2)
}
return l.sink
}
func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
if l.sink == nil || len(attrs) == 0 {
return l
}
clone := *l
if l.slogSink != nil {
clone.slogSink = l.slogSink.WithAttrs(attrs)
clone.sink = clone.slogSink
} else {
kvList := make([]any, 0, 2*len(attrs))
for _, attr := range attrs {
kvList = attrToKVs(attr, l.groupPrefix, kvList)
}
clone.sink = l.sink.WithValues(kvList...)
}
return &clone
}
func (l *slogHandler) WithGroup(name string) slog.Handler {
if l.sink == nil {
return l
}
if name == "" {
// slog says to inline empty groups
return l
}
clone := *l
if l.slogSink != nil {
clone.slogSink = l.slogSink.WithGroup(name)
clone.sink = clone.slogSink
} else {
clone.groupPrefix = addPrefix(clone.groupPrefix, name)
}
return &clone
}
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
attrVal := attr.Value.Resolve()
if attrVal.Kind() == slog.KindGroup {
groupVal := attrVal.Group()
grpKVs := make([]any, 0, 2*len(groupVal))
prefix := groupPrefix
if attr.Key != "" {
prefix = addPrefix(groupPrefix, attr.Key)
}
for _, attr := range groupVal {
grpKVs = attrToKVs(attr, prefix, grpKVs)
}
kvList = append(kvList, grpKVs...)
} else if attr.Key != "" {
kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
}
return kvList
}
func addPrefix(prefix, name string) string {
if prefix == "" {
return name
}
if name == "" {
return prefix
}
return prefix + groupSeparator + name
}
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
// logrV0 := getMyLogger()
// logrV2 := logrV0.V(2)
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l *slogHandler) levelFromSlog(level slog.Level) int {
result := -level
result += l.levelBias // in case the original Logger had a V level
if result < 0 {
result = 0 // because LogSink doesn't expect negative V levels
}
return int(result)
}
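Editor's note: the levelBias plumbing means a verbosity-shifted Logger keeps its offset when driven through slog. A rough sketch; the funcr sink and verbosity values are illustrative.

package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	base := funcr.New(func(prefix, args string) { fmt.Println(prefix, args) },
		funcr.Options{Verbosity: 10})

	sl := slog.New(logr.ToSlogHandler(base.V(2))) // levelBias = 2

	sl.Info("hello")             // reaches the sink as Info(level=2, ...)
	sl.Debug("details")          // slog.LevelDebug (-4) -> Info(level=6, ...)
	sl.Error("boom", "err", "x") // always routed to the sink's Error()
}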

100
vendor/github.com/go-logr/logr/slogr.go generated vendored Normal file
View File

@ -0,0 +1,100 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
)
// FromSlogHandler returns a Logger which writes to the slog.Handler.
//
// The logr verbosity level is mapped to slog levels such that V(0) becomes
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
func FromSlogHandler(handler slog.Handler) Logger {
if handler, ok := handler.(*slogHandler); ok {
if handler.sink == nil {
return Discard()
}
return New(handler.sink).V(int(handler.levelBias))
}
return New(&slogSink{handler: handler})
}
// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
//
// The returned logger writes all records with level >= slog.LevelError as
// error log entries with LogSink.Error, regardless of the verbosity level of
// the Logger:
//
// logger := <some Logger with 0 as verbosity level>
// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
//
// The level of all other records gets reduced by the verbosity
// level of the Logger and the result is negated. If it happens
// to be negative, then it gets replaced by zero because a LogSink
// is not expected to handle negative levels:
//
// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
func ToSlogHandler(logger Logger) slog.Handler {
if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
return sink.handler
}
handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
if slogSink, ok := handler.sink.(SlogSink); ok {
handler.slogSink = slogSink
}
return handler
}
// SlogSink is an optional interface that a LogSink can implement to support
// logging through the slog.Logger or slog.Handler APIs better. It then should
// also support special slog values like slog.Group. When used as a
// slog.Handler, the advantages are:
//
// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
// as intended by slog
// - proper grouping of key/value pairs via WithGroup
// - verbosity levels > slog.LevelInfo can be recorded
// - less overhead
//
// Both APIs (Logger and slog.Logger/Handler) then are supported equally
// well. Developers can pick whatever API suits them better and/or mix
// packages which use either API in the same binary with a common logging
// implementation.
//
// This interface is necessary because the type implementing the LogSink
// interface cannot also implement the slog.Handler interface due to the
// different prototype of the common Enabled method.
//
// An implementation could support both interfaces in two different types, but then
// additional interfaces would be needed to convert between those types in FromSlogHandler
// and ToSlogHandler.
type SlogSink interface {
LogSink
Handle(ctx context.Context, record slog.Record) error
WithAttrs(attrs []slog.Attr) SlogSink
WithGroup(name string) SlogSink
}
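Editor's note: and in the other direction, a slog.Handler can sit behind the logr API. A sketch assuming a debug-enabled handler; the options and values are illustrative.

package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	h := slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})

	logger := logr.FromSlogHandler(h)
	logger.Info("ready", "port", 8080)           // emitted at slog.LevelInfo
	logger.V(4).Info("dialing", "addr", ":9000") // emitted at slog.LevelDebug

	_ = logr.ToSlogHandler(logger) // round-trips back to h (verbosity 0)
}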

120
vendor/github.com/go-logr/logr/slogsink.go generated vendored Normal file
View File

@ -0,0 +1,120 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
"runtime"
"time"
)
var (
_ LogSink = &slogSink{}
_ CallDepthLogSink = &slogSink{}
_ Underlier = &slogSink{}
)
// Underlier is implemented by the LogSink returned by FromSlogHandler.
type Underlier interface {
// GetUnderlying returns the Handler used by the LogSink.
GetUnderlying() slog.Handler
}
const (
// nameKey is used to log the `WithName` values as an additional attribute.
nameKey = "logger"
// errKey is used to log the error parameter of Error as an additional attribute.
errKey = "err"
)
type slogSink struct {
callDepth int
name string
handler slog.Handler
}
func (l *slogSink) Init(info RuntimeInfo) {
l.callDepth = info.CallDepth
}
func (l *slogSink) GetUnderlying() slog.Handler {
return l.handler
}
func (l *slogSink) WithCallDepth(depth int) LogSink {
newLogger := *l
newLogger.callDepth += depth
return &newLogger
}
func (l *slogSink) Enabled(level int) bool {
return l.handler.Enabled(context.Background(), slog.Level(-level))
}
func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
l.log(nil, msg, slog.Level(-level), kvList...)
}
func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
l.log(err, msg, slog.LevelError, kvList...)
}
func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
var pcs [1]uintptr
// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
runtime.Callers(3+l.callDepth, pcs[:])
record := slog.NewRecord(time.Now(), level, msg, pcs[0])
if l.name != "" {
record.AddAttrs(slog.String(nameKey, l.name))
}
if err != nil {
record.AddAttrs(slog.Any(errKey, err))
}
record.Add(kvList...)
_ = l.handler.Handle(context.Background(), record)
}
func (l slogSink) WithName(name string) LogSink {
if l.name != "" {
l.name += "/"
}
l.name += name
return &l
}
func (l slogSink) WithValues(kvList ...interface{}) LogSink {
l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
return &l
}
func kvListToAttrs(kvList ...interface{}) []slog.Attr {
// We don't need the record itself, only its Add method.
record := slog.NewRecord(time.Time{}, 0, "", 0)
record.Add(kvList...)
attrs := make([]slog.Attr, 0, record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
attrs = append(attrs, attr)
return true
})
return attrs
}
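Editor's note: the extra attributes this sink adds ("logger" for WithName, "err" for Error) look roughly like the comment below in practice. A sketch; exact attribute order depends on the handler.

package main

import (
	"io"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	logger := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil))
	logger = logger.WithName("reconciler").WithValues("node", "worker-1")

	logger.Error(io.ErrUnexpectedEOF, "sync failed")
	// roughly: level=ERROR msg="sync failed" node=worker-1 logger=reconciler err="unexpected EOF"
}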

View File

@ -1,530 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jsonpb
import (
"encoding/json"
"errors"
"fmt"
"io"
"math"
"reflect"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapJSONUnmarshalV2 = false
// UnmarshalNext unmarshals the next JSON object from d into m.
func UnmarshalNext(d *json.Decoder, m proto.Message) error {
return new(Unmarshaler).UnmarshalNext(d, m)
}
// Unmarshal unmarshals a JSON object from r into m.
func Unmarshal(r io.Reader, m proto.Message) error {
return new(Unmarshaler).Unmarshal(r, m)
}
// UnmarshalString unmarshals a JSON object from s into m.
func UnmarshalString(s string, m proto.Message) error {
return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
}
// Unmarshaler is a configurable object for converting from a JSON
// representation to a protocol buffer object.
type Unmarshaler struct {
// AllowUnknownFields specifies whether to allow messages to contain
// unknown JSON fields, as opposed to failing to unmarshal.
AllowUnknownFields bool
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
// If unset, the global registry is used by default.
AnyResolver AnyResolver
}
// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
// they are unmarshaled from JSON. Messages that implement this should also
// implement JSONPBMarshaler so that the custom format can be produced.
//
// The JSON unmarshaling must follow the JSON to proto specification:
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBUnmarshaler interface {
UnmarshalJSONPB(*Unmarshaler, []byte) error
}
// Unmarshal unmarshals a JSON object from r into m.
func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
return u.UnmarshalNext(json.NewDecoder(r), m)
}
// UnmarshalNext unmarshals the next JSON object from d into m.
func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
if m == nil {
return errors.New("invalid nil message")
}
// Parse the next JSON object from the stream.
raw := json.RawMessage{}
if err := d.Decode(&raw); err != nil {
return err
}
// Check for custom unmarshalers first since they may not properly
// implement protobuf reflection that the logic below relies on.
if jsu, ok := m.(JSONPBUnmarshaler); ok {
return jsu.UnmarshalJSONPB(u, raw)
}
mr := proto.MessageReflect(m)
// NOTE: For historical reasons, a top-level null is treated as a noop.
// This is incorrect, but kept for compatibility.
if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
return nil
}
if wrapJSONUnmarshalV2 {
// NOTE: If input message is non-empty, we need to preserve merge semantics
// of the old jsonpb implementation. These semantics are not supported by
// the protobuf JSON specification.
isEmpty := true
mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
isEmpty = false // at least one iteration implies non-empty
return false
})
if !isEmpty {
// Perform unmarshaling into a newly allocated, empty message.
mr = mr.New()
// Use a defer to copy all unmarshaled fields into the original message.
dst := proto.MessageReflect(m)
defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
dst.Set(fd, v)
return true
})
}
// Unmarshal using the v2 JSON unmarshaler.
opts := protojson.UnmarshalOptions{
DiscardUnknown: u.AllowUnknownFields,
}
if u.AnyResolver != nil {
opts.Resolver = anyResolver{u.AnyResolver}
}
return opts.Unmarshal(raw, mr.Interface())
} else {
if err := u.unmarshalMessage(mr, raw); err != nil {
return err
}
return protoV2.CheckInitialized(mr.Interface())
}
}
func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
md := m.Descriptor()
fds := md.Fields()
if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
return jsu.UnmarshalJSONPB(u, in)
}
if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
return nil
}
switch wellKnownType(md.FullName()) {
case "Any":
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return err
}
rawTypeURL, ok := jsonObject["@type"]
if !ok {
return errors.New("Any JSON doesn't have '@type'")
}
typeURL, err := unquoteString(string(rawTypeURL))
if err != nil {
return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
}
m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))
var m2 protoreflect.Message
if u.AnyResolver != nil {
mi, err := u.AnyResolver.Resolve(typeURL)
if err != nil {
return err
}
m2 = proto.MessageReflect(mi)
} else {
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
if err != nil {
if err == protoregistry.NotFound {
return fmt.Errorf("could not resolve Any message type: %v", typeURL)
}
return err
}
m2 = mt.New()
}
if wellKnownType(m2.Descriptor().FullName()) != "" {
rawValue, ok := jsonObject["value"]
if !ok {
return errors.New("Any JSON doesn't have 'value'")
}
if err := u.unmarshalMessage(m2, rawValue); err != nil {
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
}
} else {
delete(jsonObject, "@type")
rawJSON, err := json.Marshal(jsonObject)
if err != nil {
return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
}
if err = u.unmarshalMessage(m2, rawJSON); err != nil {
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
}
}
rawWire, err := protoV2.Marshal(m2.Interface())
if err != nil {
return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
}
m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
return nil
case "BoolValue", "BytesValue", "StringValue",
"Int32Value", "UInt32Value", "FloatValue",
"Int64Value", "UInt64Value", "DoubleValue":
fd := fds.ByNumber(1)
v, err := u.unmarshalValue(m.NewField(fd), in, fd)
if err != nil {
return err
}
m.Set(fd, v)
return nil
case "Duration":
v, err := unquoteString(string(in))
if err != nil {
return err
}
d, err := time.ParseDuration(v)
if err != nil {
return fmt.Errorf("bad Duration: %v", err)
}
sec := d.Nanoseconds() / 1e9
nsec := d.Nanoseconds() % 1e9
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
return nil
case "Timestamp":
v, err := unquoteString(string(in))
if err != nil {
return err
}
t, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
return fmt.Errorf("bad Timestamp: %v", err)
}
sec := t.Unix()
nsec := t.Nanosecond()
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
return nil
case "Value":
switch {
case string(in) == "null":
m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
case string(in) == "true":
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
case string(in) == "false":
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
case hasPrefixAndSuffix('"', in, '"'):
s, err := unquoteString(string(in))
if err != nil {
return fmt.Errorf("unrecognized type for Value %q", in)
}
m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
case hasPrefixAndSuffix('[', in, ']'):
v := m.Mutable(fds.ByNumber(6))
return u.unmarshalMessage(v.Message(), in)
case hasPrefixAndSuffix('{', in, '}'):
v := m.Mutable(fds.ByNumber(5))
return u.unmarshalMessage(v.Message(), in)
default:
f, err := strconv.ParseFloat(string(in), 0)
if err != nil {
return fmt.Errorf("unrecognized type for Value %q", in)
}
m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
}
return nil
case "ListValue":
var jsonArray []json.RawMessage
if err := json.Unmarshal(in, &jsonArray); err != nil {
return fmt.Errorf("bad ListValue: %v", err)
}
lv := m.Mutable(fds.ByNumber(1)).List()
for _, raw := range jsonArray {
ve := lv.NewElement()
if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
return err
}
lv.Append(ve)
}
return nil
case "Struct":
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return fmt.Errorf("bad StructValue: %v", err)
}
mv := m.Mutable(fds.ByNumber(1)).Map()
for key, raw := range jsonObject {
kv := protoreflect.ValueOf(key).MapKey()
vv := mv.NewValue()
if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
}
mv.Set(kv, vv)
}
return nil
}
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return err
}
// Handle known fields.
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
if fd.IsWeak() && fd.Message().IsPlaceholder() {
continue // weak reference is not linked in
}
// Search for any raw JSON value associated with this field.
var raw json.RawMessage
name := string(fd.Name())
if fd.Kind() == protoreflect.GroupKind {
name = string(fd.Message().Name())
}
if v, ok := jsonObject[name]; ok {
delete(jsonObject, name)
raw = v
}
name = string(fd.JSONName())
if v, ok := jsonObject[name]; ok {
delete(jsonObject, name)
raw = v
}
field := m.NewField(fd)
// Unmarshal the field value.
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
continue
}
v, err := u.unmarshalValue(field, raw, fd)
if err != nil {
return err
}
m.Set(fd, v)
}
// Handle extension fields.
for name, raw := range jsonObject {
if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
continue
}
// Resolve the extension field by name.
xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
if xt == nil && isMessageSet(md) {
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
}
if xt == nil {
continue
}
delete(jsonObject, name)
fd := xt.TypeDescriptor()
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
}
field := m.NewField(fd)
// Unmarshal the field value.
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
continue
}
v, err := u.unmarshalValue(field, raw, fd)
if err != nil {
return err
}
m.Set(fd, v)
}
if !u.AllowUnknownFields && len(jsonObject) > 0 {
for name := range jsonObject {
return fmt.Errorf("unknown field %q in %v", name, md.FullName())
}
}
return nil
}
func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
if fd.Cardinality() == protoreflect.Repeated {
return false
}
if md := fd.Message(); md != nil {
return md.FullName() == "google.protobuf.Value"
}
if ed := fd.Enum(); ed != nil {
return ed.FullName() == "google.protobuf.NullValue"
}
return false
}
func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
_, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
return ok
}
return false
}
func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
switch {
case fd.IsList():
var jsonArray []json.RawMessage
if err := json.Unmarshal(in, &jsonArray); err != nil {
return v, err
}
lv := v.List()
for _, raw := range jsonArray {
ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
if err != nil {
return v, err
}
lv.Append(ve)
}
return v, nil
case fd.IsMap():
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return v, err
}
kfd := fd.MapKey()
vfd := fd.MapValue()
mv := v.Map()
for key, raw := range jsonObject {
var kv protoreflect.MapKey
if kfd.Kind() == protoreflect.StringKind {
kv = protoreflect.ValueOf(key).MapKey()
} else {
v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
if err != nil {
return v, err
}
kv = v.MapKey()
}
vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
if err != nil {
return v, err
}
mv.Set(kv, vv)
}
return v, nil
default:
return u.unmarshalSingularValue(v, in, fd)
}
}
var nonFinite = map[string]float64{
`"NaN"`: math.NaN(),
`"Infinity"`: math.Inf(+1),
`"-Infinity"`: math.Inf(-1),
}
func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
switch fd.Kind() {
case protoreflect.BoolKind:
return unmarshalValue(in, new(bool))
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
return unmarshalValue(trimQuote(in), new(int32))
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return unmarshalValue(trimQuote(in), new(int64))
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
return unmarshalValue(trimQuote(in), new(uint32))
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return unmarshalValue(trimQuote(in), new(uint64))
case protoreflect.FloatKind:
if f, ok := nonFinite[string(in)]; ok {
return protoreflect.ValueOfFloat32(float32(f)), nil
}
return unmarshalValue(trimQuote(in), new(float32))
case protoreflect.DoubleKind:
if f, ok := nonFinite[string(in)]; ok {
return protoreflect.ValueOfFloat64(float64(f)), nil
}
return unmarshalValue(trimQuote(in), new(float64))
case protoreflect.StringKind:
return unmarshalValue(in, new(string))
case protoreflect.BytesKind:
return unmarshalValue(in, new([]byte))
case protoreflect.EnumKind:
if hasPrefixAndSuffix('"', in, '"') {
vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
if vd == nil {
return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
}
return protoreflect.ValueOfEnum(vd.Number()), nil
}
return unmarshalValue(in, new(protoreflect.EnumNumber))
case protoreflect.MessageKind, protoreflect.GroupKind:
err := u.unmarshalMessage(v.Message(), in)
return v, err
default:
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
}
}
func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
err := json.Unmarshal(in, v)
return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
}
func unquoteString(in string) (out string, err error) {
err = json.Unmarshal([]byte(in), &out)
return out, err
}
func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
return true
}
return false
}
// trimQuote is like unquoteString but simply strips surrounding quotes.
// This is incorrect, but is behavior done by the legacy implementation.
func trimQuote(in []byte) []byte {
if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
in = in[1 : len(in)-1]
}
return in
}
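Editor's note: this legacy decoder is dropped from the vendor tree; its deprecation notice points to protojson. A hedged migration sketch, with an illustrative message type and options.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// jsonpb.Unmarshaler{AllowUnknownFields: true} maps to DiscardUnknown here.
	opts := protojson.UnmarshalOptions{DiscardUnknown: true}

	var v structpb.Struct
	if err := opts.Unmarshal([]byte(`{"region":"us-east-1","replicas":3}`), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Fields["replicas"].GetNumberValue()) // 3
}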

View File

@ -1,559 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jsonpb
import (
"encoding/json"
"errors"
"fmt"
"io"
"math"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapJSONMarshalV2 = false
// Marshaler is a configurable object for marshaling protocol buffer messages
// to the specified JSON representation.
type Marshaler struct {
// OrigName specifies whether to use the original protobuf name for fields.
OrigName bool
// EnumsAsInts specifies whether to render enum values as integers,
// as opposed to string values.
EnumsAsInts bool
// EmitDefaults specifies whether to render fields with zero values.
EmitDefaults bool
// Indent controls whether the output is compact or not.
// If empty, the output is compact JSON. Otherwise, every JSON object
// entry and JSON array value will be on its own line.
// Each line will be preceded by repeated copies of Indent, where the
// number of copies is the current indentation depth.
Indent string
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
// If unset, the global registry is used by default.
AnyResolver AnyResolver
}
// JSONPBMarshaler is implemented by protobuf messages that customize the
// way they are marshaled to JSON. Messages that implement this should also
// implement JSONPBUnmarshaler so that the custom format can be parsed.
//
// The JSON marshaling must follow the proto to JSON specification:
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBMarshaler interface {
MarshalJSONPB(*Marshaler) ([]byte, error)
}
// Marshal serializes a protobuf message as JSON into w.
func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
b, err := jm.marshal(m)
if len(b) > 0 {
if _, err := w.Write(b); err != nil {
return err
}
}
return err
}
// MarshalToString serializes a protobuf message as JSON in string form.
func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
b, err := jm.marshal(m)
if err != nil {
return "", err
}
return string(b), nil
}
func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
v := reflect.ValueOf(m)
if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
return nil, errors.New("Marshal called with nil")
}
// Check for custom marshalers first since they may not properly
// implement protobuf reflection that the logic below relies on.
if jsm, ok := m.(JSONPBMarshaler); ok {
return jsm.MarshalJSONPB(jm)
}
if wrapJSONMarshalV2 {
opts := protojson.MarshalOptions{
UseProtoNames: jm.OrigName,
UseEnumNumbers: jm.EnumsAsInts,
EmitUnpopulated: jm.EmitDefaults,
Indent: jm.Indent,
}
if jm.AnyResolver != nil {
opts.Resolver = anyResolver{jm.AnyResolver}
}
return opts.Marshal(proto.MessageReflect(m).Interface())
} else {
// Check for unpopulated required fields first.
m2 := proto.MessageReflect(m)
if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
return nil, err
}
w := jsonWriter{Marshaler: jm}
err := w.marshalMessage(m2, "", "")
return w.buf, err
}
}
type jsonWriter struct {
*Marshaler
buf []byte
}
func (w *jsonWriter) write(s string) {
w.buf = append(w.buf, s...)
}
func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
b, err := jsm.MarshalJSONPB(w.Marshaler)
if err != nil {
return err
}
if typeURL != "" {
// we are marshaling this object to an Any type
var js map[string]*json.RawMessage
if err = json.Unmarshal(b, &js); err != nil {
return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
}
turl, err := json.Marshal(typeURL)
if err != nil {
return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
}
js["@type"] = (*json.RawMessage)(&turl)
if b, err = json.Marshal(js); err != nil {
return err
}
}
w.write(string(b))
return nil
}
md := m.Descriptor()
fds := md.Fields()
// Handle well-known types.
const secondInNanos = int64(time.Second / time.Nanosecond)
switch wellKnownType(md.FullName()) {
case "Any":
return w.marshalAny(m, indent)
case "BoolValue", "BytesValue", "StringValue",
"Int32Value", "UInt32Value", "FloatValue",
"Int64Value", "UInt64Value", "DoubleValue":
fd := fds.ByNumber(1)
return w.marshalValue(fd, m.Get(fd), indent)
case "Duration":
const maxSecondsInDuration = 315576000000
// "Generated output always contains 0, 3, 6, or 9 fractional digits,
// depending on required precision."
s := m.Get(fds.ByNumber(1)).Int()
ns := m.Get(fds.ByNumber(2)).Int()
if s < -maxSecondsInDuration || s > maxSecondsInDuration {
return fmt.Errorf("seconds out of range %v", s)
}
if ns <= -secondInNanos || ns >= secondInNanos {
return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
}
if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
return errors.New("signs of seconds and nanos do not match")
}
var sign string
if s < 0 || ns < 0 {
sign, s, ns = "-", -1*s, -1*ns
}
x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, ".000")
w.write(fmt.Sprintf(`"%vs"`, x))
return nil
case "Timestamp":
// "RFC 3339, where generated output will always be Z-normalized
// and uses 0, 3, 6 or 9 fractional digits."
s := m.Get(fds.ByNumber(1)).Int()
ns := m.Get(fds.ByNumber(2)).Int()
if ns < 0 || ns >= secondInNanos {
return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
}
t := time.Unix(s, ns).UTC()
// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
x := t.Format("2006-01-02T15:04:05.000000000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, ".000")
w.write(fmt.Sprintf(`"%vZ"`, x))
return nil
case "Value":
// JSON value, which is a null, number, string, bool, object, or array.
od := md.Oneofs().Get(0)
fd := m.WhichOneof(od)
if fd == nil {
return errors.New("nil Value")
}
return w.marshalValue(fd, m.Get(fd), indent)
case "Struct", "ListValue":
// JSON object or array.
fd := fds.ByNumber(1)
return w.marshalValue(fd, m.Get(fd), indent)
}
w.write("{")
if w.Indent != "" {
w.write("\n")
}
firstField := true
if typeURL != "" {
if err := w.marshalTypeURL(indent, typeURL); err != nil {
return err
}
firstField = false
}
for i := 0; i < fds.Len(); {
fd := fds.Get(i)
if od := fd.ContainingOneof(); od != nil {
fd = m.WhichOneof(od)
i += od.Fields().Len()
if fd == nil {
continue
}
} else {
i++
}
v := m.Get(fd)
if !m.Has(fd) {
if !w.EmitDefaults || fd.ContainingOneof() != nil {
continue
}
if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
}
}
if !firstField {
w.writeComma()
}
if err := w.marshalField(fd, v, indent); err != nil {
return err
}
firstField = false
}
// Handle proto2 extensions.
if md.ExtensionRanges().Len() > 0 {
// Collect a sorted list of all extension descriptor and values.
type ext struct {
desc protoreflect.FieldDescriptor
val protoreflect.Value
}
var exts []ext
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
exts = append(exts, ext{fd, v})
}
return true
})
sort.Slice(exts, func(i, j int) bool {
return exts[i].desc.Number() < exts[j].desc.Number()
})
for _, ext := range exts {
if !firstField {
w.writeComma()
}
if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
return err
}
firstField = false
}
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
}
w.write("}")
return nil
}
func (w *jsonWriter) writeComma() {
if w.Indent != "" {
w.write(",\n")
} else {
w.write(",")
}
}
func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
// "If the Any contains a value that has a special JSON mapping,
// it will be converted as follows: {"@type": xxx, "value": yyy}.
// Otherwise, the value will be converted into a JSON object,
// and the "@type" field will be inserted to indicate the actual data type."
md := m.Descriptor()
typeURL := m.Get(md.Fields().ByNumber(1)).String()
rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
var m2 protoreflect.Message
if w.AnyResolver != nil {
mi, err := w.AnyResolver.Resolve(typeURL)
if err != nil {
return err
}
m2 = proto.MessageReflect(mi)
} else {
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
if err != nil {
return err
}
m2 = mt.New()
}
if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
return err
}
if wellKnownType(m2.Descriptor().FullName()) == "" {
return w.marshalMessage(m2, indent, typeURL)
}
w.write("{")
if w.Indent != "" {
w.write("\n")
}
if err := w.marshalTypeURL(indent, typeURL); err != nil {
return err
}
w.writeComma()
if w.Indent != "" {
w.write(indent)
w.write(w.Indent)
w.write(`"value": `)
} else {
w.write(`"value":`)
}
if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
return err
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
}
w.write("}")
return nil
}
func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
if w.Indent != "" {
w.write(indent)
w.write(w.Indent)
}
w.write(`"@type":`)
if w.Indent != "" {
w.write(" ")
}
b, err := json.Marshal(typeURL)
if err != nil {
return err
}
w.write(string(b))
return nil
}
// marshalField writes field description and value to the Writer.
func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
if w.Indent != "" {
w.write(indent)
w.write(w.Indent)
}
w.write(`"`)
switch {
case fd.IsExtension():
// For message set, use the full name of the message as the extension name.
name := string(fd.FullName())
if isMessageSet(fd.ContainingMessage()) {
name = strings.TrimSuffix(name, ".message_set_extension")
}
w.write("[" + name + "]")
case w.OrigName:
name := string(fd.Name())
if fd.Kind() == protoreflect.GroupKind {
name = string(fd.Message().Name())
}
w.write(name)
default:
w.write(string(fd.JSONName()))
}
w.write(`":`)
if w.Indent != "" {
w.write(" ")
}
return w.marshalValue(fd, v, indent)
}
func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
switch {
case fd.IsList():
w.write("[")
comma := ""
lv := v.List()
for i := 0; i < lv.Len(); i++ {
w.write(comma)
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
w.write(w.Indent)
}
if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
return err
}
comma = ","
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
}
w.write("]")
return nil
case fd.IsMap():
kfd := fd.MapKey()
vfd := fd.MapValue()
mv := v.Map()
// Collect a sorted list of all map keys and values.
type entry struct{ key, val protoreflect.Value }
var entries []entry
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
entries = append(entries, entry{k.Value(), v})
return true
})
sort.Slice(entries, func(i, j int) bool {
switch kfd.Kind() {
case protoreflect.BoolKind:
return !entries[i].key.Bool() && entries[j].key.Bool()
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return entries[i].key.Int() < entries[j].key.Int()
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return entries[i].key.Uint() < entries[j].key.Uint()
case protoreflect.StringKind:
return entries[i].key.String() < entries[j].key.String()
default:
panic("invalid kind")
}
})
w.write(`{`)
comma := ""
for _, entry := range entries {
w.write(comma)
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
w.write(w.Indent)
}
s := fmt.Sprint(entry.key.Interface())
b, err := json.Marshal(s)
if err != nil {
return err
}
w.write(string(b))
w.write(`:`)
if w.Indent != "" {
w.write(` `)
}
if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
return err
}
comma = ","
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
}
w.write(`}`)
return nil
default:
return w.marshalSingularValue(fd, v, indent)
}
}
func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
switch {
case !v.IsValid():
w.write("null")
return nil
case fd.Message() != nil:
return w.marshalMessage(v.Message(), indent+w.Indent, "")
case fd.Enum() != nil:
if fd.Enum().FullName() == "google.protobuf.NullValue" {
w.write("null")
return nil
}
vd := fd.Enum().Values().ByNumber(v.Enum())
if vd == nil || w.EnumsAsInts {
w.write(strconv.Itoa(int(v.Enum())))
} else {
w.write(`"` + string(vd.Name()) + `"`)
}
return nil
default:
switch v.Interface().(type) {
case float32, float64:
switch {
case math.IsInf(v.Float(), +1):
w.write(`"Infinity"`)
return nil
case math.IsInf(v.Float(), -1):
w.write(`"-Infinity"`)
return nil
case math.IsNaN(v.Float()):
w.write(`"NaN"`)
return nil
}
case int64, uint64:
w.write(fmt.Sprintf(`"%d"`, v.Interface()))
return nil
}
b, err := json.Marshal(v.Interface())
if err != nil {
return err
}
w.write(string(b))
return nil
}
}
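Editor's note: the same applies to this legacy encoder. A hedged sketch of the equivalent protojson options; the message choice is illustrative.

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Marshaler{OrigName: true, EmitDefaults: true, Indent: "  "} maps to:
	opts := protojson.MarshalOptions{
		UseProtoNames:   true,
		EmitUnpopulated: true,
		Indent:          "  ",
	}

	b, err := opts.Marshal(durationpb.New(90 * time.Second))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "90s" -- well-known types keep their special JSON mapping
}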

View File

@ -1,69 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jsonpb provides functionality to marshal and unmarshal between a
// protocol buffer message and JSON. It follows the specification at
// https://developers.google.com/protocol-buffers/docs/proto3#json.
//
// Do not rely on the default behavior of the standard encoding/json package
// when called on generated message types as it does not operate correctly.
//
// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
// package instead.
package jsonpb
import (
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoimpl"
)
// AnyResolver takes a type URL, present in an Any message,
// and resolves it into an instance of the associated message.
type AnyResolver interface {
Resolve(typeURL string) (proto.Message, error)
}
type anyResolver struct{ AnyResolver }
func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
return r.FindMessageByURL(string(message))
}
func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
m, err := r.Resolve(url)
if err != nil {
return nil, err
}
return protoimpl.X.MessageTypeOf(m), nil
}
func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
return protoregistry.GlobalTypes.FindExtensionByName(field)
}
func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
func wellKnownType(s protoreflect.FullName) string {
if s.Parent() == "google.protobuf" {
switch s.Name() {
case "Empty", "Any",
"BoolValue", "BytesValue", "StringValue",
"Int32Value", "UInt32Value", "FloatValue",
"Int64Value", "UInt64Value", "DoubleValue",
"Duration", "Timestamp",
"NullValue", "Struct", "Value", "ListValue":
return string(s.Name())
}
}
return ""
}
func isMessageSet(md protoreflect.MessageDescriptor) bool {
ms, ok := md.(interface{ IsMessageSet() bool })
return ok && ms.IsMessageSet()
}
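Editor's note: the Any handling this helper file supported is covered by the anypb package in current protobuf releases. A sketch of packing and unpacking without a custom AnyResolver; the wrapper type is illustrative.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	src := wrapperspb.String("hello")

	a, err := anypb.New(src) // TypeUrl becomes type.googleapis.com/google.protobuf.StringValue
	if err != nil {
		panic(err)
	}

	dst := &wrapperspb.StringValue{}
	if err := a.UnmarshalTo(dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.GetValue()) // hello
}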

View File

@ -1,179 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"fmt"
"strings"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
anypb "github.com/golang/protobuf/ptypes/any"
)
const urlPrefix = "type.googleapis.com/"
// AnyMessageName returns the message name contained in an anypb.Any message.
// Most type assertions should use the Is function instead.
//
// Deprecated: Call the any.MessageName method instead.
func AnyMessageName(any *anypb.Any) (string, error) {
name, err := anyMessageName(any)
return string(name), err
}
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
name := protoreflect.FullName(any.TypeUrl)
if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
name = name[i+len("/"):]
}
if !name.IsValid() {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
return name, nil
}
// MarshalAny marshals the given message m into an anypb.Any message.
//
// Deprecated: Call the anypb.New function instead.
func MarshalAny(m proto.Message) (*anypb.Any, error) {
switch dm := m.(type) {
case DynamicAny:
m = dm.Message
case *DynamicAny:
if dm == nil {
return nil, proto.ErrNil
}
m = dm.Message
}
b, err := proto.Marshal(m)
if err != nil {
return nil, err
}
return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
}
// Empty returns a new message of the type specified in an anypb.Any message.
// It returns protoregistry.NotFound if the corresponding message type could not
// be resolved in the global registry.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
// to resolve the message name and create a new instance of it.
func Empty(any *anypb.Any) (proto.Message, error) {
name, err := anyMessageName(any)
if err != nil {
return nil, err
}
mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
if err != nil {
return nil, err
}
return proto.MessageV1(mt.New().Interface()), nil
}
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
// into the provided message m. It returns an error if the target message
// does not match the type in the Any message or if an unmarshal error occurs.
//
// The target message m may be a *DynamicAny message. If the underlying message
// type could not be resolved, then this returns protoregistry.NotFound.
//
// Deprecated: Call the any.UnmarshalTo method instead.
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
if dm, ok := m.(*DynamicAny); ok {
if dm.Message == nil {
var err error
dm.Message, err = Empty(any)
if err != nil {
return err
}
}
m = dm.Message
}
anyName, err := AnyMessageName(any)
if err != nil {
return err
}
msgName := proto.MessageName(m)
if anyName != msgName {
return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
}
return proto.Unmarshal(any.Value, m)
}
// Is reports whether the Any message contains a message of the specified type.
//
// Deprecated: Call the any.MessageIs method instead.
func Is(any *anypb.Any, m proto.Message) bool {
if any == nil || m == nil {
return false
}
name := proto.MessageName(m)
if !strings.HasSuffix(any.TypeUrl, name) {
return false
}
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
}
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in an anypb.Any message.
// The allocated message is stored in the embedded proto.Message.
//
// Example:
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
//
// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
// the any message contents into a new instance of the underlying message.
type DynamicAny struct{ proto.Message }
func (m DynamicAny) String() string {
if m.Message == nil {
return "<nil>"
}
return m.Message.String()
}
func (m DynamicAny) Reset() {
if m.Message == nil {
return
}
m.Message.Reset()
}
func (m DynamicAny) ProtoMessage() {
return
}
func (m DynamicAny) ProtoReflect() protoreflect.Message {
if m.Message == nil {
return nil
}
return dynamicAny{proto.MessageReflect(m.Message)}
}
type dynamicAny struct{ protoreflect.Message }
func (m dynamicAny) Type() protoreflect.MessageType {
return dynamicAnyType{m.Message.Type()}
}
func (m dynamicAny) New() protoreflect.Message {
return dynamicAnyType{m.Message.Type()}.New()
}
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
return DynamicAny{proto.MessageV1(m.Message.Interface())}
}
type dynamicAnyType struct{ protoreflect.MessageType }
func (t dynamicAnyType) New() protoreflect.Message {
return dynamicAny{t.MessageType.New()}
}
func (t dynamicAnyType) Zero() protoreflect.Message {
return dynamicAny{t.MessageType.Zero()}
}
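The deprecation notes in the deleted ptypes Any helpers point at anypb; a minimal sketch of the suggested replacement calls, again with durationpb only as an example payload:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	src := durationpb.New(0)
	a, err := anypb.New(src) // replaces ptypes.MarshalAny
	if err != nil {
		panic(err)
	}
	dst := &durationpb.Duration{}
	if err := a.UnmarshalTo(dst); err != nil { // replaces ptypes.UnmarshalAny
		panic(err)
	}
	fmt.Println(a.GetTypeUrl(), dst.AsDuration())
}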

View File

@ -1,62 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto
package any
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/any.proto.
type Any = anypb.Any
var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}
var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
}

View File

@ -1,10 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ptypes provides functionality for interacting with well-known types.
//
// Deprecated: Well-known types have specialized functionality directly
// injected into the generated packages for each message type.
// See the deprecation notice for each function for the suggested alternative.
package ptypes

View File

@ -1,76 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"errors"
"fmt"
"time"
durationpb "github.com/golang/protobuf/ptypes/duration"
)
// Range of google.protobuf.Duration as specified in duration.proto.
// This is about 10,000 years in seconds.
const (
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
// Duration converts a durationpb.Duration to a time.Duration.
// Duration returns an error if dur is invalid or overflows a time.Duration.
//
// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
func Duration(dur *durationpb.Duration) (time.Duration, error) {
if err := validateDuration(dur); err != nil {
return 0, err
}
d := time.Duration(dur.Seconds) * time.Second
if int64(d/time.Second) != dur.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
if dur.Nanos != 0 {
d += time.Duration(dur.Nanos) * time.Nanosecond
if (d < 0) != (dur.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
}
return d, nil
}
// DurationProto converts a time.Duration to a durationpb.Duration.
//
// Deprecated: Call the durationpb.New function instead.
func DurationProto(d time.Duration) *durationpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
return &durationpb.Duration{
Seconds: int64(secs),
Nanos: int32(nanos),
}
}
// validateDuration determines whether the durationpb.Duration is valid
// according to the definition in google/protobuf/duration.proto.
// A valid durpb.Duration may still be too large to fit into a time.Duration
// Note that the range of durationpb.Duration is about 10,000 years,
// while the range of time.Duration is about 290 years.
func validateDuration(dur *durationpb.Duration) error {
if dur == nil {
return errors.New("duration: nil Duration")
}
if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", dur)
}
if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", dur)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
}
return nil
}
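For the duration helpers deleted here, the hinted replacements live in durationpb; a small illustrative sketch, assuming nothing beyond the well-known-types API:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := durationpb.New(90 * time.Second) // replaces ptypes.DurationProto
	if err := d.CheckValid(); err != nil { // the validity half of ptypes.Duration
		panic(err)
	}
	fmt.Println(d.AsDuration()) // the conversion half of ptypes.Duration, prints 1m30s
}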

View File

@ -1,63 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
package duration
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/duration.proto.
type Duration = durationpb.Duration
var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
}

View File

@ -1,112 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"errors"
"fmt"
"time"
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
)
// Range of google.protobuf.Duration as specified in timestamp.proto.
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
minValidSeconds = -62135596800
// Seconds field just after the latest valid Timestamp.
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
maxValidSeconds = 253402300800
)
// Timestamp converts a timestamppb.Timestamp to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return
// value is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
//
// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
// Don't return the zero value on error, because corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
if ts == nil {
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
} else {
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
return t, validateTimestamp(ts)
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
//
// Deprecated: Call the timestamppb.Now function instead.
func TimestampNow() *timestamppb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
}
return ts
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
//
// Deprecated: Call the timestamppb.New function instead.
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
ts := &timestamppb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps.
// For invalid Timestamps, it returns an error message in parentheses.
//
// Deprecated: Call the ts.AsTime method instead,
// followed by a call to the Format method on the time.Time value.
func TimestampString(ts *timestamppb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
// and has a Nanos field in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes the problem.
//
// Every valid Timestamp can be represented by a time.Time,
// but the converse is not true.
func validateTimestamp(ts *timestamppb.Timestamp) error {
if ts == nil {
return errors.New("timestamp: nil Timestamp")
}
if ts.Seconds < minValidSeconds {
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
}
if ts.Seconds >= maxValidSeconds {
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
}
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
}
return nil
}
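Likewise for the deleted timestamp helpers, timestamppb covers the same ground; an illustrative sketch:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := timestamppb.New(time.Now()) // replaces ptypes.TimestampProto
	if err := ts.CheckValid(); err != nil {
		panic(err)
	}
	fmt.Println(ts.AsTime().Format(time.RFC3339Nano)) // replaces ptypes.TimestampString
}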

View File

@ -1,5 +1,36 @@
# Changelog
## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
### Features
* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
### Bug Fixes
* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
### Features
* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
### Features
* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
### Fixes
* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)

View File

@ -11,7 +11,7 @@ please explain why in the pull request description.
### Releasing
Commits that would precipitate a SemVer change, as desrcibed in the Conventional
Commits that would precipitate a SemVer change, as described in the Conventional
Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
to create a release candidate pull request. Once submitted, `release-please`
will create a release.

View File

@ -17,6 +17,12 @@ var (
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
// The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
Max = UUID{
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
}
)
// NewHash returns a new UUID derived from the hash of space concatenated with

View File

@ -108,12 +108,23 @@ func setClockSequence(seq int) {
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1 and 2 UUIDs.
// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
return Time(time)
var t Time
switch uuid.Version() {
case 6:
time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
t = Time(time)
case 7:
time := binary.BigEndian.Uint64(uuid[:8])
t = Time((time>>16)*10000 + g1582ns100)
default: // forward compatible
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
t = Time(time)
}
return t
}
// ClockSequence returns the clock sequence encoded in uuid.
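Since Time() now decodes the v6/v7 layouts, the creation time of a v7 UUID can be recovered directly; a small sketch (UnixTime is an existing helper on the Time type, not part of this hunk):

package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	sec, nsec := u.Time().UnixTime() // Time() now understands the v7 layout
	fmt.Println(time.Unix(sec, nsec).UTC())
}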

View File

@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool {
return ok
}
// Parse decodes s into a UUID or returns an error. Both the standard UUID
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
// the standard UUID forms defined in RFC 4122
// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
// Parse accepts non-standard strings such as the raw hex encoding
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
// examined in the latter case. Parse should not be used to validate strings as
// it parses non-standard encodings as indicated above.
func Parse(s string) (UUID, error) {
var uuid UUID
switch len(s) {
@ -182,6 +186,59 @@ func Must(uuid UUID, err error) UUID {
return uuid
}
// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
// It returns an error if the format is invalid, otherwise nil.
func Validate(s string) error {
switch len(s) {
// Standard UUID format
case 36:
// UUID with "urn:uuid:" prefix
case 36 + 9:
if !strings.EqualFold(s[:9], "urn:uuid:") {
return fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
// UUID enclosed in braces
case 36 + 2:
if s[0] != '{' || s[len(s)-1] != '}' {
return fmt.Errorf("invalid bracketed UUID format")
}
s = s[1 : len(s)-1]
// UUID without hyphens
case 32:
for i := 0; i < len(s); i += 2 {
_, ok := xtob(s[i], s[i+1])
if !ok {
return errors.New("invalid UUID format")
}
}
default:
return invalidLengthError{len(s)}
}
// Check for standard UUID format
if len(s) == 36 {
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return errors.New("invalid UUID format")
}
for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
if _, ok := xtob(s[x], s[x+1]); !ok {
return errors.New("invalid UUID format")
}
}
}
return nil
}
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
@ -294,3 +351,15 @@ func DisableRandPool() {
poolMu.Lock()
poolPos = randPoolSize
}
// UUIDs is a slice of UUID types.
type UUIDs []UUID
// Strings returns a string slice containing the string form of each UUID in uuids.
func (uuids UUIDs) Strings() []string {
var uuidStrs = make([]string, len(uuids))
for i, uuid := range uuids {
uuidStrs[i] = uuid.String()
}
return uuidStrs
}
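A brief sketch of the newly added Validate and UUIDs.Strings helpers (illustrative only, not part of the vendored change):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
	if err := uuid.Validate(s); err != nil { // checks the format without allocating a UUID
		panic(err)
	}
	ids := uuid.UUIDs{uuid.MustParse(s), uuid.Nil}
	fmt.Println(ids.Strings()) // one string per UUID, in order
}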

56
vendor/github.com/google/uuid/version6.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2023 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "encoding/binary"
// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
//
// NewV6 returns a Version 6 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set, NewV6 sets the NodeID to random bits automatically. If the clock sequence has
// not been set by SetClockSequence then it will be set automatically. If GetTime fails
// to return the current time, NewV6 returns Nil and an error.
func NewV6() (UUID, error) {
var uuid UUID
now, seq, err := GetTime()
if err != nil {
return uuid, err
}
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| time_high |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| time_mid | time_low_and_version |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|clk_seq_hi_res | clk_seq_low | node (0-1) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| node (2-5) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
binary.BigEndian.PutUint64(uuid[0:], uint64(now))
binary.BigEndian.PutUint16(uuid[8:], seq)
uuid[6] = 0x60 | (uuid[6] & 0x0F)
uuid[8] = 0x80 | (uuid[8] & 0x3F)
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
copy(uuid[10:], nodeID[:])
nodeMu.Unlock()
return uuid, nil
}
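Illustrative use of the new constructor (not part of the vendored file):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewV6()
	if err != nil {
		panic(err)
	}
	fmt.Println(u, u.Version()) // Version() reports 6 for UUIDs built here
}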

104
vendor/github.com/google/uuid/version7.go generated vendored Normal file
View File

@ -0,0 +1,104 @@
// Copyright 2023 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// UUID version 7 features a time-ordered value field derived from the widely
// implemented and well known Unix Epoch timestamp source,
// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
// It also has improved entropy characteristics over versions 1 and 6.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
//
// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
//
// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
// Uses the randomness pool if it was enabled with EnableRandPool.
// On error, NewV7 returns Nil and an error
func NewV7() (UUID, error) {
uuid, err := NewRandom()
if err != nil {
return uuid, err
}
makeV7(uuid[:])
return uuid, nil
}
// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
// It uses NewRandomFromReader to fill the random bits.
// On error, NewV7FromReader returns Nil and an error.
func NewV7FromReader(r io.Reader) (UUID, error) {
uuid, err := NewRandomFromReader(r)
if err != nil {
return uuid, err
}
makeV7(uuid[:])
return uuid, nil
}
// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets the version to b0111 (uuid[6]).
// uuid[8] already has the right variant bits set (10).
// See NewV7 and NewV7FromReader.
func makeV7(uuid []byte) {
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms | ver | rand_a (12 bit seq) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|var| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
_ = uuid[15] // bounds check
t, s := getV7Time()
uuid[0] = byte(t >> 40)
uuid[1] = byte(t >> 32)
uuid[2] = byte(t >> 24)
uuid[3] = byte(t >> 16)
uuid[4] = byte(t >> 8)
uuid[5] = byte(t)
uuid[6] = 0x70 | (0x0F & byte(s>>8))
uuid[7] = byte(s)
}
// lastV7time is the last time we returned stored as:
//
// 52 bits of time in milliseconds since epoch
// 12 bits of (fractional nanoseconds) >> 8
var lastV7time int64
const nanoPerMilli = 1000000
// getV7Time returns the time in milliseconds and nanoseconds / 256.
// The returned (milli << 12 + seq) is guaranteed to be greater than
// (milli << 12 + seq) returned by any previous call to getV7Time.
func getV7Time() (milli, seq int64) {
timeMu.Lock()
defer timeMu.Unlock()
nano := timeNow().UnixNano()
milli = nano / nanoPerMilli
// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
seq = (nano - milli*nanoPerMilli) >> 8
now := milli<<12 + seq
if now <= lastV7time {
now = lastV7time + 1
milli = now >> 12
seq = now & 0xfff
}
lastV7time = now
return milli, seq
}
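Given the getV7Time guarantee above, consecutive v7 UUIDs generated by one process sort in creation order; a quick sketch:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a, errA := uuid.NewV7()
	b, errB := uuid.NewV7()
	if errA != nil || errB != nil {
		panic("random source unavailable")
	}
	// The strictly increasing (milli, seq) pair occupies the leading bytes,
	// so later IDs compare greater both as bytes and as strings.
	fmt.Println(a.String() < b.String()) // true
}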

View File

@ -24,7 +24,7 @@ go_test(
embed = [":httprule"],
deps = [
"//utilities",
"@com_github_golang_glog//:glog",
"@org_golang_google_grpc//grpclog",
],
)

View File

@ -26,7 +26,7 @@ go_library(
deps = [
"//internal/httprule",
"//utilities",
"@go_googleapis//google/api:httpbody_go_proto",
"@org_golang_google_genproto_googleapis_api//httpbody",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//grpclog",
"@org_golang_google_grpc//health/grpc_health_v1",
@ -70,9 +70,9 @@ go_test(
"//utilities",
"@com_github_google_go_cmp//cmp",
"@com_github_google_go_cmp//cmp/cmpopts",
"@go_googleapis//google/api:httpbody_go_proto",
"@go_googleapis//google/rpc:errdetails_go_proto",
"@go_googleapis//google/rpc:status_go_proto",
"@org_golang_google_genproto_googleapis_api//httpbody",
"@org_golang_google_genproto_googleapis_rpc//errdetails",
"@org_golang_google_genproto_googleapis_rpc//status",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//health/grpc_health_v1",

View File

@ -137,7 +137,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
doForwardTrailers := requestAcceptsTrailers(r)
if doForwardTrailers {
handleForwardResponseTrailerHeader(w, md)
handleForwardResponseTrailerHeader(w, mux, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
@ -152,7 +152,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
}
if doForwardTrailers {
handleForwardResponseTrailer(w, md)
handleForwardResponseTrailer(w, mux, md)
}
}

View File

@ -27,7 +27,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
var root interface{}
if err := json.NewDecoder(r).Decode(&root); err != nil {
if err == io.EOF {
if errors.Is(err, io.EOF) {
return fm, nil
}
return nil, err
@ -41,7 +41,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
m, ok := item.node.(map[string]interface{})
switch {
case ok:
case ok && len(m) > 0:
// if the item is an object, then enqueue all of its children
for k, v := range m {
if item.msg == nil {
@ -96,6 +96,8 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
queue = append(queue, child)
}
}
case ok && len(m) == 0:
fallthrough
case len(item.path) > 0:
// otherwise, it's a leaf node so print its path
fm.Paths = append(fm.Paths, item.path)

View File

@ -2,7 +2,7 @@ package runtime
import (
"context"
"fmt"
"errors"
"io"
"net/http"
"net/textproto"
@ -48,7 +48,7 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
var wroteHeader bool
for {
resp, err := recv()
if err == io.EOF {
if errors.Is(err, io.EOF) {
return
}
if err != nil {
@ -108,18 +108,20 @@ func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, m
}
}
func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
func handleForwardResponseTrailerHeader(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
for k := range md.TrailerMD {
tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
w.Header().Add("Trailer", tKey)
if h, ok := mux.outgoingTrailerMatcher(k); ok {
w.Header().Add("Trailer", textproto.CanonicalMIMEHeaderKey(h))
}
}
}
func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
func handleForwardResponseTrailer(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
for k, vs := range md.TrailerMD {
tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
for _, v := range vs {
w.Header().Add(tKey, v)
if h, ok := mux.outgoingTrailerMatcher(k); ok {
for _, v := range vs {
w.Header().Add(h, v)
}
}
}
}
@ -147,12 +149,10 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
doForwardTrailers := requestAcceptsTrailers(req)
if doForwardTrailers {
handleForwardResponseTrailerHeader(w, md)
handleForwardResponseTrailerHeader(w, mux, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
handleForwardResponseTrailerHeader(w, md)
contentType := marshaler.ContentType(resp)
w.Header().Set("Content-Type", contentType)
@ -178,7 +178,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
}
if doForwardTrailers {
handleForwardResponseTrailer(w, md)
handleForwardResponseTrailer(w, mux, md)
}
}

View File

@ -26,7 +26,7 @@ func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
if httpBody, ok := v.(*httpbody.HttpBody); ok {
return httpBody.Data, nil
return httpBody.GetData(), nil
}
return h.Marshaler.Marshal(v)
}

View File

@ -57,6 +57,7 @@ type ServeMux struct {
marshalers marshalerRegistry
incomingHeaderMatcher HeaderMatcherFunc
outgoingHeaderMatcher HeaderMatcherFunc
outgoingTrailerMatcher HeaderMatcherFunc
metadataAnnotators []func(context.Context, *http.Request) metadata.MD
errorHandler ErrorHandlerFunc
streamErrorHandler StreamErrorHandlerFunc
@ -114,10 +115,18 @@ func DefaultHeaderMatcher(key string) (string, bool) {
return "", false
}
func defaultOutgoingHeaderMatcher(key string) (string, bool) {
return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
}
func defaultOutgoingTrailerMatcher(key string) (string, bool) {
return fmt.Sprintf("%s%s", MetadataTrailerPrefix, key), true
}
// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
//
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return the modified header.
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
for _, header := range fn.matchedMalformedHeaders() {
grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
@ -147,13 +156,24 @@ func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
//
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
// passed to http response returned from gateway. To transform the header before passing to response,
// matcher should return modified header.
// matcher should return the modified header.
func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
return func(mux *ServeMux) {
mux.outgoingHeaderMatcher = fn
}
}
// WithOutgoingTrailerMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
//
// This matcher will be called with each header in response trailer metadata. If matcher returns true, that header will be
// passed to http response returned from gateway. To transform the header before passing to response,
// matcher should return the modified header.
func WithOutgoingTrailerMatcher(fn HeaderMatcherFunc) ServeMuxOption {
return func(mux *ServeMux) {
mux.outgoingTrailerMatcher = fn
}
}
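A sketch of wiring the new option into a gateway mux (assuming the v2 module path; the trailer key shown is just an example, not something this diff defines):

package main

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	mux := runtime.NewServeMux(
		// Forward only the "retry-after" gRPC trailer, unprefixed; anything else
		// is dropped instead of getting the default Grpc-Trailer- prefix.
		runtime.WithOutgoingTrailerMatcher(func(key string) (string, bool) {
			if key == "retry-after" {
				return key, true
			}
			return "", false
		}),
	)
	_ = http.ListenAndServe(":8080", mux)
}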
// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
//
// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
@ -273,11 +293,11 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux {
if serveMux.incomingHeaderMatcher == nil {
serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
}
if serveMux.outgoingHeaderMatcher == nil {
serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
}
serveMux.outgoingHeaderMatcher = defaultOutgoingHeaderMatcher
}
if serveMux.outgoingTrailerMatcher == nil {
serveMux.outgoingTrailerMatcher = defaultOutgoingTrailerMatcher
}
return serveMux
@ -321,13 +341,13 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
r.Method = strings.ToUpper(override)
if err := r.ParseForm(); err != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error())
s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
return
}
r.Method = strings.ToUpper(override)
}
var pathComponents []string

View File

@ -51,11 +51,13 @@ func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *u
key = match[1]
values = append([]string{match[2]}, values...)
}
fieldPath := strings.Split(key, ".")
msgValue := msg.ProtoReflect()
fieldPath := normalizeFieldPath(msgValue, strings.Split(key, "."))
if filter.HasCommonPrefix(fieldPath) {
continue
}
if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil {
if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil {
return err
}
}
@ -68,6 +70,38 @@ func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value stri
return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
}
func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string {
newFieldPath := make([]string, 0, len(fieldPath))
for i, fieldName := range fieldPath {
fields := msgValue.Descriptor().Fields()
fieldDesc := fields.ByTextName(fieldName)
if fieldDesc == nil {
fieldDesc = fields.ByJSONName(fieldName)
}
if fieldDesc == nil {
// return initial field path values if no matching message field was found
return fieldPath
}
newFieldPath = append(newFieldPath, string(fieldDesc.Name()))
// If this is the last element, we're done
if i == len(fieldPath)-1 {
break
}
// Only singular message fields are allowed
if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated {
return fieldPath
}
// Get the nested message
msgValue = msgValue.Get(fieldDesc).Message()
}
return newFieldPath
}
func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
if len(fieldPath) < 1 {
return errors.New("no field path")

View File

@ -28,6 +28,8 @@ var (
uint32Type = reflect.TypeOf(uint32(1))
uint64Type = reflect.TypeOf(uint64(1))
uintptrType = reflect.TypeOf(uintptr(1))
float32Type = reflect.TypeOf(float32(1))
float64Type = reflect.TypeOf(float64(1))
@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
case reflect.Struct:
{
// All structs enter here. We're not interested in most types.
if !canConvert(obj1Value, timeType) {
if !obj1Value.CanConvert(timeType) {
break
}
// time.Time can compared!
// time.Time can be compared!
timeObj1, ok := obj1.(time.Time)
if !ok {
timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
case reflect.Slice:
{
// We only care about the []byte type.
if !canConvert(obj1Value, bytesType) {
if !obj1Value.CanConvert(bytesType) {
break
}
@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
}
case reflect.Uintptr:
{
uintptrObj1, ok := obj1.(uintptr)
if !ok {
uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr)
}
uintptrObj2, ok := obj2.(uintptr)
if !ok {
uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr)
}
if uintptrObj1 > uintptrObj2 {
return compareGreater, true
}
if uintptrObj1 == uintptrObj2 {
return compareEqual, true
}
if uintptrObj1 < uintptrObj2 {
return compareLess, true
}
}
}
return compareEqual, false
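With the uintptr branch added above, the ordering assertions accept uintptr operands; for example, in a _test.go file (sketch only):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestUintptrOrdering(t *testing.T) {
	assert.Greater(t, uintptr(2), uintptr(1))
	assert.LessOrEqual(t, uintptr(1), uintptr(1))
}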

View File

@ -1,16 +0,0 @@
//go:build go1.17
// +build go1.17
// TODO: once support for Go 1.16 is dropped, this file can be
// merged/removed with assertion_compare_go1.17_test.go and
// assertion_compare_legacy.go
package assert
import "reflect"
// Wrapper around reflect.Value.CanConvert, for compatibility
// reasons.
func canConvert(value reflect.Value, to reflect.Type) bool {
return value.CanConvert(to)
}

View File

@ -1,16 +0,0 @@
//go:build !go1.17
// +build !go1.17
// TODO: once support for Go 1.16 is dropped, this file can be
// merged/removed with assertion_compare_go1.17_test.go and
// assertion_compare_can_convert.go
package assert
import "reflect"
// Older versions of Go does not have the reflect.Value.CanConvert
// method.
func canConvert(value reflect.Value, to reflect.Type) bool {
return false
}

View File

@ -1,7 +1,4 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
package assert
@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// EqualValuesf asserts that two objects are equal or convertable to the same types
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf
return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
}
// NotImplementsf asserts that an object does not implement the specified interface.
//
// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
}
// NotNilf asserts that the specified object is not nil.
//
// assert.NotNilf(t, err, "error message %s", "formatted")
@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// NotSubsetf asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
// contain all elements given in the specified subset list(array, slice...) or
// map.
//
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Subsetf asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
// Subsetf asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset list(array, slice...) or map.
//
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()

View File

@ -1,7 +1,4 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
package assert
@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
return EqualExportedValuesf(a.t, expected, actual, msg, args...)
}
// EqualValues asserts that two objects are equal or convertable to the same types
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
// a.EqualValues(uint32(123), int32(123))
@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
return EqualValues(a.t, expected, actual, msgAndArgs...)
}
// EqualValuesf asserts that two objects are equal or convertable to the same types
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in
return NotErrorIsf(a.t, err, target, msg, args...)
}
// NotImplements asserts that an object does not implement the specified interface.
//
// a.NotImplements((*MyInterface)(nil), new(MyObject))
func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotImplements(a.t, interfaceObject, object, msgAndArgs...)
}
// NotImplementsf asserts that an object does not implement the specified interface.
//
// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotImplementsf(a.t, interfaceObject, object, msg, args...)
}
// NotNil asserts that the specified object is not nil.
//
// a.NotNil(err)
@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
return NotSamef(a.t, expected, actual, msg, args...)
}
// NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
// NotSubset asserts that the specified list(array, slice...) or map does NOT
// contain all elements given in the specified subset list(array, slice...) or
// map.
//
// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
// a.NotSubset([1, 3, 4], [1, 2])
// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
return NotSubset(a.t, list, subset, msgAndArgs...)
}
// NotSubsetf asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
// contain all elements given in the specified subset list(array, slice...) or
// map.
//
// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
return Samef(a.t, expected, actual, msg, args...)
}
// Subset asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
// Subset asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset list(array, slice...) or map.
//
// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
// a.Subset([1, 2, 3], [1, 2])
// a.Subset({"x": 1, "y": 2}, {"x": 1})
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
return Subset(a.t, list, subset, msgAndArgs...)
}
// Subsetf asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
// Subsetf asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset list(array, slice...) or map.
//
// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()

View File

@ -19,7 +19,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib"
yaml "gopkg.in/yaml.v3"
"gopkg.in/yaml.v3"
)
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} {
return result.Interface()
case reflect.Array, reflect.Slice:
result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len())
var result reflect.Value
if expectedKind == reflect.Array {
result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem()
} else {
result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len())
}
for i := 0; i < expectedValue.Len(); i++ {
index := expectedValue.Index(i)
if isNil(index) {
@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} {
// structures.
//
// This function does no assertion of any kind.
//
// Deprecated: Use [EqualExportedValues] instead.
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool {
expectedCleaned := copyExportedFields(expected)
actualCleaned := copyExportedFields(actual)
@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool {
return true
}
actualType := reflect.TypeOf(actual)
if actualType == nil {
expectedValue := reflect.ValueOf(expected)
actualValue := reflect.ValueOf(actual)
if !expectedValue.IsValid() || !actualValue.IsValid() {
return false
}
expectedValue := reflect.ValueOf(expected)
if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
// Attempt comparison after type conversion
return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
expectedType := expectedValue.Type()
actualType := actualValue.Type()
if !expectedType.ConvertibleTo(actualType) {
return false
}
return false
if !isNumericType(expectedType) || !isNumericType(actualType) {
// Attempt comparison after type conversion
return reflect.DeepEqual(
expectedValue.Convert(actualType).Interface(), actual,
)
}
// If BOTH values are numeric, there are chances of false positives due
// to overflow or underflow. So, we need to make sure to always convert
// the smaller type to a larger type before comparing.
if expectedType.Size() >= actualType.Size() {
return actualValue.Convert(expectedType).Interface() == expected
}
return expectedValue.Convert(actualType).Interface() == actual
}
// isNumericType returns true if the type is one of:
// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64,
// float32, float64, complex64, complex128
func isNumericType(t reflect.Type) bool {
return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128
}
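A worked example of why the smaller numeric type is converted up rather than down (hypothetical values, chosen only to show the truncation case):

package main

import (
	"fmt"

	"github.com/stretchr/testify/assert"
)

func main() {
	// int64(1<<32) truncates to int32(0); converting the larger value down would
	// report a bogus match, so the smaller value is converted up instead.
	fmt.Println(assert.ObjectsAreEqualValues(int64(1<<32), int32(0)))  // false
	fmt.Println(assert.ObjectsAreEqualValues(uint32(123), int32(123))) // true
}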
/* CallerInfo is necessary because the assert functions use the testing object
@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
// Aligns the provided message so that all lines after the first line start at the same location as the first line.
// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the
// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
// basis on which the alignment occurs).
func indentMessageLines(message string, longestLabelLen int) string {
outBuf := new(bytes.Buffer)
@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg
return true
}
// NotImplements asserts that an object does not implement the specified interface.
//
// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
interfaceType := reflect.TypeOf(interfaceObject).Elem()
if object == nil {
return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...)
}
if reflect.TypeOf(object).Implements(interfaceType) {
return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...)
}
return true
}
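Example usage of the new NotImplements assertion alongside the existing Implements (sketch for a _test.go file):

package example

import (
	"io"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestReaderInterfaces(t *testing.T) {
	r := strings.NewReader("x")
	assert.Implements(t, (*io.Reader)(nil), r)
	assert.NotImplements(t, (*io.Closer)(nil), r) // *strings.Reader has no Close method
}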
// IsType asserts that the specified objects are of the same type.
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool {
// representations appropriate to be presented to the user.
//
// If the values are not of like type, the returned strings will be prefixed
// with the type name, and the value will be enclosed in parenthesis similar
// with the type name, and the value will be enclosed in parentheses similar
// to a type conversion in the Go grammar.
func formatUnequalValues(expected, actual interface{}) (e string, a string) {
if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string {
return value
}
// EqualValues asserts that two objects are equal or convertable to the same types
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
// assert.EqualValues(t, uint32(123), int32(123))
@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ..
return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
}
if aType.Kind() == reflect.Ptr {
aType = aType.Elem()
}
if bType.Kind() == reflect.Ptr {
bType = bType.Elem()
}
if aType.Kind() != reflect.Struct {
return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
}
if bType.Kind() != reflect.Struct {
return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
}
expected = copyExportedFields(expected)
@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return Fail(t, "Expected value not to be nil.", msgAndArgs...)
}
// containsKind checks if a specified kind in the slice of kinds.
func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
for i := 0; i < len(kinds); i++ {
if kind == kinds[i] {
return true
}
}
return false
}
// isNil checks if a specified object is nil or not, without Failing.
func isNil(object interface{}) bool {
if object == nil {
@ -638,16 +683,13 @@ func isNil(object interface{}) bool {
}
value := reflect.ValueOf(object)
kind := value.Kind()
isNilableKind := containsKind(
[]reflect.Kind{
reflect.Chan, reflect.Func,
reflect.Interface, reflect.Map,
reflect.Ptr, reflect.Slice, reflect.UnsafePointer},
kind)
switch value.Kind() {
case
reflect.Chan, reflect.Func,
reflect.Interface, reflect.Map,
reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
if isNilableKind && value.IsNil() {
return true
return value.IsNil()
}
return false
@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
}
// getLen try to get length of object.
// return (false, 0) if impossible.
func getLen(x interface{}) (ok bool, length int) {
// getLen tries to get the length of an object.
// It returns (0, false) if impossible.
func getLen(x interface{}) (length int, ok bool) {
v := reflect.ValueOf(x)
defer func() {
if e := recover(); e != nil {
ok = false
}
ok = recover() == nil
}()
return true, v.Len()
return v.Len(), true
}
// Len asserts that the specified object has specific length.
@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
if h, ok := t.(tHelper); ok {
h.Helper()
}
ok, l := getLen(object)
l, ok := getLen(object)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...)
}
if l != length {
return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
}
return true
}
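// A minimal usage sketch (values are illustrative; assumes the testing and
// assert packages are imported): Len accepts anything the builtin len()
// accepts, such as strings, slices, maps and channels.
//
//	assert.Len(t, "abc", 3)
//	assert.Len(t, []int{1, 2, 3}, 3)
//	assert.Len(t, map[string]int{"a": 1}, 1)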
@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
}
// Subset asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
// Subset asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset list(array, slice...) or map.
//
// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
// assert.Subset(t, [1, 2, 3], [1, 2])
// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true
}
// NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
// NotSubset asserts that the specified list(array, slice...) or map does NOT
// contain all elements given in the specified subset list(array, slice...) or
// map.
//
// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
// assert.NotSubset(t, [1, 3, 4], [1, 2])
// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
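// A minimal usage sketch (values are illustrative; assumes the testing and
// assert packages are imported): with this change Subset and NotSubset also
// accept maps, checking the candidate subset's key/value pairs against the
// larger map.
//
//	assert.Subset(t, []int{1, 2, 3}, []int{1, 2})
//	assert.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 1})
//	assert.NotSubset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"z": 3})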
@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
h.Helper()
}
if math.IsNaN(epsilon) {
return Fail(t, "epsilon must not be NaN")
return Fail(t, "epsilon must not be NaN", msgAndArgs...)
}
actualEpsilon, err := calcRelativeError(expected, actual)
if err != nil {
@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
if h, ok := t.(tHelper); ok {
h.Helper()
}
if expected == nil || actual == nil ||
reflect.TypeOf(actual).Kind() != reflect.Slice ||
reflect.TypeOf(expected).Kind() != reflect.Slice {
if expected == nil || actual == nil {
return Fail(t, "Parameters must be slice", msgAndArgs...)
}
actualSlice := reflect.ValueOf(actual)
expectedSlice := reflect.ValueOf(expected)
actualSlice := reflect.ValueOf(actual)
for i := 0; i < actualSlice.Len(); i++ {
result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
if !result {
return result
if expectedSlice.Type().Kind() != reflect.Slice {
return Fail(t, "Expected value must be slice", msgAndArgs...)
}
expectedLen := expectedSlice.Len()
if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) {
return false
}
for i := 0; i < expectedLen; i++ {
if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) {
return false
}
}
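// A minimal usage sketch (values are illustrative; assumes the testing and
// assert packages are imported): InEpsilon checks the relative error
// |expected-actual|/|expected| against epsilon, and InEpsilonSlice applies
// the same check element by element.
//
//	assert.InEpsilon(t, 100.0, 101.0, 0.02) // relative error 0.01 < 0.02
//	assert.InEpsilonSlice(t, []float64{1, 100}, []float64{1.001, 100.5}, 0.01)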
@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) {
}
// FailNow panics.
func (c *CollectT) FailNow() {
func (*CollectT) FailNow() {
panic("Assertion failed")
}
// Reset clears the collected errors.
func (c *CollectT) Reset() {
c.errors = nil
// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
func (*CollectT) Reset() {
panic("Reset() is deprecated")
}
// Copy copies the collected errors to the supplied t.
func (c *CollectT) Copy(t TestingT) {
if tt, ok := t.(tHelper); ok {
tt.Helper()
}
for _, err := range c.errors {
t.Errorf("%v", err)
}
// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
func (*CollectT) Copy(TestingT) {
panic("Copy() is deprecated")
}
// EventuallyWithT asserts that given condition will be met in waitFor time,
@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
h.Helper()
}
collect := new(CollectT)
ch := make(chan bool, 1)
var lastFinishedTickErrs []error
ch := make(chan []error, 1)
timer := time.NewTimer(waitFor)
defer timer.Stop()
@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
for tick := ticker.C; ; {
select {
case <-timer.C:
collect.Copy(t)
for _, err := range lastFinishedTickErrs {
t.Errorf("%v", err)
}
return Fail(t, "Condition never satisfied", msgAndArgs...)
case <-tick:
tick = nil
collect.Reset()
go func() {
collect := new(CollectT)
defer func() {
ch <- collect.errors
}()
condition(collect)
ch <- len(collect.errors) == 0
}()
case v := <-ch:
if v {
case errs := <-ch:
if len(errs) == 0 {
return true
}
// Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
lastFinishedTickErrs = errs
tick = ticker.C
}
}
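// A minimal usage sketch (values are illustrative; assumes the testing, time
// and assert packages are imported): the condition is re-run every tick with
// a fresh CollectT, and only the errors from the last completed tick are
// reported if the timeout is reached.
//
//	externalValue := false
//	go func() {
//		time.Sleep(8 * time.Second)
//		externalValue = true
//	}()
//	assert.EventuallyWithT(t, func(c *assert.CollectT) {
//		assert.True(c, externalValue, "expected 'externalValue' to be true")
//	}, 10*time.Second, 1*time.Second)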

View File

@ -12,7 +12,7 @@ import (
// an error if building a new request fails.
func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url, nil)
req, err := http.NewRequest(method, url, http.NoBody)
if err != nil {
return -1, err
}
@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
}
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
if !isSuccessCode {
Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
}
return isSuccessCode
@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
}
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
if !isRedirectCode {
Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
}
return isRedirectCode
@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
}
isErrorCode := code >= http.StatusBadRequest
if !isErrorCode {
Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
}
return isErrorCode
@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
}
successful := code == statuscode
if !successful {
Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...)
}
return successful
@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va
// empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
if len(values) > 0 {
url += "?" + values.Encode()
}
req, err := http.NewRequest(method, url, http.NoBody)
if err != nil {
return ""
}
@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
contains := strings.Contains(body, fmt.Sprint(str))
if !contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
}
return contains
@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin
contains := strings.Contains(body, fmt.Sprint(str))
if contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
}
return !contains
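// A minimal usage sketch (the handler and query values are illustrative;
// assumes net/http, net/url, fmt, testing and assert are imported): these
// helpers drive an http.HandlerFunc through httptest and assert on the
// recorded status code and body.
//
//	handler := func(w http.ResponseWriter, r *http.Request) {
//		fmt.Fprintf(w, "hello %s", r.URL.Query().Get("name"))
//	}
//	assert.HTTPSuccess(t, handler, "GET", "/greet", url.Values{"name": []string{"world"}})
//	assert.HTTPBodyContains(t, handler, "GET", "/greet", url.Values{"name": []string{"world"}}, "world")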

View File

@ -1,7 +1,4 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
package require
@ -235,7 +232,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
t.FailNow()
}
// EqualValues asserts that two objects are equal or convertable to the same types
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
// assert.EqualValues(t, uint32(123), int32(123))
@ -249,7 +246,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg
t.FailNow()
}
// EqualValuesf asserts that two objects are equal or convertable to the same types
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
@ -1546,6 +1543,32 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf
t.FailNow()
}
// NotImplements asserts that an object does not implement the specified interface.
//
// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NotImplements(t, interfaceObject, object, msgAndArgs...) {
return
}
t.FailNow()
}
// NotImplementsf asserts that an object does not implement the specified interface.
//
// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NotImplementsf(t, interfaceObject, object, msg, args...) {
return
}
t.FailNow()
}
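// A minimal usage sketch (types are illustrative; assumes strings, testing
// and require are imported): unlike the assert variant, the require variant
// calls t.FailNow on failure, so statements after a failed require are not
// executed.
//
//	func TestRequireNotImplementsSketch(t *testing.T) {
//		require.NotImplements(t, (*error)(nil), new(strings.Builder))
//		// not reached if the assertion above fails
//	}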
// NotNil asserts that the specified object is not nil.
//
// assert.NotNil(t, err)
@ -1658,10 +1681,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
t.FailNow()
}
// NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
// NotSubset asserts that the specified list(array, slice...) or map does NOT
// contain all elements given in the specified subset list(array, slice...) or
// map.
//
// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
// assert.NotSubset(t, [1, 3, 4], [1, 2])
// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1672,10 +1697,12 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i
t.FailNow()
}
// NotSubsetf asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
// contain all elements given in the specified subset list(array, slice...) or
// map.
//
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1880,10 +1907,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
t.FailNow()
}
// Subset asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
// Subset asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset list(array, slice...) or map.
//
// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
// assert.Subset(t, [1, 2, 3], [1, 2])
// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1894,10 +1922,11 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte
t.FailNow()
}
// Subsetf asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
// Subsetf asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset list(array, slice...) or map.
//
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()

View File

@ -1,7 +1,4 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
package require
@ -190,7 +187,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
EqualExportedValuesf(a.t, expected, actual, msg, args...)
}
// EqualValues asserts that two objects are equal or convertable to the same types
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
// a.EqualValues(uint32(123), int32(123))
@ -201,7 +198,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
EqualValues(a.t, expected, actual, msgAndArgs...)
}
// EqualValuesf asserts that two objects are equal or convertable to the same types
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
@ -1222,6 +1219,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in
NotErrorIsf(a.t, err, target, msg, args...)
}
// NotImplements asserts that an object does not implement the specified interface.
//
// a.NotImplements((*MyInterface)(nil), new(MyObject))
func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotImplements(a.t, interfaceObject, object, msgAndArgs...)
}
// NotImplementsf asserts that an object does not implement the specified interface.
//
// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotImplementsf(a.t, interfaceObject, object, msg, args...)
}
// NotNil asserts that the specified object is not nil.
//
// a.NotNil(err)
@ -1310,10 +1327,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
NotSamef(a.t, expected, actual, msg, args...)
}
// NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
// NotSubset asserts that the specified list(array, slice...) or map does NOT
// contain all elements given in the specified subset list(array, slice...) or
// map.
//
// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
// a.NotSubset([1, 3, 4], [1, 2])
// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@ -1321,10 +1340,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
NotSubset(a.t, list, subset, msgAndArgs...)
}
// NotSubsetf asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
// contain all elements given in the specified subset list(array, slice...) or
// map.
//
// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@ -1484,10 +1505,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
Samef(a.t, expected, actual, msg, args...)
}
// Subset asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
// Subset asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset list(array, slice...) or map.
//
// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
// a.Subset([1, 2, 3], [1, 2])
// a.Subset({"x": 1, "y": 2}, {"x": 1})
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@ -1495,10 +1517,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
Subset(a.t, list, subset, msgAndArgs...)
}
// Subsetf asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
// Subsetf asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset list(array, slice...) or map.
//
// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()

View File

@ -3,3 +3,5 @@ fo
te
collison
consequentially
ans
nam

View File

@ -8,6 +8,192 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
### Added
- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134)
- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194)
- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`.
This new module contains the Go implementation of the OpenTelemetry Logs SDK.
This module is unstable and breaking changes may be introduced.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
This new module contains an OTLP exporter that transmits log telemetry using HTTP.
This module is unstable and breaking changes may be introduced.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
This new module contains an exporter that prints log records to STDOUT.
This module is unstable and breaking changes may be introduced.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
- The `go.opentelemetry.io/otel/semconv/v1.25.0` package.
The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254)
### Changed
- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
### Added
- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906)
- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906)
- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032)
- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`.
This method is used to notify users if a log record will be emitted or not. (#5071)
- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`.
This value represents an unset severity level. (#5072)
- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076)
- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes.
At that point, users will be required to migrate their code, and this package will be deprecated and then removed. (#5085)
- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100)
- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108)
- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116)
- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117)
- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111)
- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528)
### Changed
- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049)
- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151)
Because of that `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError).
Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext), which was used before, is now deprecated.
### Fixed
- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027)
- Prevent default `ErrorHandler` self-delegation. (#5137)
- Update all dependencies to address [GO-2024-2687]. (#5139)
### Removed
- Drop support for [Go 1.20]. (#4967)
### Deprecated
- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734)
- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734)
- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734)
## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23
This release is the last to support [Go 1.20].
The next release will require at least [Go 1.21].
### Added
- Support [Go 1.22]. (#4890)
- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900)
- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900)
- The `go.opentelemetry.io/otel/log` module is added.
This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
This module is in an alpha state, it is subject to breaking changes.
See our [versioning policy](./VERSIONING.md) for more info. (#4961)
- ARM64 platform to the compatibility testing suite. (#4994)
### Fixed
- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945)
- Fix negative buckets in output of exponential histograms. (#4956)
## [1.23.1] 2024-02-07
### Fixed
- Register all callbacks passed during observable instrument creation instead of just the last one multiple times in `go.opentelemetry.io/otel/sdk/metric`. (#4888)
## [1.23.0] 2024-02-06
This release contains the first stable, `v1`, release of the following modules:
- `go.opentelemetry.io/otel/bridge/opencensus`
- `go.opentelemetry.io/otel/bridge/opencensus/test`
- `go.opentelemetry.io/otel/example/opencensus`
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
### Added
- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808)
- Experimental exemplar exporting is added to the metric SDK.
See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871)
- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
This error is returned when a merge of two `Resource`s with different (non-empty) schema URL is attempted. (#4876)
### Changed
- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict.
Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource` is returned along with the new `ErrSchemaURLConflict` error.
It is up to the user to decide if they want to use the returned `Resource` or not.
It may have desired attributes overwritten or include stale semantic conventions. (#4876)
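A minimal sketch of the new behavior (variable names are assumed; the merged result stays usable even when the error is returned):
```go
res, err := resource.Merge(resA, resB) // resA and resB carry different, non-empty schema URLs
if errors.Is(err, resource.ErrSchemaURLConflict) {
	// res is still populated; the caller decides whether the merged
	// attributes are acceptable despite the schema URL conflict.
	_ = res
}
```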
### Fixed
- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449)
- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4827)
## [1.23.0-rc.1] 2024-01-18
This is a release candidate for the v1.23.0 release.
That release is expected to include the `v1` release of the following modules:
- `go.opentelemetry.io/otel/bridge/opencensus`
- `go.opentelemetry.io/otel/bridge/opencensus/test`
- `go.opentelemetry.io/otel/example/opencensus`
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
## [1.22.0/0.45.0] 2024-01-17
### Added
- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
- Experimental cardinality limiting is added to the metric SDK.
See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
### Changed
- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754)
- Record synchronous measurements when the passed context is canceled instead of dropping them in `go.opentelemetry.io/otel/sdk/metric`.
If you do not want to make a measurement when the context is canceled, you need to handle it yourself (e.g. `if ctx.Err() != nil`). (#4671)
- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
### Fixed
- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755)
- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
## [1.21.0/0.44.0] 2023-11-16
### Removed
@ -2735,7 +2921,14 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.26.0...HEAD
[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0
[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0
[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1
[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1
[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
@ -2809,6 +3002,8 @@ It contains api and sdk for trace and meter.
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
[Go 1.22]: https://go.dev/doc/go1.22
[Go 1.21]: https://go.dev/doc/go1.21
[Go 1.20]: https://go.dev/doc/go1.20
[Go 1.19]: https://go.dev/doc/go1.19
[Go 1.18]: https://go.dev/doc/go1.18
@ -2816,3 +3011,5 @@ It contains api and sdk for trace and meter.
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace
[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687

View File

@ -14,4 +14,4 @@
* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
CODEOWNERS @MrAlias @MadVikingGod @pellared
CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole

View File

@ -201,6 +201,16 @@ You can install and run a "local Go Doc site" in the following way:
[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
is an example of a very well-documented package.
### README files
Each (non-internal, non-test, non-documentation) package must contain a
`README.md` file containing at least a title and a `pkg.go.dev` badge.
The README should not be a repetition of Go doc comments.
You can verify the presence of all README files with the `make verify-readmes`
command.
## Style Guide
One of the primary goals of this project is that it is actually used by
@ -591,25 +601,46 @@ this.
[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
### Ignoring context cancellation
OpenTelemetry API implementations need to ignore the cancellation of the contexts that are
passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
Recording methods should not return an error describing the cancellation state of the context
when they complete, nor should they abort any work.
This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
the method. In that case the context cancellation can be used for the timeout with the
restriction that this behavior is documented for the method. Otherwise, timeouts
are expected to be handled by the user calling the API, not the implementation.
Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method
of a provider. It is assumed the context passed from a user is not used for this purpose.
Outside of the direct recording of telemetry from the API (e.g. exporting telemetry,
force flushing telemetry, shutting down a signal provider) the context cancellation
should be honored. This means all work done on behalf of the user provided context
should be canceled.
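A minimal sketch of what this means in practice (the `logger` type and its fields are hypothetical, not actual SDK code):

```go
// Emit is a recording method: it ignores ctx cancellation, neither aborting
// its work nor reporting ctx.Err() back to the caller.
func (l *logger) Emit(ctx context.Context, r Record) {
	l.processor.OnEmit(ctx, r)
}

// Shutdown is not a recording method, so context cancellation is honored.
func (l *logger) Shutdown(ctx context.Context) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	return l.processor.Shutdown(ctx)
}
```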
## Approvers and Maintainers
### Approvers
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
- [David Ashpole](https://github.com/dashpole), Google
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Damien Mathieu](https://github.com/dmathieu), Elastic
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
### Maintainers
- [David Ashpole](https://github.com/dashpole), Google
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Robert Pająk](https://github.com/pellared), Splunk
- [Tyler Yahn](https://github.com/MrAlias), Splunk
### Emeritus
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Josh MacDonald](https://github.com/jmacd), LightStep

Some files were not shown because too many files have changed in this diff.