TUN-6772: Add a JWT Validator as an ingress verifier
This adds a new verifier interface that can be attached to ingress.Rule. It acts as a middleware layer executed at the start of proxy.ProxyHTTP. A JWT validator implementation of this verifier is also provided. The validator downloads the public keys from the Access team's certs endpoint and uses them to verify the JWT sent to cloudflared against the aud tag (client ID) information provided in the config.
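For context, the public keys come from the team's Access certs endpoint (https://<team-name>.cloudflareaccess.com/cdn-cgi/access/certs, as constructed in the new middleware package below). A minimal sketch of fetching that JWKS document directly; the team name is an illustrative placeholder, not a value from this change:

package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    // "myteam" is a placeholder team name; cloudflared builds the same URL from the
    // configured team name via cloudflareAccessCertsURL + "/cdn-cgi/access/certs".
    resp, err := http.Get("https://myteam.cloudflareaccess.com/cdn-cgi/access/certs")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Println(string(body)) // the JWKS containing the public signing keys used to verify Access JWTs
}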
This commit is contained in:
parent e9a2c85671
commit de07da02cd
go.mod (11 changed lines)

@ -36,7 +36,7 @@ require (
	go.uber.org/automaxprocs v1.4.0
	golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
	golang.org/x/net v0.0.0-20220909164309-bea034e7d591
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
	golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664
	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
	google.golang.org/protobuf v1.28.0
@ -56,6 +56,7 @@ require (
	github.com/cheekybits/genny v1.0.0 // indirect
	github.com/cloudflare/circl v1.2.1-0.20220809205628-0a9554f37a47 // indirect
	github.com/coredns/caddy v1.1.1 // indirect
+	github.com/coreos/go-oidc/v3 v3.4.0 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
@ -89,13 +90,13 @@ require (
	github.com/prometheus/procfs v0.7.3 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	golang.org/x/mod v0.4.2 // indirect
-	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
+	golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 // indirect
	golang.org/x/text v0.3.7 // indirect
	golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2 // indirect
-	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb // indirect
+	google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90 // indirect
-	google.golang.org/grpc v1.45.0 // indirect
+	google.golang.org/grpc v1.47.0 // indirect
	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
 )
go.sum (90 changed lines)

(Machine-generated checksum churn omitted: updated go.sum hashes for cloud.google.com/go and its sub-modules, github.com/cncf/xds, github.com/envoyproxy/go-control-plane, github.com/google/go-cmp, github.com/googleapis/*, golang.org/x/net, golang.org/x/oauth2, golang.org/x/sync, golang.org/x/sys, golang.org/x/xerrors, google.golang.org/api, google.golang.org/genproto and google.golang.org/grpc, plus new entries for github.com/coreos/go-oidc/v3 v3.4.0.)
@ -0,0 +1,66 @@
package middleware

import (
	"context"
	"fmt"
	"net/http"

	"github.com/coreos/go-oidc/v3/oidc"
	"github.com/pkg/errors"
)

const (
	headerKeyAccessJWTAssertion = "Cf-Access-Jwt-Assertion"
)

var (
	ErrNoAccessToken = errors.New("no access token provided in request")

	cloudflareAccessCertsURL = "https://%s.cloudflareaccess.com"
)

// JWTValidator is an implementation of Verifier that validates Access-issued JWT tokens.
type JWTValidator struct {
	*oidc.IDTokenVerifier
	audTags []string
}

// NewJWTValidator builds a validator for the given Access team. If certsURL is empty,
// the team's default https://<teamName>.cloudflareaccess.com endpoint is used.
func NewJWTValidator(teamName string, certsURL string, audTags []string) *JWTValidator {
	if certsURL == "" {
		certsURL = fmt.Sprintf(cloudflareAccessCertsURL, teamName)
	}
	certsEndpoint := fmt.Sprintf("%s/cdn-cgi/access/certs", certsURL)

	config := &oidc.Config{
		SkipClientIDCheck: true,
	}

	ctx := context.Background()
	keySet := oidc.NewRemoteKeySet(ctx, certsEndpoint)
	verifier := oidc.NewVerifier(certsURL, keySet, config)
	return &JWTValidator{
		IDTokenVerifier: verifier,
		audTags:         audTags,
	}
}

// Handle verifies the Access JWT carried in the request headers and checks that its
// audience matches one of the configured aud tags.
func (v *JWTValidator) Handle(ctx context.Context, headers http.Header) error {
	accessJWT := headers.Get(headerKeyAccessJWTAssertion)
	if accessJWT == "" {
		return ErrNoAccessToken
	}

	token, err := v.IDTokenVerifier.Verify(ctx, accessJWT)
	if err != nil {
		return fmt.Errorf("invalid token: %w", err)
	}

	// We want at least one aud tag to match.
	for _, jwtAudTag := range token.Audience {
		for _, acceptedAudTag := range v.audTags {
			if acceptedAudTag == jwtAudTag {
				return nil
			}
		}
	}

	return errors.New("invalid token: no aud tag in the token matches the configured aud tags")
}
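A minimal usage sketch of the validator added above. The team name, aud tag, and import path are illustrative placeholders (the package's final import path is an assumption, not stated in this diff), and this commit does not yet wire the validator into ProxyHTTP:

package main

import (
    "log"
    "net/http"

    "github.com/cloudflare/cloudflared/ingress/middleware" // assumed import path for the new package
)

func main() {
    // Placeholders: "myteam" is the Access team name and the slice holds the accepted
    // aud tags (client IDs). An empty certsURL makes the validator derive
    // https://myteam.cloudflareaccess.com on its own.
    validator := middleware.NewJWTValidator("myteam", "", []string{"0123456789abcdef"})

    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        // Access sends the signed JWT in the Cf-Access-Jwt-Assertion header.
        if err := validator.Handle(r.Context(), r.Header); err != nil {
            http.Error(w, err.Error(), http.StatusForbidden)
            return
        }
        w.Write([]byte("authorized"))
    })
    log.Fatal(http.ListenAndServe(":8080", nil))
}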
@ -0,0 +1,10 @@
package middleware

import (
	"context"
	"net/http"
)

// Handler is the verifier interface that ingress rules can attach; implementations run
// as middleware at the start of proxy.ProxyHTTP.
type Handler interface {
	Handle(ctx context.Context, r *http.Request) error
}
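For illustration, a minimal Handler implementation (hypothetical, not part of this commit) that rejects requests missing a required header; it only demonstrates the shape of the interface:

package middleware

import (
    "context"
    "errors"
    "net/http"
)

// requireHeader is a hypothetical Handler used to illustrate the interface: Handle fails
// the request unless the named header is present.
type requireHeader struct {
    name string
}

func (h *requireHeader) Handle(ctx context.Context, r *http.Request) error {
    if r.Header.Get(h.name) == "" {
        return errors.New("missing required header: " + h.name)
    }
    return nil
}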
@ -20,6 +20,11 @@ type Rule struct {
	// address.
	Service OriginService `json:"service"`

+	// Handlers is a list of functions that act as middleware during ProxyHTTP.
+	// TODO TUN-6774: Uncomment once ingress parsing populates this field. This serves as a
+	// demonstration of how we want to plug in Verifiers.
+	// Handlers []middleware.Handler
+
	// Configure the request cloudflared sends to this specific origin.
	Config OriginRequestConfig `json:"originRequest"`
}
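Once TUN-6774 uncomments Handlers, the intended hook at the start of proxy.ProxyHTTP would look roughly like the sketch below. This is an assumption about the eventual wiring, not code from this commit; the helper name and its placement in the proxy package are hypothetical:

package proxy

import (
    "context"
    "net/http"

    "github.com/cloudflare/cloudflared/ingress/middleware" // assumed import path
)

// runHandlers sketches how ProxyHTTP could execute a rule's middleware chain: every
// Handler runs before the request is proxied, and the first error aborts the request
// (e.g. respond with 403 instead of dialing the origin).
func runHandlers(ctx context.Context, handlers []middleware.Handler, r *http.Request) error {
    for _, h := range handlers {
        if err := h.Handle(ctx, r); err != nil {
            return err
        }
    }
    return nil
}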
@ -0,0 +1,202 @@
(Vendored LICENSE added for the new go-oidc dependency: the standard Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/ — full boilerplate text omitted here.)
@@ -0,0 +1,5 @@
CoreOS Project
Copyright 2014 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
@@ -0,0 +1,16 @@
package oidc

// JOSE asymmetric signing algorithm values as defined by RFC 7518
//
// see: https://tools.ietf.org/html/rfc7518#section-3.1
const (
    RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
    RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
    RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
    ES256 = "ES256" // ECDSA using P-256 and SHA-256
    ES384 = "ES384" // ECDSA using P-384 and SHA-384
    ES512 = "ES512" // ECDSA using P-521 and SHA-512
    PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
    PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
    PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
)
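The constants above are plain string values. As a minimal sketch (not part of this change; audTag and the algorithm list are hypothetical), a caller can reuse them to restrict which signatures the verifier Config vendored later in this commit will accept:

package sketch

import "github.com/coreos/go-oidc/v3/oidc"

// newVerifierConfig is a hypothetical helper: audTag is a placeholder for the
// audience the caller expects, and SupportedSigningAlgs reuses the exported
// algorithm constants above.
func newVerifierConfig(audTag string) *oidc.Config {
    return &oidc.Config{
        ClientID:             audTag,
        SupportedSigningAlgs: []string{oidc.RS256, oidc.ES256},
    }
}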
@@ -0,0 +1,248 @@
package oidc

import (
    "context"
    "crypto"
    "crypto/ecdsa"
    "crypto/rsa"
    "errors"
    "fmt"
    "io/ioutil"
    "net/http"
    "sync"
    "time"

    jose "gopkg.in/square/go-jose.v2"
)

// StaticKeySet is a verifier that validates JWT against a static set of public keys.
type StaticKeySet struct {
    // PublicKeys used to verify the JWT. Supported types are *rsa.PublicKey and
    // *ecdsa.PublicKey.
    PublicKeys []crypto.PublicKey
}

// VerifySignature compares the signature against a static set of public keys.
func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
    jws, err := jose.ParseSigned(jwt)
    if err != nil {
        return nil, fmt.Errorf("parsing jwt: %v", err)
    }
    for _, pub := range s.PublicKeys {
        switch pub.(type) {
        case *rsa.PublicKey:
        case *ecdsa.PublicKey:
        default:
            return nil, fmt.Errorf("invalid public key type provided: %T", pub)
        }
        payload, err := jws.Verify(pub)
        if err != nil {
            continue
        }
        return payload, nil
    }
    return nil, fmt.Errorf("no public keys able to verify jwt")
}

// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP
// GETs to fetch JSON web token sets hosted at a remote URL. This is automatically
// used by NewProvider using the URLs returned by OpenID Connect discovery, but is
// exposed for providers that don't support discovery or to prevent round trips to the
// discovery URL.
//
// The returned KeySet is a long lived verifier that caches keys, refetching them when
// a key is not found in the cache. Reuse a common remote key set instead of creating
// new ones as needed.
func NewRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet {
    return newRemoteKeySet(ctx, jwksURL, time.Now)
}

func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *RemoteKeySet {
    if now == nil {
        now = time.Now
    }
    return &RemoteKeySet{jwksURL: jwksURL, ctx: cloneContext(ctx), now: now}
}

// RemoteKeySet is a KeySet implementation that validates JSON web tokens against
// a jwks_uri endpoint.
type RemoteKeySet struct {
    jwksURL string
    ctx     context.Context
    now     func() time.Time

    // guard all other fields
    mu sync.RWMutex

    // inflight suppresses parallel execution of updateKeys and allows
    // multiple goroutines to wait for its result.
    inflight *inflight

    // A set of cached keys.
    cachedKeys []jose.JSONWebKey
}

// inflight is used to wait on some in-flight request from multiple goroutines.
type inflight struct {
    doneCh chan struct{}

    keys []jose.JSONWebKey
    err  error
}

func newInflight() *inflight {
    return &inflight{doneCh: make(chan struct{})}
}

// wait returns a channel that multiple goroutines can receive on. Once it returns
// a value, the inflight request is done and result() can be inspected.
func (i *inflight) wait() <-chan struct{} {
    return i.doneCh
}

// done can only be called by a single goroutine. It records the result of the
// inflight request and signals other goroutines that the result is safe to
// inspect.
func (i *inflight) done(keys []jose.JSONWebKey, err error) {
    i.keys = keys
    i.err = err
    close(i.doneCh)
}

// result cannot be called until the wait() channel has returned a value.
func (i *inflight) result() ([]jose.JSONWebKey, error) {
    return i.keys, i.err
}

// parsedJWTKey is a context key that allows common setups to avoid parsing the
// JWT twice. It holds a *jose.JSONWebSignature value.
var parsedJWTKey contextKey

// VerifySignature validates a payload against a signature from the jwks_uri.
//
// Users MUST NOT call this method directly and should use an IDTokenVerifier
// instead. This method skips critical validations such as 'alg' values and is
// only exported to implement the KeySet interface.
func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
    jws, ok := ctx.Value(parsedJWTKey).(*jose.JSONWebSignature)
    if !ok {
        var err error
        jws, err = jose.ParseSigned(jwt)
        if err != nil {
            return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
        }
    }
    return r.verify(ctx, jws)
}

func (r *RemoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
    // We don't support JWTs signed with multiple signatures.
    keyID := ""
    for _, sig := range jws.Signatures {
        keyID = sig.Header.KeyID
        break
    }

    keys := r.keysFromCache()
    for _, key := range keys {
        if keyID == "" || key.KeyID == keyID {
            if payload, err := jws.Verify(&key); err == nil {
                return payload, nil
            }
        }
    }

    // If the kid doesn't match, check for new keys from the remote. This is the
    // strategy recommended by the spec.
    //
    // https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys
    keys, err := r.keysFromRemote(ctx)
    if err != nil {
        return nil, fmt.Errorf("fetching keys %v", err)
    }

    for _, key := range keys {
        if keyID == "" || key.KeyID == keyID {
            if payload, err := jws.Verify(&key); err == nil {
                return payload, nil
            }
        }
    }
    return nil, errors.New("failed to verify id token signature")
}

func (r *RemoteKeySet) keysFromCache() (keys []jose.JSONWebKey) {
    r.mu.RLock()
    defer r.mu.RUnlock()
    return r.cachedKeys
}

// keysFromRemote syncs the key set from the remote set, records the values in the
// cache, and returns the key set.
func (r *RemoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) {
    // Need to lock to inspect the inflight request field.
    r.mu.Lock()
    // If there's not a current inflight request, create one.
    if r.inflight == nil {
        r.inflight = newInflight()

        // This goroutine has exclusive ownership over the current inflight
        // request. It releases the resource by nil'ing the inflight field
        // once the goroutine is done.
        go func() {
            // Sync keys and finish inflight when that's done.
            keys, err := r.updateKeys()

            r.inflight.done(keys, err)

            // Lock to update the keys and indicate that there is no longer an
            // inflight request.
            r.mu.Lock()
            defer r.mu.Unlock()

            if err == nil {
                r.cachedKeys = keys
            }

            // Free inflight so a different request can run.
            r.inflight = nil
        }()
    }
    inflight := r.inflight
    r.mu.Unlock()

    select {
    case <-ctx.Done():
        return nil, ctx.Err()
    case <-inflight.wait():
        return inflight.result()
    }
}

func (r *RemoteKeySet) updateKeys() ([]jose.JSONWebKey, error) {
    req, err := http.NewRequest("GET", r.jwksURL, nil)
    if err != nil {
        return nil, fmt.Errorf("oidc: can't create request: %v", err)
    }

    resp, err := doRequest(r.ctx, req)
    if err != nil {
        return nil, fmt.Errorf("oidc: get keys failed %v", err)
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, fmt.Errorf("unable to read response body: %v", err)
    }

    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
    }

    var keySet jose.JSONWebKeySet
    err = unmarshalResp(resp, body, &keySet)
    if err != nil {
        return nil, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
    }
    return keySet.Keys, nil
}
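A minimal sketch, not part of this change, of how the RemoteKeySet above is typically wired into an IDTokenVerifier without discovery. The issuer URL, JWKS URL, and audience are placeholders rather than values taken from this commit:

package sketch

import (
    "context"
    "fmt"

    "github.com/coreos/go-oidc/v3/oidc"
)

// verifyJWT is a hypothetical example: the key set is fetched (and cached)
// from the placeholder JWKS URL, and the verifier checks the signature,
// issuer, expiry, and audience against the placeholder values.
func verifyJWT(ctx context.Context, rawJWT string) error {
    keySet := oidc.NewRemoteKeySet(ctx, "https://issuer.example.com/keys")
    verifier := oidc.NewVerifier("https://issuer.example.com", keySet, &oidc.Config{
        ClientID: "example-audience",
    })
    token, err := verifier.Verify(ctx, rawJWT)
    if err != nil {
        return fmt.Errorf("token rejected: %w", err)
    }
    fmt.Println("verified token for subject:", token.Subject)
    return nil
}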
@ -0,0 +1,522 @@
|
||||||
|
// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package.
|
||||||
|
package oidc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"crypto/sha512"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"io/ioutil"
|
||||||
|
"mime"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/oauth2"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests.
|
||||||
|
ScopeOpenID = "openid"
|
||||||
|
|
||||||
|
// ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting
|
||||||
|
// OAuth2 refresh tokens.
|
||||||
|
//
|
||||||
|
// Support for this scope differs between OpenID Connect providers. For instance
|
||||||
|
// Google rejects it, favoring appending "access_type=offline" as part of the
|
||||||
|
// authorization request instead.
|
||||||
|
//
|
||||||
|
// See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess
|
||||||
|
ScopeOfflineAccess = "offline_access"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errNoAtHash = errors.New("id token did not have an access token hash")
|
||||||
|
errInvalidAtHash = errors.New("access token hash does not match value in ID token")
|
||||||
|
)
|
||||||
|
|
||||||
|
type contextKey int
|
||||||
|
|
||||||
|
var issuerURLKey contextKey
|
||||||
|
|
||||||
|
// ClientContext returns a new Context that carries the provided HTTP client.
|
||||||
|
//
|
||||||
|
// This method sets the same context key used by the golang.org/x/oauth2 package,
|
||||||
|
// so the returned context works for that package too.
|
||||||
|
//
|
||||||
|
// myClient := &http.Client{}
|
||||||
|
// ctx := oidc.ClientContext(parentContext, myClient)
|
||||||
|
//
|
||||||
|
// // This will use the custom client
|
||||||
|
// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
|
||||||
|
//
|
||||||
|
func ClientContext(ctx context.Context, client *http.Client) context.Context {
|
||||||
|
return context.WithValue(ctx, oauth2.HTTPClient, client)
|
||||||
|
}
|
||||||
|
|
||||||
|
// cloneContext copies a context's bag-of-values into a new context that isn't
|
||||||
|
// associated with its cancellation. This is used to initialize remote key sets
|
||||||
|
// which run in the background and aren't associated with the initial context.
|
||||||
|
func cloneContext(ctx context.Context) context.Context {
|
||||||
|
cp := context.Background()
|
||||||
|
if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
|
||||||
|
cp = ClientContext(cp, c)
|
||||||
|
}
|
||||||
|
return cp
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsecureIssuerURLContext allows discovery to work when the issuer_url reported
|
||||||
|
// by upstream is mismatched with the discovery URL. This is meant for integration
|
||||||
|
// with off-spec providers such as Azure.
|
||||||
|
//
|
||||||
|
// discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0"
|
||||||
|
// issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0"
|
||||||
|
//
|
||||||
|
// ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL)
|
||||||
|
//
|
||||||
|
// // Provider will be discovered with the discoveryBaseURL, but use issuerURL
|
||||||
|
// // for future issuer validation.
|
||||||
|
// provider, err := oidc.NewProvider(ctx, discoveryBaseURL)
|
||||||
|
//
|
||||||
|
// This is insecure because validating the correct issuer is critical for multi-tenant
|
||||||
|
// providers. Any overrides here MUST be carefully reviewed.
|
||||||
|
func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Context {
|
||||||
|
return context.WithValue(ctx, issuerURLKey, issuerURL)
|
||||||
|
}
|
||||||
|
|
||||||
|
func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
|
||||||
|
client := http.DefaultClient
|
||||||
|
if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
|
||||||
|
client = c
|
||||||
|
}
|
||||||
|
return client.Do(req.WithContext(ctx))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Provider represents an OpenID Connect server's configuration.
|
||||||
|
type Provider struct {
|
||||||
|
issuer string
|
||||||
|
authURL string
|
||||||
|
tokenURL string
|
||||||
|
userInfoURL string
|
||||||
|
algorithms []string
|
||||||
|
|
||||||
|
// Raw claims returned by the server.
|
||||||
|
rawClaims []byte
|
||||||
|
|
||||||
|
remoteKeySet KeySet
|
||||||
|
}
|
||||||
|
|
||||||
|
type providerJSON struct {
|
||||||
|
Issuer string `json:"issuer"`
|
||||||
|
AuthURL string `json:"authorization_endpoint"`
|
||||||
|
TokenURL string `json:"token_endpoint"`
|
||||||
|
JWKSURL string `json:"jwks_uri"`
|
||||||
|
UserInfoURL string `json:"userinfo_endpoint"`
|
||||||
|
Algorithms []string `json:"id_token_signing_alg_values_supported"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// supportedAlgorithms is a list of algorithms explicitly supported by this
|
||||||
|
// package. If a provider supports other algorithms, such as HS256 or none,
|
||||||
|
// those values won't be passed to the IDTokenVerifier.
|
||||||
|
var supportedAlgorithms = map[string]bool{
|
||||||
|
RS256: true,
|
||||||
|
RS384: true,
|
||||||
|
RS512: true,
|
||||||
|
ES256: true,
|
||||||
|
ES384: true,
|
||||||
|
ES512: true,
|
||||||
|
PS256: true,
|
||||||
|
PS384: true,
|
||||||
|
PS512: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderConfig allows creating providers when discovery isn't supported. It's
|
||||||
|
// generally easier to use NewProvider directly.
|
||||||
|
type ProviderConfig struct {
|
||||||
|
// IssuerURL is the identity of the provider, and the string it uses to sign
|
||||||
|
// ID tokens with. For example "https://accounts.google.com". This value MUST
|
||||||
|
// match ID tokens exactly.
|
||||||
|
IssuerURL string
|
||||||
|
// AuthURL is the endpoint used by the provider to support the OAuth 2.0
|
||||||
|
// authorization endpoint.
|
||||||
|
AuthURL string
|
||||||
|
// TokenURL is the endpoint used by the provider to support the OAuth 2.0
|
||||||
|
// token endpoint.
|
||||||
|
TokenURL string
|
||||||
|
// UserInfoURL is the endpoint used by the provider to support the OpenID
|
||||||
|
// Connect UserInfo flow.
|
||||||
|
//
|
||||||
|
// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
|
||||||
|
UserInfoURL string
|
||||||
|
// JWKSURL is the endpoint used by the provider to advertise public keys to
|
||||||
|
// verify issued ID tokens. This endpoint is polled as new keys are made
|
||||||
|
// available.
|
||||||
|
JWKSURL string
|
||||||
|
|
||||||
|
// Algorithms, if provided, indicate a list of JWT algorithms allowed to sign
|
||||||
|
// ID tokens. If not provided, this defaults to the algorithms advertised by
|
||||||
|
// the JWK endpoint, then the set of algorithms supported by this package.
|
||||||
|
Algorithms []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewProvider initializes a provider from a set of endpoints, rather than
|
||||||
|
// through discovery.
|
||||||
|
func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
|
||||||
|
return &Provider{
|
||||||
|
issuer: p.IssuerURL,
|
||||||
|
authURL: p.AuthURL,
|
||||||
|
tokenURL: p.TokenURL,
|
||||||
|
userInfoURL: p.UserInfoURL,
|
||||||
|
algorithms: p.Algorithms,
|
||||||
|
remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
|
||||||
|
//
|
||||||
|
// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
|
||||||
|
// or "https://login.salesforce.com".
|
||||||
|
func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
|
||||||
|
wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
|
||||||
|
req, err := http.NewRequest("GET", wellKnown, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := doRequest(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to read response body: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return nil, fmt.Errorf("%s: %s", resp.Status, body)
|
||||||
|
}
|
||||||
|
|
||||||
|
var p providerJSON
|
||||||
|
err = unmarshalResp(resp, body, &p)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
issuerURL, skipIssuerValidation := ctx.Value(issuerURLKey).(string)
|
||||||
|
if !skipIssuerValidation {
|
||||||
|
issuerURL = issuer
|
||||||
|
}
|
||||||
|
if p.Issuer != issuerURL && !skipIssuerValidation {
|
||||||
|
return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer)
|
||||||
|
}
|
||||||
|
var algs []string
|
||||||
|
for _, a := range p.Algorithms {
|
||||||
|
if supportedAlgorithms[a] {
|
||||||
|
algs = append(algs, a)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &Provider{
|
||||||
|
issuer: issuerURL,
|
||||||
|
authURL: p.AuthURL,
|
||||||
|
tokenURL: p.TokenURL,
|
||||||
|
userInfoURL: p.UserInfoURL,
|
||||||
|
algorithms: algs,
|
||||||
|
rawClaims: body,
|
||||||
|
remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Claims unmarshals raw fields returned by the server during discovery.
|
||||||
|
//
|
||||||
|
// var claims struct {
|
||||||
|
// ScopesSupported []string `json:"scopes_supported"`
|
||||||
|
// ClaimsSupported []string `json:"claims_supported"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// if err := provider.Claims(&claims); err != nil {
|
||||||
|
// // handle unmarshaling error
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// For a list of fields defined by the OpenID Connect spec see:
|
||||||
|
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
|
||||||
|
func (p *Provider) Claims(v interface{}) error {
|
||||||
|
if p.rawClaims == nil {
|
||||||
|
return errors.New("oidc: claims not set")
|
||||||
|
}
|
||||||
|
return json.Unmarshal(p.rawClaims, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Endpoint returns the OAuth2 auth and token endpoints for the given provider.
|
||||||
|
func (p *Provider) Endpoint() oauth2.Endpoint {
|
||||||
|
return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserInfo represents the OpenID Connect userinfo claims.
|
||||||
|
type UserInfo struct {
|
||||||
|
Subject string `json:"sub"`
|
||||||
|
Profile string `json:"profile"`
|
||||||
|
Email string `json:"email"`
|
||||||
|
EmailVerified bool `json:"email_verified"`
|
||||||
|
|
||||||
|
claims []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type userInfoRaw struct {
|
||||||
|
Subject string `json:"sub"`
|
||||||
|
Profile string `json:"profile"`
|
||||||
|
Email string `json:"email"`
|
||||||
|
// Handle providers that return email_verified as a string
|
||||||
|
// https://forums.aws.amazon.com/thread.jspa?messageID=949441 and
|
||||||
|
// https://discuss.elastic.co/t/openid-error-after-authenticating-against-aws-cognito/206018/11
|
||||||
|
EmailVerified stringAsBool `json:"email_verified"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Claims unmarshals the raw JSON object claims into the provided object.
|
||||||
|
func (u *UserInfo) Claims(v interface{}) error {
|
||||||
|
if u.claims == nil {
|
||||||
|
return errors.New("oidc: claims not set")
|
||||||
|
}
|
||||||
|
return json.Unmarshal(u.claims, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserInfo uses the token source to query the provider's user info endpoint.
|
||||||
|
func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) {
|
||||||
|
if p.userInfoURL == "" {
|
||||||
|
return nil, errors.New("oidc: user info endpoint is not supported by this provider")
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", p.userInfoURL, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("oidc: create GET request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
token, err := tokenSource.Token()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("oidc: get access token: %v", err)
|
||||||
|
}
|
||||||
|
token.SetAuthHeader(req)
|
||||||
|
|
||||||
|
resp, err := doRequest(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return nil, fmt.Errorf("%s: %s", resp.Status, body)
|
||||||
|
}
|
||||||
|
|
||||||
|
ct := resp.Header.Get("Content-Type")
|
||||||
|
mediaType, _, parseErr := mime.ParseMediaType(ct)
|
||||||
|
if parseErr == nil && mediaType == "application/jwt" {
|
||||||
|
payload, err := p.remoteKeySet.VerifySignature(ctx, string(body))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("oidc: invalid userinfo jwt signature %v", err)
|
||||||
|
}
|
||||||
|
body = payload
|
||||||
|
}
|
||||||
|
|
||||||
|
var userInfo userInfoRaw
|
||||||
|
if err := json.Unmarshal(body, &userInfo); err != nil {
|
||||||
|
return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err)
|
||||||
|
}
|
||||||
|
return &UserInfo{
|
||||||
|
Subject: userInfo.Subject,
|
||||||
|
Profile: userInfo.Profile,
|
||||||
|
Email: userInfo.Email,
|
||||||
|
EmailVerified: bool(userInfo.EmailVerified),
|
||||||
|
claims: body,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDToken is an OpenID Connect extension that provides a predictable representation
|
||||||
|
// of an authorization event.
|
||||||
|
//
|
||||||
|
// The ID Token only holds fields OpenID Connect requires. To access additional
|
||||||
|
// claims returned by the server, use the Claims method.
|
||||||
|
type IDToken struct {
|
||||||
|
// The URL of the server which issued this token. OpenID Connect
|
||||||
|
// requires this value always be identical to the URL used for
|
||||||
|
// initial discovery.
|
||||||
|
//
|
||||||
|
// Note: Because of a known issue with Google Accounts' implementation
|
||||||
|
// this value may differ when using Google.
|
||||||
|
//
|
||||||
|
// See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo
|
||||||
|
Issuer string
|
||||||
|
|
||||||
|
// The client ID, or set of client IDs, that this token is issued for. For
|
||||||
|
// common uses, this is the client that initialized the auth flow.
|
||||||
|
//
|
||||||
|
// This package ensures the audience contains an expected value.
|
||||||
|
Audience []string
|
||||||
|
|
||||||
|
// A unique string which identifies the end user.
|
||||||
|
Subject string
|
||||||
|
|
||||||
|
// Expiry of the token. This package will not process tokens that have
|
||||||
|
// expired unless that validation is explicitly turned off.
|
||||||
|
Expiry time.Time
|
||||||
|
// When the token was issued by the provider.
|
||||||
|
IssuedAt time.Time
|
||||||
|
|
||||||
|
// Initial nonce provided during the authentication redirect.
|
||||||
|
//
|
||||||
|
// This package does NOT provide verification of the value of this field
|
||||||
|
// and it's the user's responsibility to ensure it contains a valid value.
|
||||||
|
Nonce string
|
||||||
|
|
||||||
|
// at_hash claim, if set in the ID token. Callers can verify an access token
|
||||||
|
// that corresponds to the ID token using the VerifyAccessToken method.
|
||||||
|
AccessTokenHash string
|
||||||
|
|
||||||
|
// signature algorithm used for ID token, needed to compute a verification hash of an
|
||||||
|
// access token
|
||||||
|
sigAlgorithm string
|
||||||
|
|
||||||
|
// Raw payload of the id_token.
|
||||||
|
claims []byte
|
||||||
|
|
||||||
|
// Map of distributed claim names to claim sources
|
||||||
|
distributedClaims map[string]claimSource
|
||||||
|
}
|
||||||
|
|
||||||
|
// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
|
||||||
|
//
|
||||||
|
// idToken, err := idTokenVerifier.Verify(rawIDToken)
|
||||||
|
// if err != nil {
|
||||||
|
// // handle error
|
||||||
|
// }
|
||||||
|
// var claims struct {
|
||||||
|
// Email string `json:"email"`
|
||||||
|
// EmailVerified bool `json:"email_verified"`
|
||||||
|
// }
|
||||||
|
// if err := idToken.Claims(&claims); err != nil {
|
||||||
|
// // handle error
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
func (i *IDToken) Claims(v interface{}) error {
|
||||||
|
if i.claims == nil {
|
||||||
|
return errors.New("oidc: claims not set")
|
||||||
|
}
|
||||||
|
return json.Unmarshal(i.claims, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyAccessToken verifies that the hash of the access token that corresponds to the ID token
|
||||||
|
// matches the hash in the id token. It returns an error if the hashes don't match.
|
||||||
|
// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token
|
||||||
|
// before calling this method. See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
|
||||||
|
func (i *IDToken) VerifyAccessToken(accessToken string) error {
|
||||||
|
if i.AccessTokenHash == "" {
|
||||||
|
return errNoAtHash
|
||||||
|
}
|
||||||
|
var h hash.Hash
|
||||||
|
switch i.sigAlgorithm {
|
||||||
|
case RS256, ES256, PS256:
|
||||||
|
h = sha256.New()
|
||||||
|
case RS384, ES384, PS384:
|
||||||
|
h = sha512.New384()
|
||||||
|
case RS512, ES512, PS512:
|
||||||
|
h = sha512.New()
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm)
|
||||||
|
}
|
||||||
|
h.Write([]byte(accessToken)) // hash documents that Write will never return an error
|
||||||
|
sum := h.Sum(nil)[:h.Size()/2]
|
||||||
|
actual := base64.RawURLEncoding.EncodeToString(sum)
|
||||||
|
if actual != i.AccessTokenHash {
|
||||||
|
return errInvalidAtHash
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type idToken struct {
|
||||||
|
Issuer string `json:"iss"`
|
||||||
|
Subject string `json:"sub"`
|
||||||
|
Audience audience `json:"aud"`
|
||||||
|
Expiry jsonTime `json:"exp"`
|
||||||
|
IssuedAt jsonTime `json:"iat"`
|
||||||
|
NotBefore *jsonTime `json:"nbf"`
|
||||||
|
Nonce string `json:"nonce"`
|
||||||
|
AtHash string `json:"at_hash"`
|
||||||
|
ClaimNames map[string]string `json:"_claim_names"`
|
||||||
|
ClaimSources map[string]claimSource `json:"_claim_sources"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type claimSource struct {
|
||||||
|
Endpoint string `json:"endpoint"`
|
||||||
|
AccessToken string `json:"access_token"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type stringAsBool bool
|
||||||
|
|
||||||
|
func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
|
||||||
|
switch string(b) {
|
||||||
|
case "true", `"true"`:
|
||||||
|
*sb = true
|
||||||
|
case "false", `"false"`:
|
||||||
|
*sb = false
|
||||||
|
default:
|
||||||
|
return errors.New("invalid value for boolean")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type audience []string
|
||||||
|
|
||||||
|
func (a *audience) UnmarshalJSON(b []byte) error {
|
||||||
|
var s string
|
||||||
|
if json.Unmarshal(b, &s) == nil {
|
||||||
|
*a = audience{s}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var auds []string
|
||||||
|
if err := json.Unmarshal(b, &auds); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*a = auds
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type jsonTime time.Time
|
||||||
|
|
||||||
|
func (j *jsonTime) UnmarshalJSON(b []byte) error {
|
||||||
|
var n json.Number
|
||||||
|
if err := json.Unmarshal(b, &n); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var unix int64
|
||||||
|
|
||||||
|
if t, err := n.Int64(); err == nil {
|
||||||
|
unix = t
|
||||||
|
} else {
|
||||||
|
f, err := n.Float64()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
unix = int64(f)
|
||||||
|
}
|
||||||
|
*j = jsonTime(time.Unix(unix, 0))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalResp(r *http.Response, body []byte, v interface{}) error {
|
||||||
|
err := json.Unmarshal(body, &v)
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
ct := r.Header.Get("Content-Type")
|
||||||
|
mediaType, _, parseErr := mime.ParseMediaType(ct)
|
||||||
|
if parseErr == nil && mediaType == "application/json" {
|
||||||
|
return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
|
||||||
|
}
|
|
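A minimal sketch, not part of this change, showing the discovery path defined above: NewProvider fetches the issuer metadata, the Verifier method shown further down in this change builds an IDTokenVerifier from it, and Claims decodes extra ID token fields. The issuer and client ID are placeholders:

package sketch

import (
    "context"
    "fmt"

    "github.com/coreos/go-oidc/v3/oidc"
)

// verifyWithDiscovery is a hypothetical example built only on the API shown
// in this vendored package; all literal values are placeholders.
func verifyWithDiscovery(ctx context.Context, rawIDToken string) error {
    provider, err := oidc.NewProvider(ctx, "https://issuer.example.com")
    if err != nil {
        return fmt.Errorf("discovery failed: %w", err)
    }
    verifier := provider.Verifier(&oidc.Config{ClientID: "example-client-id"})
    idToken, err := verifier.Verify(ctx, rawIDToken)
    if err != nil {
        return fmt.Errorf("verification failed: %w", err)
    }

    var claims struct {
        Email string `json:"email"`
    }
    if err := idToken.Claims(&claims); err != nil {
        return fmt.Errorf("decoding claims: %w", err)
    }
    fmt.Println("email claim:", claims.Email)
    return nil
}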
@ -0,0 +1,344 @@
|
||||||
|
package oidc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/oauth2"
|
||||||
|
jose "gopkg.in/square/go-jose.v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
issuerGoogleAccounts = "https://accounts.google.com"
|
||||||
|
issuerGoogleAccountsNoScheme = "accounts.google.com"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TokenExpiredError indicates that Verify failed because the token was expired. This
|
||||||
|
// error does NOT indicate that the token is not also invalid for other reasons. Other
|
||||||
|
// checks might have failed if the expiration check had not failed.
|
||||||
|
type TokenExpiredError struct {
|
||||||
|
// Expiry is the time when the token expired.
|
||||||
|
Expiry time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *TokenExpiredError) Error() string {
|
||||||
|
return fmt.Sprintf("oidc: token is expired (Token Expiry: %v)", e.Expiry)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeySet is a set of public JSON Web Keys that can be used to validate the signature
|
||||||
|
// of JSON web tokens. This is expected to be backed by a remote key set through
|
||||||
|
// provider metadata discovery or an in-memory set of keys delivered out-of-band.
|
||||||
|
type KeySet interface {
|
||||||
|
// VerifySignature parses the JSON web token, verifies the signature, and returns
|
||||||
|
// the raw payload. Header and claim fields are validated by other parts of the
|
||||||
|
// package. For example, the KeySet does not need to check values such as signature
|
||||||
|
// algorithm, issuer, and audience since the IDTokenVerifier validates these values
|
||||||
|
// independently.
|
||||||
|
//
|
||||||
|
// If VerifySignature makes HTTP requests to verify the token, it's expected to
|
||||||
|
// use any HTTP client associated with the context through ClientContext.
|
||||||
|
VerifySignature(ctx context.Context, jwt string) (payload []byte, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDTokenVerifier provides verification for ID Tokens.
|
||||||
|
type IDTokenVerifier struct {
|
||||||
|
keySet KeySet
|
||||||
|
config *Config
|
||||||
|
issuer string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewVerifier returns a verifier manually constructed from a key set and issuer URL.
|
||||||
|
//
|
||||||
|
// It's easier to use provider discovery to construct an IDTokenVerifier than creating
|
||||||
|
// one directly. This method is intended to be used with providers that don't support
// metadata discovery, or to avoid round trips when the key set URL is already known.
|
||||||
|
//
|
||||||
|
// This constructor can be used to create a verifier directly using the issuer URL and
|
||||||
|
// JSON Web Key Set URL without using discovery:
|
||||||
|
//
|
||||||
|
// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
|
||||||
|
// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
|
||||||
|
//
|
||||||
|
// Or a static key set (e.g. for testing):
|
||||||
|
//
|
||||||
|
// keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub1, pub2}}
|
||||||
|
// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
|
||||||
|
//
|
||||||
|
func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier {
|
||||||
|
return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Config is the configuration for an IDTokenVerifier.
|
||||||
|
type Config struct {
|
||||||
|
// Expected audience of the token. For a majority of the cases this is expected to be
|
||||||
|
// the ID of the client that initialized the login flow. It may occasionally differ if
|
||||||
|
// the provider supports the authorizing party (azp) claim.
|
||||||
|
//
|
||||||
|
// If not provided, users must explicitly set SkipClientIDCheck.
|
||||||
|
ClientID string
|
||||||
|
// If specified, only this set of algorithms may be used to sign the JWT.
|
||||||
|
//
|
||||||
|
// If the IDTokenVerifier is created from a provider with (*Provider).Verifier, this
|
||||||
|
// defaults to the set of algorithms the provider supports. Otherwise this value
|
||||||
|
// defaults to RS256.
|
||||||
|
SupportedSigningAlgs []string
|
||||||
|
|
||||||
|
// If true, no ClientID check is performed. Must be true if the ClientID field is empty.
|
||||||
|
SkipClientIDCheck bool
|
||||||
|
// If true, token expiry is not checked.
|
||||||
|
SkipExpiryCheck bool
|
||||||
|
|
||||||
|
// SkipIssuerCheck is intended for specialized cases where the caller wishes to
|
||||||
|
// defer issuer validation. When enabled, callers MUST independently verify the Token's
|
||||||
|
// Issuer is a known good value.
|
||||||
|
//
|
||||||
|
// Mismatched issuers often indicate client mis-configuration. If mismatches are
|
||||||
|
// unexpected, evaluate if the provided issuer URL is incorrect instead of enabling
|
||||||
|
// this option.
|
||||||
|
SkipIssuerCheck bool
|
||||||
|
|
||||||
|
// Time function to check Token expiry. Defaults to time.Now
|
||||||
|
Now func() time.Time
|
||||||
|
|
||||||
|
// InsecureSkipSignatureCheck causes this package to skip JWT signature validation.
|
||||||
|
// It's intended for special cases where providers (such as Azure) use the "none"
|
||||||
|
// algorithm.
|
||||||
|
//
|
||||||
|
// This option can only be enabled safely when the ID Token is received directly
|
||||||
|
// from the provider after the token exchange.
|
||||||
|
//
|
||||||
|
// This option MUST NOT be used when receiving an ID Token from sources other
|
||||||
|
// than the token endpoint.
|
||||||
|
InsecureSkipSignatureCheck bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs.
|
||||||
|
func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
|
||||||
|
if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 {
|
||||||
|
// Make a copy so we don't modify the config values.
|
||||||
|
cp := &Config{}
|
||||||
|
*cp = *config
|
||||||
|
cp.SupportedSigningAlgs = p.algorithms
|
||||||
|
config = cp
|
||||||
|
}
|
||||||
|
return NewVerifier(p.issuer, p.remoteKeySet, config)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseJWT(p string) ([]byte, error) {
|
||||||
|
parts := strings.Split(p, ".")
|
||||||
|
if len(parts) < 2 {
|
||||||
|
return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
|
||||||
|
}
|
||||||
|
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
|
||||||
|
}
|
||||||
|
return payload, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func contains(sli []string, ele string) bool {
|
||||||
|
for _, s := range sli {
|
||||||
|
if s == ele {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// resolveDistributedClaim returns the claims from the distributed JWT token.
|
||||||
|
func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) {
|
||||||
|
req, err := http.NewRequest("GET", src.Endpoint, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("malformed request: %v", err)
|
||||||
|
}
|
||||||
|
if src.AccessToken != "" {
|
||||||
|
req.Header.Set("Authorization", "Bearer "+src.AccessToken)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := doRequest(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to read response body: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
token, err := verifier.Verify(ctx, string(body))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("malformed response body: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return token.claims, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify parses a raw ID Token, verifies it's been signed by the provider, performs
|
||||||
|
// any additional checks depending on the Config, and returns the payload.
|
||||||
|
//
|
||||||
|
// Verify does NOT do nonce validation, which is the caller's responsibility.
|
||||||
|
//
|
||||||
|
// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
|
||||||
|
//
|
||||||
|
// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
|
||||||
|
// if err != nil {
|
||||||
|
// // handle error
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// // Extract the ID Token from oauth2 token.
|
||||||
|
// rawIDToken, ok := oauth2Token.Extra("id_token").(string)
|
||||||
|
// if !ok {
|
||||||
|
// // handle error
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// token, err := verifier.Verify(ctx, rawIDToken)
|
||||||
|
//
|
||||||
|
func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
|
||||||
|
// Throw out tokens with invalid claims before trying to verify the token. This lets
|
||||||
|
// us do cheap checks before possibly re-syncing keys.
|
||||||
|
payload, err := parseJWT(rawIDToken)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
|
||||||
|
}
|
||||||
|
var token idToken
|
||||||
|
if err := json.Unmarshal(payload, &token); err != nil {
|
||||||
|
return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
distributedClaims := make(map[string]claimSource)
|
||||||
|
|
||||||
|
// step through the token to map claim names to claim sources.
|
||||||
|
for cn, src := range token.ClaimNames {
|
||||||
|
if src == "" {
|
||||||
|
return nil, fmt.Errorf("oidc: failed to obtain source from claim name")
|
||||||
|
}
|
||||||
|
s, ok := token.ClaimSources[src]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("oidc: source does not exist")
|
||||||
|
}
|
||||||
|
distributedClaims[cn] = s
|
||||||
|
}
|
||||||
|
|
||||||
|
t := &IDToken{
|
||||||
|
Issuer: token.Issuer,
|
||||||
|
Subject: token.Subject,
|
||||||
|
Audience: []string(token.Audience),
|
||||||
|
Expiry: time.Time(token.Expiry),
|
||||||
|
IssuedAt: time.Time(token.IssuedAt),
|
||||||
|
Nonce: token.Nonce,
|
||||||
|
AccessTokenHash: token.AtHash,
|
||||||
|
claims: payload,
|
||||||
|
distributedClaims: distributedClaims,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check issuer.
|
||||||
|
if !v.config.SkipIssuerCheck && t.Issuer != v.issuer {
|
||||||
|
// Google sometimes returns "accounts.google.com" as the issuer claim instead of
|
||||||
|
// the required "https://accounts.google.com". Detect this case and allow it only
|
||||||
|
// for Google.
|
||||||
|
//
|
||||||
|
// We will not add hooks to let other providers go off spec like this.
|
||||||
|
if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) {
|
||||||
|
return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty.
|
||||||
|
//
|
||||||
|
// This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party).
|
||||||
|
if !v.config.SkipClientIDCheck {
|
||||||
|
if v.config.ClientID != "" {
|
||||||
|
if !contains(t.Audience, v.config.ClientID) {
|
||||||
|
return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If SkipExpiryCheck is false, make sure the token is not expired.
|
||||||
|
if !v.config.SkipExpiryCheck {
|
||||||
|
now := time.Now
|
||||||
|
if v.config.Now != nil {
|
||||||
|
now = v.config.Now
|
||||||
|
}
|
||||||
|
nowTime := now()
|
||||||
|
|
||||||
|
if t.Expiry.Before(nowTime) {
|
||||||
|
return nil, &TokenExpiredError{Expiry: t.Expiry}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If nbf claim is provided in token, ensure that it is indeed in the past.
|
||||||
|
if token.NotBefore != nil {
|
||||||
|
nbfTime := time.Time(*token.NotBefore)
|
||||||
|
// Set to 5 minutes since this is what other OpenID Connect providers do to deal with clock skew.
|
||||||
|
// https://github.com/AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet/blob/6.12.2/src/Microsoft.IdentityModel.Tokens/TokenValidationParameters.cs#L149-L153
|
||||||
|
leeway := 5 * time.Minute
|
||||||
|
|
||||||
|
if nowTime.Add(leeway).Before(nbfTime) {
|
||||||
|
return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.config.InsecureSkipSignatureCheck {
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
jws, err := jose.ParseSigned(rawIDToken)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch len(jws.Signatures) {
|
||||||
|
case 0:
|
||||||
|
return nil, fmt.Errorf("oidc: id token not signed")
|
||||||
|
case 1:
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
|
||||||
|
}
|
||||||
|
|
||||||
|
sig := jws.Signatures[0]
|
||||||
|
supportedSigAlgs := v.config.SupportedSigningAlgs
|
||||||
|
if len(supportedSigAlgs) == 0 {
|
||||||
|
supportedSigAlgs = []string{RS256}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !contains(supportedSigAlgs, sig.Header.Algorithm) {
|
||||||
|
return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.sigAlgorithm = sig.Header.Algorithm
|
||||||
|
|
||||||
|
ctx = context.WithValue(ctx, parsedJWTKey, jws)
|
||||||
|
gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to verify signature: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure that the payload returned by go-jose actually matches the payload parsed earlier.
|
||||||
|
if !bytes.Equal(gotPayload, payload) {
|
||||||
|
return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
|
||||||
|
}
|
||||||
|
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Nonce returns an auth code option which requires the ID Token created by the
|
||||||
|
// OpenID Connect provider to contain the specified nonce.
|
||||||
|
func Nonce(nonce string) oauth2.AuthCodeOption {
|
||||||
|
return oauth2.SetAuthURLParam("nonce", nonce)
|
||||||
|
}
|
|
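A minimal sketch, not part of this change, of distinguishing an expired token from other verification failures using the TokenExpiredError type defined above; the verifier and raw token are assumed to come from the caller:

package sketch

import (
    "context"
    "errors"
    "fmt"

    "github.com/coreos/go-oidc/v3/oidc"
)

// classifyError is a hypothetical example: Verify returns *TokenExpiredError
// for expired tokens, so errors.As lets callers report expiry separately.
func classifyError(ctx context.Context, verifier *oidc.IDTokenVerifier, rawIDToken string) string {
    _, err := verifier.Verify(ctx, rawIDToken)
    if err == nil {
        return "token is valid"
    }
    var expired *oidc.TokenExpiredError
    if errors.As(err, &expired) {
        return fmt.Sprintf("token expired at %v", expired.Expiry)
    }
    return fmt.Sprintf("token rejected: %v", err)
}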
@ -0,0 +1,524 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package jsonpb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/encoding/protojson"
|
||||||
|
protoV2 "google.golang.org/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/reflect/protoregistry"
|
||||||
|
)
|
||||||
|
|
||||||
|
const wrapJSONUnmarshalV2 = false
|
||||||
|
|
||||||
|
// UnmarshalNext unmarshals the next JSON object from d into m.
|
||||||
|
func UnmarshalNext(d *json.Decoder, m proto.Message) error {
|
||||||
|
return new(Unmarshaler).UnmarshalNext(d, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal unmarshals a JSON object from r into m.
|
||||||
|
func Unmarshal(r io.Reader, m proto.Message) error {
|
||||||
|
return new(Unmarshaler).Unmarshal(r, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalString unmarshals a JSON object from s into m.
|
||||||
|
func UnmarshalString(s string, m proto.Message) error {
|
||||||
|
return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshaler is a configurable object for converting from a JSON
|
||||||
|
// representation to a protocol buffer object.
|
||||||
|
type Unmarshaler struct {
|
||||||
|
// AllowUnknownFields specifies whether to allow messages to contain
|
||||||
|
// unknown JSON fields, as opposed to failing to unmarshal.
|
||||||
|
AllowUnknownFields bool
|
||||||
|
|
||||||
|
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
|
||||||
|
// If unset, the global registry is used by default.
|
||||||
|
AnyResolver AnyResolver
|
||||||
|
}
|
||||||
|
|
||||||
|
// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
|
||||||
|
// they are unmarshaled from JSON. Messages that implement this should also
|
||||||
|
// implement JSONPBMarshaler so that the custom format can be produced.
|
||||||
|
//
|
||||||
|
// The JSON unmarshaling must follow the JSON to proto specification:
|
||||||
|
// https://developers.google.com/protocol-buffers/docs/proto3#json
|
||||||
|
//
|
||||||
|
// Deprecated: Custom types should implement protobuf reflection instead.
|
||||||
|
type JSONPBUnmarshaler interface {
|
||||||
|
UnmarshalJSONPB(*Unmarshaler, []byte) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal unmarshals a JSON object from r into m.
|
||||||
|
func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
|
||||||
|
return u.UnmarshalNext(json.NewDecoder(r), m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalNext unmarshals the next JSON object from d into m.
|
||||||
|
func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
|
||||||
|
if m == nil {
|
||||||
|
return errors.New("invalid nil message")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse the next JSON object from the stream.
|
||||||
|
raw := json.RawMessage{}
|
||||||
|
if err := d.Decode(&raw); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for custom unmarshalers first since they may not properly
|
||||||
|
// implement protobuf reflection that the logic below relies on.
|
||||||
|
if jsu, ok := m.(JSONPBUnmarshaler); ok {
|
||||||
|
return jsu.UnmarshalJSONPB(u, raw)
|
||||||
|
}
|
||||||
|
|
||||||
|
mr := proto.MessageReflect(m)
|
||||||
|
|
||||||
|
// NOTE: For historical reasons, a top-level null is treated as a noop.
|
||||||
|
// This is incorrect, but kept for compatibility.
|
||||||
|
if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if wrapJSONUnmarshalV2 {
|
||||||
|
// NOTE: If input message is non-empty, we need to preserve merge semantics
|
||||||
|
// of the old jsonpb implementation. These semantics are not supported by
|
||||||
|
// the protobuf JSON specification.
|
||||||
|
isEmpty := true
|
||||||
|
mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
|
||||||
|
isEmpty = false // at least one iteration implies non-empty
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
if !isEmpty {
|
||||||
|
// Perform unmarshaling into a newly allocated, empty message.
|
||||||
|
mr = mr.New()
|
||||||
|
|
||||||
|
// Use a defer to copy all unmarshaled fields into the original message.
|
||||||
|
dst := proto.MessageReflect(m)
|
||||||
|
defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||||
|
dst.Set(fd, v)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal using the v2 JSON unmarshaler.
|
||||||
|
opts := protojson.UnmarshalOptions{
|
||||||
|
DiscardUnknown: u.AllowUnknownFields,
|
||||||
|
}
|
||||||
|
if u.AnyResolver != nil {
|
||||||
|
opts.Resolver = anyResolver{u.AnyResolver}
|
||||||
|
}
|
||||||
|
return opts.Unmarshal(raw, mr.Interface())
|
||||||
|
} else {
|
||||||
|
if err := u.unmarshalMessage(mr, raw); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return protoV2.CheckInitialized(mr.Interface())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
|
||||||
|
md := m.Descriptor()
|
||||||
|
fds := md.Fields()
|
||||||
|
|
||||||
|
if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
|
||||||
|
return jsu.UnmarshalJSONPB(u, in)
|
||||||
|
}
|
||||||
|
|
||||||
|
if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch wellKnownType(md.FullName()) {
|
||||||
|
case "Any":
|
||||||
|
var jsonObject map[string]json.RawMessage
|
||||||
|
if err := json.Unmarshal(in, &jsonObject); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
rawTypeURL, ok := jsonObject["@type"]
|
||||||
|
if !ok {
|
||||||
|
return errors.New("Any JSON doesn't have '@type'")
|
||||||
|
}
|
||||||
|
typeURL, err := unquoteString(string(rawTypeURL))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
|
||||||
|
}
|
||||||
|
m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))
|
||||||
|
|
||||||
|
var m2 protoreflect.Message
|
||||||
|
if u.AnyResolver != nil {
|
||||||
|
mi, err := u.AnyResolver.Resolve(typeURL)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m2 = proto.MessageReflect(mi)
|
||||||
|
} else {
|
||||||
|
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
|
||||||
|
if err != nil {
|
||||||
|
if err == protoregistry.NotFound {
|
||||||
|
return fmt.Errorf("could not resolve Any message type: %v", typeURL)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m2 = mt.New()
|
||||||
|
}
|
||||||
|
|
||||||
|
if wellKnownType(m2.Descriptor().FullName()) != "" {
|
||||||
|
rawValue, ok := jsonObject["value"]
|
||||||
|
if !ok {
|
||||||
|
return errors.New("Any JSON doesn't have 'value'")
|
||||||
|
}
|
||||||
|
if err := u.unmarshalMessage(m2, rawValue); err != nil {
|
||||||
|
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
delete(jsonObject, "@type")
|
||||||
|
rawJSON, err := json.Marshal(jsonObject)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
|
||||||
|
}
|
||||||
|
if err = u.unmarshalMessage(m2, rawJSON); err != nil {
|
||||||
|
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
rawWire, err := protoV2.Marshal(m2.Interface())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
|
||||||
|
}
|
||||||
|
m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
|
||||||
|
return nil
|
||||||
|
case "BoolValue", "BytesValue", "StringValue",
|
||||||
|
"Int32Value", "UInt32Value", "FloatValue",
|
||||||
|
"Int64Value", "UInt64Value", "DoubleValue":
|
||||||
|
fd := fds.ByNumber(1)
|
||||||
|
v, err := u.unmarshalValue(m.NewField(fd), in, fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.Set(fd, v)
|
||||||
|
return nil
|
||||||
|
case "Duration":
|
||||||
|
v, err := unquoteString(string(in))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
d, err := time.ParseDuration(v)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("bad Duration: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sec := d.Nanoseconds() / 1e9
|
||||||
|
nsec := d.Nanoseconds() % 1e9
|
||||||
|
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
|
||||||
|
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
|
||||||
|
return nil
|
||||||
|
case "Timestamp":
|
||||||
|
v, err := unquoteString(string(in))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t, err := time.Parse(time.RFC3339Nano, v)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("bad Timestamp: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sec := t.Unix()
|
||||||
|
nsec := t.Nanosecond()
|
||||||
|
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
|
||||||
|
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
|
||||||
|
return nil
|
||||||
|
case "Value":
|
||||||
|
switch {
|
||||||
|
case string(in) == "null":
|
||||||
|
m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
|
||||||
|
case string(in) == "true":
|
||||||
|
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
|
||||||
|
case string(in) == "false":
|
||||||
|
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
|
||||||
|
case hasPrefixAndSuffix('"', in, '"'):
|
||||||
|
s, err := unquoteString(string(in))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unrecognized type for Value %q", in)
|
||||||
|
}
|
||||||
|
m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
|
||||||
|
case hasPrefixAndSuffix('[', in, ']'):
|
||||||
|
v := m.Mutable(fds.ByNumber(6))
|
||||||
|
return u.unmarshalMessage(v.Message(), in)
|
||||||
|
case hasPrefixAndSuffix('{', in, '}'):
|
||||||
|
v := m.Mutable(fds.ByNumber(5))
|
||||||
|
return u.unmarshalMessage(v.Message(), in)
|
||||||
|
default:
|
||||||
|
f, err := strconv.ParseFloat(string(in), 0)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unrecognized type for Value %q", in)
|
||||||
|
}
|
||||||
|
m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
case "ListValue":
|
||||||
|
var jsonArray []json.RawMessage
|
||||||
|
if err := json.Unmarshal(in, &jsonArray); err != nil {
|
||||||
|
return fmt.Errorf("bad ListValue: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
lv := m.Mutable(fds.ByNumber(1)).List()
|
||||||
|
for _, raw := range jsonArray {
|
||||||
|
ve := lv.NewElement()
|
||||||
|
if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
lv.Append(ve)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
case "Struct":
|
||||||
|
var jsonObject map[string]json.RawMessage
|
||||||
|
if err := json.Unmarshal(in, &jsonObject); err != nil {
|
||||||
|
return fmt.Errorf("bad StructValue: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
mv := m.Mutable(fds.ByNumber(1)).Map()
|
||||||
|
for key, raw := range jsonObject {
|
||||||
|
kv := protoreflect.ValueOf(key).MapKey()
|
||||||
|
vv := mv.NewValue()
|
||||||
|
if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
|
||||||
|
return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
|
||||||
|
}
|
||||||
|
mv.Set(kv, vv)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var jsonObject map[string]json.RawMessage
|
||||||
|
if err := json.Unmarshal(in, &jsonObject); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle known fields.
|
||||||
|
for i := 0; i < fds.Len(); i++ {
|
||||||
|
fd := fds.Get(i)
|
||||||
|
if fd.IsWeak() && fd.Message().IsPlaceholder() {
|
||||||
|
continue // weak reference is not linked in
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search for any raw JSON value associated with this field.
|
||||||
|
var raw json.RawMessage
|
||||||
|
name := string(fd.Name())
|
||||||
|
if fd.Kind() == protoreflect.GroupKind {
|
||||||
|
name = string(fd.Message().Name())
|
||||||
|
}
|
||||||
|
if v, ok := jsonObject[name]; ok {
|
||||||
|
delete(jsonObject, name)
|
||||||
|
raw = v
|
||||||
|
}
|
||||||
|
name = string(fd.JSONName())
|
||||||
|
if v, ok := jsonObject[name]; ok {
|
||||||
|
delete(jsonObject, name)
|
||||||
|
raw = v
|
||||||
|
}
|
||||||
|
|
||||||
|
field := m.NewField(fd)
|
||||||
|
// Unmarshal the field value.
|
||||||
|
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
v, err := u.unmarshalValue(field, raw, fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.Set(fd, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle extension fields.
|
||||||
|
for name, raw := range jsonObject {
|
||||||
|
if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve the extension field by name.
|
||||||
|
xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
|
||||||
|
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
|
||||||
|
if xt == nil && isMessageSet(md) {
|
||||||
|
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
|
||||||
|
}
|
||||||
|
if xt == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
delete(jsonObject, name)
|
||||||
|
fd := xt.TypeDescriptor()
|
||||||
|
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
|
||||||
|
return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
|
||||||
|
}
|
||||||
|
|
||||||
|
field := m.NewField(fd)
|
||||||
|
// Unmarshal the field value.
|
||||||
|
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
v, err := u.unmarshalValue(field, raw, fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.Set(fd, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !u.AllowUnknownFields && len(jsonObject) > 0 {
|
||||||
|
for name := range jsonObject {
|
||||||
|
return fmt.Errorf("unknown field %q in %v", name, md.FullName())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
|
||||||
|
if md := fd.Message(); md != nil {
|
||||||
|
return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
|
||||||
|
if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
|
||||||
|
_, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||||
|
switch {
|
||||||
|
case fd.IsList():
|
||||||
|
var jsonArray []json.RawMessage
|
||||||
|
if err := json.Unmarshal(in, &jsonArray); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
lv := v.List()
|
||||||
|
for _, raw := range jsonArray {
|
||||||
|
ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
|
||||||
|
if err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
lv.Append(ve)
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
case fd.IsMap():
|
||||||
|
var jsonObject map[string]json.RawMessage
|
||||||
|
if err := json.Unmarshal(in, &jsonObject); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
kfd := fd.MapKey()
|
||||||
|
vfd := fd.MapValue()
|
||||||
|
mv := v.Map()
|
||||||
|
for key, raw := range jsonObject {
|
||||||
|
var kv protoreflect.MapKey
|
||||||
|
if kfd.Kind() == protoreflect.StringKind {
|
||||||
|
kv = protoreflect.ValueOf(key).MapKey()
|
||||||
|
} else {
|
||||||
|
v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
|
||||||
|
if err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
kv = v.MapKey()
|
||||||
|
}
|
||||||
|
|
||||||
|
vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
|
||||||
|
if err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
mv.Set(kv, vv)
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
default:
|
||||||
|
return u.unmarshalSingularValue(v, in, fd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var nonFinite = map[string]float64{
|
||||||
|
`"NaN"`: math.NaN(),
|
||||||
|
`"Infinity"`: math.Inf(+1),
|
||||||
|
`"-Infinity"`: math.Inf(-1),
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||||
|
switch fd.Kind() {
|
||||||
|
case protoreflect.BoolKind:
|
||||||
|
return unmarshalValue(in, new(bool))
|
||||||
|
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
||||||
|
return unmarshalValue(trimQuote(in), new(int32))
|
||||||
|
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||||
|
return unmarshalValue(trimQuote(in), new(int64))
|
||||||
|
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
||||||
|
return unmarshalValue(trimQuote(in), new(uint32))
|
||||||
|
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||||
|
return unmarshalValue(trimQuote(in), new(uint64))
|
||||||
|
case protoreflect.FloatKind:
|
||||||
|
if f, ok := nonFinite[string(in)]; ok {
|
||||||
|
return protoreflect.ValueOfFloat32(float32(f)), nil
|
||||||
|
}
|
||||||
|
return unmarshalValue(trimQuote(in), new(float32))
|
||||||
|
case protoreflect.DoubleKind:
|
||||||
|
if f, ok := nonFinite[string(in)]; ok {
|
||||||
|
return protoreflect.ValueOfFloat64(float64(f)), nil
|
||||||
|
}
|
||||||
|
return unmarshalValue(trimQuote(in), new(float64))
|
||||||
|
case protoreflect.StringKind:
|
||||||
|
return unmarshalValue(in, new(string))
|
||||||
|
case protoreflect.BytesKind:
|
||||||
|
return unmarshalValue(in, new([]byte))
|
||||||
|
case protoreflect.EnumKind:
|
||||||
|
if hasPrefixAndSuffix('"', in, '"') {
|
||||||
|
vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
|
||||||
|
if vd == nil {
|
||||||
|
return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
|
||||||
|
}
|
||||||
|
return protoreflect.ValueOfEnum(vd.Number()), nil
|
||||||
|
}
|
||||||
|
return unmarshalValue(in, new(protoreflect.EnumNumber))
|
||||||
|
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||||
|
err := u.unmarshalMessage(v.Message(), in)
|
||||||
|
return v, err
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
|
||||||
|
err := json.Unmarshal(in, v)
|
||||||
|
return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
|
||||||
|
}
|
||||||
|
|
||||||
|
func unquoteString(in string) (out string, err error) {
|
||||||
|
err = json.Unmarshal([]byte(in), &out)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
|
||||||
|
if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// trimQuote is like unquoteString but simply strips surrounding quotes.
|
||||||
|
// This is incorrect, but is behavior done by the legacy implementation.
|
||||||
|
func trimQuote(in []byte) []byte {
|
||||||
|
if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
|
||||||
|
in = in[1 : len(in)-1]
|
||||||
|
}
|
||||||
|
return in
|
||||||
|
}
|
|
@ -0,0 +1,559 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package jsonpb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/encoding/protojson"
|
||||||
|
protoV2 "google.golang.org/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/reflect/protoregistry"
|
||||||
|
)
|
||||||
|
|
||||||
|
const wrapJSONMarshalV2 = false
|
||||||
|
|
||||||
|
// Marshaler is a configurable object for marshaling protocol buffer messages
|
||||||
|
// to the specified JSON representation.
|
||||||
|
type Marshaler struct {
|
||||||
|
// OrigName specifies whether to use the original protobuf name for fields.
|
||||||
|
OrigName bool
|
||||||
|
|
||||||
|
// EnumsAsInts specifies whether to render enum values as integers,
|
||||||
|
// as opposed to string values.
|
||||||
|
EnumsAsInts bool
|
||||||
|
|
||||||
|
// EmitDefaults specifies whether to render fields with zero values.
|
||||||
|
EmitDefaults bool
|
||||||
|
|
||||||
|
// Indent controls whether the output is compact or not.
|
||||||
|
// If empty, the output is compact JSON. Otherwise, every JSON object
|
||||||
|
// entry and JSON array value will be on its own line.
|
||||||
|
// Each line will be preceded by repeated copies of Indent, where the
|
||||||
|
// number of copies is the current indentation depth.
|
||||||
|
Indent string
|
||||||
|
|
||||||
|
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
|
||||||
|
// If unset, the global registry is used by default.
|
||||||
|
AnyResolver AnyResolver
|
||||||
|
}
|
||||||
|
|
||||||
|
// JSONPBMarshaler is implemented by protobuf messages that customize the
|
||||||
|
// way they are marshaled to JSON. Messages that implement this should also
|
||||||
|
// implement JSONPBUnmarshaler so that the custom format can be parsed.
|
||||||
|
//
|
||||||
|
// The JSON marshaling must follow the proto to JSON specification:
|
||||||
|
// https://developers.google.com/protocol-buffers/docs/proto3#json
|
||||||
|
//
|
||||||
|
// Deprecated: Custom types should implement protobuf reflection instead.
|
||||||
|
type JSONPBMarshaler interface {
|
||||||
|
MarshalJSONPB(*Marshaler) ([]byte, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal serializes a protobuf message as JSON into w.
|
||||||
|
func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
|
||||||
|
b, err := jm.marshal(m)
|
||||||
|
if len(b) > 0 {
|
||||||
|
if _, err := w.Write(b); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalToString serializes a protobuf message as JSON in string form.
|
||||||
|
func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
|
||||||
|
b, err := jm.marshal(m)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return string(b), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
|
||||||
|
v := reflect.ValueOf(m)
|
||||||
|
if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
|
||||||
|
return nil, errors.New("Marshal called with nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for custom marshalers first since they may not properly
|
||||||
|
// implement protobuf reflection that the logic below relies on.
|
||||||
|
if jsm, ok := m.(JSONPBMarshaler); ok {
|
||||||
|
return jsm.MarshalJSONPB(jm)
|
||||||
|
}
|
||||||
|
|
||||||
|
if wrapJSONMarshalV2 {
|
||||||
|
opts := protojson.MarshalOptions{
|
||||||
|
UseProtoNames: jm.OrigName,
|
||||||
|
UseEnumNumbers: jm.EnumsAsInts,
|
||||||
|
EmitUnpopulated: jm.EmitDefaults,
|
||||||
|
Indent: jm.Indent,
|
||||||
|
}
|
||||||
|
if jm.AnyResolver != nil {
|
||||||
|
opts.Resolver = anyResolver{jm.AnyResolver}
|
||||||
|
}
|
||||||
|
return opts.Marshal(proto.MessageReflect(m).Interface())
|
||||||
|
} else {
|
||||||
|
// Check for unpopulated required fields first.
|
||||||
|
m2 := proto.MessageReflect(m)
|
||||||
|
if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
w := jsonWriter{Marshaler: jm}
|
||||||
|
err := w.marshalMessage(m2, "", "")
|
||||||
|
return w.buf, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type jsonWriter struct {
|
||||||
|
*Marshaler
|
||||||
|
buf []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *jsonWriter) write(s string) {
|
||||||
|
w.buf = append(w.buf, s...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
|
||||||
|
if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
|
||||||
|
b, err := jsm.MarshalJSONPB(w.Marshaler)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if typeURL != "" {
|
||||||
|
// we are marshaling this object to an Any type
|
||||||
|
var js map[string]*json.RawMessage
|
||||||
|
if err = json.Unmarshal(b, &js); err != nil {
|
||||||
|
return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
|
||||||
|
}
|
||||||
|
turl, err := json.Marshal(typeURL)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
|
||||||
|
}
|
||||||
|
js["@type"] = (*json.RawMessage)(&turl)
|
||||||
|
if b, err = json.Marshal(js); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.write(string(b))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
md := m.Descriptor()
|
||||||
|
fds := md.Fields()
|
||||||
|
|
||||||
|
// Handle well-known types.
|
||||||
|
const secondInNanos = int64(time.Second / time.Nanosecond)
|
||||||
|
switch wellKnownType(md.FullName()) {
|
||||||
|
case "Any":
|
||||||
|
return w.marshalAny(m, indent)
|
||||||
|
case "BoolValue", "BytesValue", "StringValue",
|
||||||
|
"Int32Value", "UInt32Value", "FloatValue",
|
||||||
|
"Int64Value", "UInt64Value", "DoubleValue":
|
||||||
|
fd := fds.ByNumber(1)
|
||||||
|
return w.marshalValue(fd, m.Get(fd), indent)
|
||||||
|
case "Duration":
|
||||||
|
const maxSecondsInDuration = 315576000000
|
||||||
|
// "Generated output always contains 0, 3, 6, or 9 fractional digits,
|
||||||
|
// depending on required precision."
|
||||||
|
s := m.Get(fds.ByNumber(1)).Int()
|
||||||
|
ns := m.Get(fds.ByNumber(2)).Int()
|
||||||
|
if s < -maxSecondsInDuration || s > maxSecondsInDuration {
|
||||||
|
return fmt.Errorf("seconds out of range %v", s)
|
||||||
|
}
|
||||||
|
if ns <= -secondInNanos || ns >= secondInNanos {
|
||||||
|
return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
|
||||||
|
}
|
||||||
|
if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
|
||||||
|
return errors.New("signs of seconds and nanos do not match")
|
||||||
|
}
|
||||||
|
var sign string
|
||||||
|
if s < 0 || ns < 0 {
|
||||||
|
sign, s, ns = "-", -1*s, -1*ns
|
||||||
|
}
|
||||||
|
x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, ".000")
|
||||||
|
w.write(fmt.Sprintf(`"%vs"`, x))
|
||||||
|
return nil
|
||||||
|
case "Timestamp":
|
||||||
|
// "RFC 3339, where generated output will always be Z-normalized
|
||||||
|
// and uses 0, 3, 6 or 9 fractional digits."
|
||||||
|
s := m.Get(fds.ByNumber(1)).Int()
|
||||||
|
ns := m.Get(fds.ByNumber(2)).Int()
|
||||||
|
if ns < 0 || ns >= secondInNanos {
|
||||||
|
return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
|
||||||
|
}
|
||||||
|
t := time.Unix(s, ns).UTC()
|
||||||
|
// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
|
||||||
|
x := t.Format("2006-01-02T15:04:05.000000000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, ".000")
|
||||||
|
w.write(fmt.Sprintf(`"%vZ"`, x))
|
||||||
|
return nil
|
||||||
|
case "Value":
|
||||||
|
// JSON value; which is a null, number, string, bool, object, or array.
|
||||||
|
od := md.Oneofs().Get(0)
|
||||||
|
fd := m.WhichOneof(od)
|
||||||
|
if fd == nil {
|
||||||
|
return errors.New("nil Value")
|
||||||
|
}
|
||||||
|
return w.marshalValue(fd, m.Get(fd), indent)
|
||||||
|
case "Struct", "ListValue":
|
||||||
|
// JSON object or array.
|
||||||
|
fd := fds.ByNumber(1)
|
||||||
|
return w.marshalValue(fd, m.Get(fd), indent)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.write("{")
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write("\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
firstField := true
|
||||||
|
if typeURL != "" {
|
||||||
|
if err := w.marshalTypeURL(indent, typeURL); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
firstField = false
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < fds.Len(); {
|
||||||
|
fd := fds.Get(i)
|
||||||
|
if od := fd.ContainingOneof(); od != nil {
|
||||||
|
fd = m.WhichOneof(od)
|
||||||
|
i += od.Fields().Len()
|
||||||
|
if fd == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
|
||||||
|
v := m.Get(fd)
|
||||||
|
|
||||||
|
if !m.Has(fd) {
|
||||||
|
if !w.EmitDefaults || fd.ContainingOneof() != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
|
||||||
|
v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !firstField {
|
||||||
|
w.writeComma()
|
||||||
|
}
|
||||||
|
if err := w.marshalField(fd, v, indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
firstField = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle proto2 extensions.
|
||||||
|
if md.ExtensionRanges().Len() > 0 {
|
||||||
|
// Collect a sorted list of all extension descriptor and values.
|
||||||
|
type ext struct {
|
||||||
|
desc protoreflect.FieldDescriptor
|
||||||
|
val protoreflect.Value
|
||||||
|
}
|
||||||
|
var exts []ext
|
||||||
|
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||||
|
if fd.IsExtension() {
|
||||||
|
exts = append(exts, ext{fd, v})
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
sort.Slice(exts, func(i, j int) bool {
|
||||||
|
return exts[i].desc.Number() < exts[j].desc.Number()
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, ext := range exts {
|
||||||
|
if !firstField {
|
||||||
|
w.writeComma()
|
||||||
|
}
|
||||||
|
if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
firstField = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write("\n")
|
||||||
|
w.write(indent)
|
||||||
|
}
|
||||||
|
w.write("}")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *jsonWriter) writeComma() {
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write(",\n")
|
||||||
|
} else {
|
||||||
|
w.write(",")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
|
||||||
|
// "If the Any contains a value that has a special JSON mapping,
|
||||||
|
// it will be converted as follows: {"@type": xxx, "value": yyy}.
|
||||||
|
// Otherwise, the value will be converted into a JSON object,
|
||||||
|
// and the "@type" field will be inserted to indicate the actual data type."
|
||||||
|
md := m.Descriptor()
|
||||||
|
typeURL := m.Get(md.Fields().ByNumber(1)).String()
|
||||||
|
rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
|
||||||
|
|
||||||
|
var m2 protoreflect.Message
|
||||||
|
if w.AnyResolver != nil {
|
||||||
|
mi, err := w.AnyResolver.Resolve(typeURL)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m2 = proto.MessageReflect(mi)
|
||||||
|
} else {
|
||||||
|
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m2 = mt.New()
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if wellKnownType(m2.Descriptor().FullName()) == "" {
|
||||||
|
return w.marshalMessage(m2, indent, typeURL)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.write("{")
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write("\n")
|
||||||
|
}
|
||||||
|
if err := w.marshalTypeURL(indent, typeURL); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.writeComma()
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write(indent)
|
||||||
|
w.write(w.Indent)
|
||||||
|
w.write(`"value": `)
|
||||||
|
} else {
|
||||||
|
w.write(`"value":`)
|
||||||
|
}
|
||||||
|
if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write("\n")
|
||||||
|
w.write(indent)
|
||||||
|
}
|
||||||
|
w.write("}")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write(indent)
|
||||||
|
w.write(w.Indent)
|
||||||
|
}
|
||||||
|
w.write(`"@type":`)
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write(" ")
|
||||||
|
}
|
||||||
|
b, err := json.Marshal(typeURL)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.write(string(b))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalField writes field description and value to the Writer.
|
||||||
|
func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write(indent)
|
||||||
|
w.write(w.Indent)
|
||||||
|
}
|
||||||
|
w.write(`"`)
|
||||||
|
switch {
|
||||||
|
case fd.IsExtension():
|
||||||
|
// For message set, use the fname of the message as the extension name.
|
||||||
|
name := string(fd.FullName())
|
||||||
|
if isMessageSet(fd.ContainingMessage()) {
|
||||||
|
name = strings.TrimSuffix(name, ".message_set_extension")
|
||||||
|
}
|
||||||
|
|
||||||
|
w.write("[" + name + "]")
|
||||||
|
case w.OrigName:
|
||||||
|
name := string(fd.Name())
|
||||||
|
if fd.Kind() == protoreflect.GroupKind {
|
||||||
|
name = string(fd.Message().Name())
|
||||||
|
}
|
||||||
|
w.write(name)
|
||||||
|
default:
|
||||||
|
w.write(string(fd.JSONName()))
|
||||||
|
}
|
||||||
|
w.write(`":`)
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write(" ")
|
||||||
|
}
|
||||||
|
return w.marshalValue(fd, v, indent)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
|
||||||
|
switch {
|
||||||
|
case fd.IsList():
|
||||||
|
w.write("[")
|
||||||
|
comma := ""
|
||||||
|
lv := v.List()
|
||||||
|
for i := 0; i < lv.Len(); i++ {
|
||||||
|
w.write(comma)
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write("\n")
|
||||||
|
w.write(indent)
|
||||||
|
w.write(w.Indent)
|
||||||
|
w.write(w.Indent)
|
||||||
|
}
|
||||||
|
if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
comma = ","
|
||||||
|
}
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write("\n")
|
||||||
|
w.write(indent)
|
||||||
|
w.write(w.Indent)
|
||||||
|
}
|
||||||
|
w.write("]")
|
||||||
|
return nil
|
||||||
|
case fd.IsMap():
|
||||||
|
kfd := fd.MapKey()
|
||||||
|
vfd := fd.MapValue()
|
||||||
|
mv := v.Map()
|
||||||
|
|
||||||
|
// Collect a sorted list of all map keys and values.
|
||||||
|
type entry struct{ key, val protoreflect.Value }
|
||||||
|
var entries []entry
|
||||||
|
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
|
||||||
|
entries = append(entries, entry{k.Value(), v})
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
sort.Slice(entries, func(i, j int) bool {
|
||||||
|
switch kfd.Kind() {
|
||||||
|
case protoreflect.BoolKind:
|
||||||
|
return !entries[i].key.Bool() && entries[j].key.Bool()
|
||||||
|
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||||
|
return entries[i].key.Int() < entries[j].key.Int()
|
||||||
|
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||||
|
return entries[i].key.Uint() < entries[j].key.Uint()
|
||||||
|
case protoreflect.StringKind:
|
||||||
|
return entries[i].key.String() < entries[j].key.String()
|
||||||
|
default:
|
||||||
|
panic("invalid kind")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
w.write(`{`)
|
||||||
|
comma := ""
|
||||||
|
for _, entry := range entries {
|
||||||
|
w.write(comma)
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write("\n")
|
||||||
|
w.write(indent)
|
||||||
|
w.write(w.Indent)
|
||||||
|
w.write(w.Indent)
|
||||||
|
}
|
||||||
|
|
||||||
|
s := fmt.Sprint(entry.key.Interface())
|
||||||
|
b, err := json.Marshal(s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.write(string(b))
|
||||||
|
|
||||||
|
w.write(`:`)
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write(` `)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
comma = ","
|
||||||
|
}
|
||||||
|
if w.Indent != "" {
|
||||||
|
w.write("\n")
|
||||||
|
w.write(indent)
|
||||||
|
w.write(w.Indent)
|
||||||
|
}
|
||||||
|
w.write(`}`)
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
return w.marshalSingularValue(fd, v, indent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
|
||||||
|
switch {
|
||||||
|
case !v.IsValid():
|
||||||
|
w.write("null")
|
||||||
|
return nil
|
||||||
|
case fd.Message() != nil:
|
||||||
|
return w.marshalMessage(v.Message(), indent+w.Indent, "")
|
||||||
|
case fd.Enum() != nil:
|
||||||
|
if fd.Enum().FullName() == "google.protobuf.NullValue" {
|
||||||
|
w.write("null")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
vd := fd.Enum().Values().ByNumber(v.Enum())
|
||||||
|
if vd == nil || w.EnumsAsInts {
|
||||||
|
w.write(strconv.Itoa(int(v.Enum())))
|
||||||
|
} else {
|
||||||
|
w.write(`"` + string(vd.Name()) + `"`)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
switch v.Interface().(type) {
|
||||||
|
case float32, float64:
|
||||||
|
switch {
|
||||||
|
case math.IsInf(v.Float(), +1):
|
||||||
|
w.write(`"Infinity"`)
|
||||||
|
return nil
|
||||||
|
case math.IsInf(v.Float(), -1):
|
||||||
|
w.write(`"-Infinity"`)
|
||||||
|
return nil
|
||||||
|
case math.IsNaN(v.Float()):
|
||||||
|
w.write(`"NaN"`)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
case int64, uint64:
|
||||||
|
w.write(fmt.Sprintf(`"%d"`, v.Interface()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := json.Marshal(v.Interface())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.write(string(b))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,69 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package jsonpb provides functionality to marshal and unmarshal between a
|
||||||
|
// protocol buffer message and JSON. It follows the specification at
|
||||||
|
// https://developers.google.com/protocol-buffers/docs/proto3#json.
|
||||||
|
//
|
||||||
|
// Do not rely on the default behavior of the standard encoding/json package
|
||||||
|
// when called on generated message types as it does not operate correctly.
|
||||||
|
//
|
||||||
|
// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
|
||||||
|
// package instead.
|
||||||
|
package jsonpb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/reflect/protoregistry"
|
||||||
|
"google.golang.org/protobuf/runtime/protoimpl"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AnyResolver takes a type URL, present in an Any message,
|
||||||
|
// and resolves it into an instance of the associated message.
|
||||||
|
type AnyResolver interface {
|
||||||
|
Resolve(typeURL string) (proto.Message, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type anyResolver struct{ AnyResolver }
|
||||||
|
|
||||||
|
func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
|
||||||
|
return r.FindMessageByURL(string(message))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
|
||||||
|
m, err := r.Resolve(url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return protoimpl.X.MessageTypeOf(m), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
|
||||||
|
return protoregistry.GlobalTypes.FindExtensionByName(field)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
|
||||||
|
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
|
||||||
|
}
|
||||||
|
|
||||||
|
func wellKnownType(s protoreflect.FullName) string {
|
||||||
|
if s.Parent() == "google.protobuf" {
|
||||||
|
switch s.Name() {
|
||||||
|
case "Empty", "Any",
|
||||||
|
"BoolValue", "BytesValue", "StringValue",
|
||||||
|
"Int32Value", "UInt32Value", "FloatValue",
|
||||||
|
"Int64Value", "UInt64Value", "DoubleValue",
|
||||||
|
"Duration", "Timestamp",
|
||||||
|
"NullValue", "Struct", "Value", "ListValue":
|
||||||
|
return string(s.Name())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func isMessageSet(md protoreflect.MessageDescriptor) bool {
|
||||||
|
ms, ok := md.(interface{ IsMessageSet() bool })
|
||||||
|
return ok && ms.IsMessageSet()
|
||||||
|
}
|
|
@ -1,3 +0,0 @@
|
||||||
# This source code refers to The Go Authors for copyright purposes.
|
|
||||||
# The master list of authors is in the main Go distribution,
|
|
||||||
# visible at http://tip.golang.org/AUTHORS.
|
|
|
@ -1,3 +0,0 @@
|
||||||
# This source code was written by the Go contributors.
|
|
||||||
# The master list of contributors is in the main Go distribution,
|
|
||||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
|
|
@ -8,22 +8,35 @@ package errgroup
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type token struct{}
|
||||||
|
|
||||||
// A Group is a collection of goroutines working on subtasks that are part of
|
// A Group is a collection of goroutines working on subtasks that are part of
|
||||||
// the same overall task.
|
// the same overall task.
|
||||||
//
|
//
|
||||||
// A zero Group is valid and does not cancel on error.
|
// A zero Group is valid, has no limit on the number of active goroutines,
|
||||||
|
// and does not cancel on error.
|
||||||
type Group struct {
|
type Group struct {
|
||||||
cancel func()
|
cancel func()
|
||||||
|
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
|
|
||||||
|
sem chan token
|
||||||
|
|
||||||
errOnce sync.Once
|
errOnce sync.Once
|
||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (g *Group) done() {
|
||||||
|
if g.sem != nil {
|
||||||
|
<-g.sem
|
||||||
|
}
|
||||||
|
g.wg.Done()
|
||||||
|
}
|
||||||
|
|
||||||
// WithContext returns a new Group and an associated Context derived from ctx.
|
// WithContext returns a new Group and an associated Context derived from ctx.
|
||||||
//
|
//
|
||||||
// The derived Context is canceled the first time a function passed to Go
|
// The derived Context is canceled the first time a function passed to Go
|
||||||
|
@ -45,14 +58,19 @@ func (g *Group) Wait() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Go calls the given function in a new goroutine.
|
// Go calls the given function in a new goroutine.
|
||||||
|
// It blocks until the new goroutine can be added without the number of
|
||||||
|
// active goroutines in the group exceeding the configured limit.
|
||||||
//
|
//
|
||||||
// The first call to return a non-nil error cancels the group; its error will be
|
// The first call to return a non-nil error cancels the group; its error will be
|
||||||
// returned by Wait.
|
// returned by Wait.
|
||||||
func (g *Group) Go(f func() error) {
|
func (g *Group) Go(f func() error) {
|
||||||
g.wg.Add(1)
|
if g.sem != nil {
|
||||||
|
g.sem <- token{}
|
||||||
|
}
|
||||||
|
|
||||||
|
g.wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer g.wg.Done()
|
defer g.done()
|
||||||
|
|
||||||
if err := f(); err != nil {
|
if err := f(); err != nil {
|
||||||
g.errOnce.Do(func() {
|
g.errOnce.Do(func() {
|
||||||
|
@ -64,3 +82,51 @@ func (g *Group) Go(f func() error) {
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TryGo calls the given function in a new goroutine only if the number of
|
||||||
|
// active goroutines in the group is currently below the configured limit.
|
||||||
|
//
|
||||||
|
// The return value reports whether the goroutine was started.
|
||||||
|
func (g *Group) TryGo(f func() error) bool {
|
||||||
|
if g.sem != nil {
|
||||||
|
select {
|
||||||
|
case g.sem <- token{}:
|
||||||
|
// Note: this allows barging iff channels in general allow barging.
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
g.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer g.done()
|
||||||
|
|
||||||
|
if err := f(); err != nil {
|
||||||
|
g.errOnce.Do(func() {
|
||||||
|
g.err = err
|
||||||
|
if g.cancel != nil {
|
||||||
|
g.cancel()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetLimit limits the number of active goroutines in this group to at most n.
|
||||||
|
// A negative value indicates no limit.
|
||||||
|
//
|
||||||
|
// Any subsequent call to the Go method will block until it can add an active
|
||||||
|
// goroutine without exceeding the configured limit.
|
||||||
|
//
|
||||||
|
// The limit must not be modified while any goroutines in the group are active.
|
||||||
|
func (g *Group) SetLimit(n int) {
|
||||||
|
if n < 0 {
|
||||||
|
g.sem = nil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(g.sem) != 0 {
|
||||||
|
panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem)))
|
||||||
|
}
|
||||||
|
g.sem = make(chan token, n)
|
||||||
|
}
|
||||||
|
|
|
@ -5,7 +5,8 @@
|
||||||
// Package xerrors implements functions to manipulate errors.
|
// Package xerrors implements functions to manipulate errors.
|
||||||
//
|
//
|
||||||
// This package is based on the Go 2 proposal for error values:
|
// This package is based on the Go 2 proposal for error values:
|
||||||
// https://golang.org/design/29934-error-values
|
//
|
||||||
|
// https://golang.org/design/29934-error-values
|
||||||
//
|
//
|
||||||
// These functions were incorporated into the standard library's errors package
|
// These functions were incorporated into the standard library's errors package
|
||||||
// in Go 1.13:
|
// in Go 1.13:
|
||||||
|
|
|
@ -33,6 +33,9 @@ const percentBangString = "%!"
|
||||||
// It is invalid to include more than one %w verb or to supply it with an
|
// It is invalid to include more than one %w verb or to supply it with an
|
||||||
// operand that does not implement the error interface. The %w verb is otherwise
|
// operand that does not implement the error interface. The %w verb is otherwise
|
||||||
// a synonym for %v.
|
// a synonym for %v.
|
||||||
|
//
|
||||||
|
// Note that as of Go 1.13, the fmt.Errorf function will do error formatting,
|
||||||
|
// but it will not capture a stack backtrace.
|
||||||
func Errorf(format string, a ...interface{}) error {
|
func Errorf(format string, a ...interface{}) error {
|
||||||
format = formatPlusW(format)
|
format = formatPlusW(format)
|
||||||
// Support a ": %[wsv]" suffix, which works well with xerrors.Formatter.
|
// Support a ": %[wsv]" suffix, which works well with xerrors.Formatter.
|
||||||
|
|
|
@ -35,6 +35,8 @@ func (e noWrapper) FormatError(p Printer) (next error) {
|
||||||
|
|
||||||
// Unwrap returns the result of calling the Unwrap method on err, if err implements
|
// Unwrap returns the result of calling the Unwrap method on err, if err implements
|
||||||
// Unwrap. Otherwise, Unwrap returns nil.
|
// Unwrap. Otherwise, Unwrap returns nil.
|
||||||
|
//
|
||||||
|
// Deprecated: As of Go 1.13, use errors.Unwrap instead.
|
||||||
func Unwrap(err error) error {
|
func Unwrap(err error) error {
|
||||||
u, ok := err.(Wrapper)
|
u, ok := err.(Wrapper)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -47,6 +49,8 @@ func Unwrap(err error) error {
|
||||||
//
|
//
|
||||||
// An error is considered to match a target if it is equal to that target or if
|
// An error is considered to match a target if it is equal to that target or if
|
||||||
// it implements a method Is(error) bool such that Is(target) returns true.
|
// it implements a method Is(error) bool such that Is(target) returns true.
|
||||||
|
//
|
||||||
|
// Deprecated: As of Go 1.13, use errors.Is instead.
|
||||||
func Is(err, target error) bool {
|
func Is(err, target error) bool {
|
||||||
if target == nil {
|
if target == nil {
|
||||||
return err == target
|
return err == target
|
||||||
|
@ -77,6 +81,8 @@ func Is(err, target error) bool {
|
||||||
//
|
//
|
||||||
// The As method should set the target to its value and return true if err
|
// The As method should set the target to its value and return true if err
|
||||||
// matches the type to which target points.
|
// matches the type to which target points.
|
||||||
|
//
|
||||||
|
// Deprecated: As of Go 1.13, use errors.As instead.
|
||||||
func As(err error, target interface{}) bool {
|
func As(err error, target interface{}) bool {
|
||||||
if target == nil {
|
if target == nil {
|
||||||
panic("errors: target cannot be nil")
|
panic("errors: target cannot be nil")
|
||||||
|
|
|
@ -27,6 +27,7 @@ import (
|
||||||
"net"
|
"net"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/channelz"
|
||||||
"google.golang.org/grpc/connectivity"
|
"google.golang.org/grpc/connectivity"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
"google.golang.org/grpc/internal"
|
"google.golang.org/grpc/internal"
|
||||||
|
@ -192,7 +193,7 @@ type BuildOptions struct {
|
||||||
// server can ignore this field.
|
// server can ignore this field.
|
||||||
Authority string
|
Authority string
|
||||||
// ChannelzParentID is the parent ClientConn's channelz ID.
|
// ChannelzParentID is the parent ClientConn's channelz ID.
|
||||||
ChannelzParentID int64
|
ChannelzParentID *channelz.Identifier
|
||||||
// CustomUserAgent is the custom user agent set on the parent ClientConn.
|
// CustomUserAgent is the custom user agent set on the parent ClientConn.
|
||||||
// The balancer should set the same custom user agent if it creates a
|
// The balancer should set the same custom user agent if it creates a
|
||||||
// ClientConn.
|
// ClientConn.
|
||||||
|
|
|
@ -20,130 +20,178 @@ package grpc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"google.golang.org/grpc/balancer"
|
"google.golang.org/grpc/balancer"
|
||||||
"google.golang.org/grpc/connectivity"
|
"google.golang.org/grpc/connectivity"
|
||||||
|
"google.golang.org/grpc/internal/balancer/gracefulswitch"
|
||||||
"google.golang.org/grpc/internal/buffer"
|
"google.golang.org/grpc/internal/buffer"
|
||||||
"google.golang.org/grpc/internal/channelz"
|
"google.golang.org/grpc/internal/channelz"
|
||||||
"google.golang.org/grpc/internal/grpcsync"
|
"google.golang.org/grpc/internal/grpcsync"
|
||||||
"google.golang.org/grpc/resolver"
|
"google.golang.org/grpc/resolver"
|
||||||
)
|
)
|
||||||
|
|
||||||
// scStateUpdate contains the subConn and the new state it changed to.
|
// ccBalancerWrapper sits between the ClientConn and the Balancer.
|
||||||
|
//
|
||||||
|
// ccBalancerWrapper implements methods corresponding to the ones on the
|
||||||
|
// balancer.Balancer interface. The ClientConn is free to call these methods
|
||||||
|
// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
|
||||||
|
// to the Balancer happen synchronously and in order.
|
||||||
|
//
|
||||||
|
// ccBalancerWrapper also implements the balancer.ClientConn interface and is
|
||||||
|
// passed to the Balancer implementations. It invokes unexported methods on the
|
||||||
|
// ClientConn to handle these calls from the Balancer.
|
||||||
|
//
|
||||||
|
// It uses the gracefulswitch.Balancer internally to ensure that balancer
|
||||||
|
// switches happen in a graceful manner.
|
||||||
|
type ccBalancerWrapper struct {
|
||||||
|
cc *ClientConn
|
||||||
|
|
||||||
|
// Since these fields are accessed only from handleXxx() methods which are
|
||||||
|
// synchronized by the watcher goroutine, we do not need a mutex to protect
|
||||||
|
// these fields.
|
||||||
|
balancer *gracefulswitch.Balancer
|
||||||
|
curBalancerName string
|
||||||
|
|
||||||
|
updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher().
|
||||||
|
resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here.
|
||||||
|
closed *grpcsync.Event // Indicates if close has been called.
|
||||||
|
done *grpcsync.Event // Indicates if close has completed its work.
|
||||||
|
}
|
||||||
|
|
||||||
|
// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
|
||||||
|
// is not created until the switchTo() method is invoked.
|
||||||
|
func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
|
||||||
|
ccb := &ccBalancerWrapper{
|
||||||
|
cc: cc,
|
||||||
|
updateCh: buffer.NewUnbounded(),
|
||||||
|
resultCh: buffer.NewUnbounded(),
|
||||||
|
closed: grpcsync.NewEvent(),
|
||||||
|
done: grpcsync.NewEvent(),
|
||||||
|
}
|
||||||
|
go ccb.watcher()
|
||||||
|
ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
|
||||||
|
return ccb
|
||||||
|
}
|
||||||
|
|
||||||
|
// The following xxxUpdate structs wrap the arguments received as part of the
|
||||||
|
// corresponding update. The watcher goroutine uses the 'type' of the update to
|
||||||
|
// invoke the appropriate handler routine to handle the update.
|
||||||
|
|
||||||
|
type ccStateUpdate struct {
|
||||||
|
ccs *balancer.ClientConnState
|
||||||
|
}
|
||||||
|
|
||||||
type scStateUpdate struct {
|
type scStateUpdate struct {
|
||||||
sc balancer.SubConn
|
sc balancer.SubConn
|
||||||
state connectivity.State
|
state connectivity.State
|
||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
// exitIdle contains no data and is just a signal sent on the updateCh in
|
type exitIdleUpdate struct{}
|
||||||
// ccBalancerWrapper to instruct the balancer to exit idle.
|
|
||||||
type exitIdle struct{}
|
|
||||||
|
|
||||||
// ccBalancerWrapper is a wrapper on top of cc for balancers.
|
type resolverErrorUpdate struct {
|
||||||
// It implements balancer.ClientConn interface.
|
err error
|
||||||
type ccBalancerWrapper struct {
|
|
||||||
cc *ClientConn
|
|
||||||
balancerMu sync.Mutex // synchronizes calls to the balancer
|
|
||||||
balancer balancer.Balancer
|
|
||||||
hasExitIdle bool
|
|
||||||
updateCh *buffer.Unbounded
|
|
||||||
closed *grpcsync.Event
|
|
||||||
done *grpcsync.Event
|
|
||||||
|
|
||||||
mu sync.Mutex
|
|
||||||
subConns map[*acBalancerWrapper]struct{}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
|
type switchToUpdate struct {
|
||||||
ccb := &ccBalancerWrapper{
|
name string
|
||||||
cc: cc,
|
|
||||||
updateCh: buffer.NewUnbounded(),
|
|
||||||
closed: grpcsync.NewEvent(),
|
|
||||||
done: grpcsync.NewEvent(),
|
|
||||||
subConns: make(map[*acBalancerWrapper]struct{}),
|
|
||||||
}
|
|
||||||
go ccb.watcher()
|
|
||||||
ccb.balancer = b.Build(ccb, bopts)
|
|
||||||
_, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler)
|
|
||||||
return ccb
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// watcher balancer functions sequentially, so the balancer can be implemented
|
type subConnUpdate struct {
|
||||||
// lock-free.
|
acbw *acBalancerWrapper
|
||||||
|
}
|
||||||
|
|
||||||
|
// watcher is a long-running goroutine which reads updates from a channel and
|
||||||
|
// invokes corresponding methods on the underlying balancer. It ensures that
|
||||||
|
// these methods are invoked in a synchronous fashion. It also ensures that
|
||||||
|
// these methods are invoked in the order in which the updates were received.
|
||||||
func (ccb *ccBalancerWrapper) watcher() {
|
func (ccb *ccBalancerWrapper) watcher() {
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case t := <-ccb.updateCh.Get():
|
case u := <-ccb.updateCh.Get():
|
||||||
ccb.updateCh.Load()
|
ccb.updateCh.Load()
|
||||||
if ccb.closed.HasFired() {
|
if ccb.closed.HasFired() {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
switch u := t.(type) {
|
switch update := u.(type) {
|
||||||
|
case *ccStateUpdate:
|
||||||
|
ccb.handleClientConnStateChange(update.ccs)
|
||||||
case *scStateUpdate:
|
case *scStateUpdate:
|
||||||
ccb.balancerMu.Lock()
|
ccb.handleSubConnStateChange(update)
|
||||||
ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err})
|
case *exitIdleUpdate:
|
||||||
ccb.balancerMu.Unlock()
|
ccb.handleExitIdle()
|
||||||
case *acBalancerWrapper:
|
case *resolverErrorUpdate:
|
||||||
ccb.mu.Lock()
|
ccb.handleResolverError(update.err)
|
||||||
if ccb.subConns != nil {
|
case *switchToUpdate:
|
||||||
delete(ccb.subConns, u)
|
ccb.handleSwitchTo(update.name)
|
||||||
ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain)
|
case *subConnUpdate:
|
||||||
}
|
ccb.handleRemoveSubConn(update.acbw)
|
||||||
ccb.mu.Unlock()
|
|
||||||
case exitIdle:
|
|
||||||
if ccb.cc.GetState() == connectivity.Idle {
|
|
||||||
if ei, ok := ccb.balancer.(balancer.ExitIdler); ok {
|
|
||||||
// We already checked that the balancer implements
|
|
||||||
// ExitIdle before pushing the event to updateCh, but
|
|
||||||
// check conditionally again as defensive programming.
|
|
||||||
ccb.balancerMu.Lock()
|
|
||||||
ei.ExitIdle()
|
|
||||||
ccb.balancerMu.Unlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
default:
|
||||||
logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t)
|
logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update)
|
||||||
}
|
}
|
||||||
case <-ccb.closed.Done():
|
case <-ccb.closed.Done():
|
||||||
}
|
}
|
||||||
|
|
||||||
if ccb.closed.HasFired() {
|
if ccb.closed.HasFired() {
|
||||||
ccb.balancerMu.Lock()
|
ccb.handleClose()
|
||||||
ccb.balancer.Close()
|
|
||||||
ccb.balancerMu.Unlock()
|
|
||||||
ccb.mu.Lock()
|
|
||||||
scs := ccb.subConns
|
|
||||||
ccb.subConns = nil
|
|
||||||
ccb.mu.Unlock()
|
|
||||||
ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
|
|
||||||
ccb.done.Fire()
|
|
||||||
// Fire done before removing the addr conns. We can safely unblock
|
|
||||||
// ccb.close and allow the removeAddrConns to happen
|
|
||||||
// asynchronously.
|
|
||||||
for acbw := range scs {
|
|
||||||
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
|
||||||
}
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ccb *ccBalancerWrapper) close() {
|
// updateClientConnState is invoked by grpc to push a ClientConnState update to
|
||||||
ccb.closed.Fire()
|
// the underlying balancer.
|
||||||
<-ccb.done.Done()
|
//
|
||||||
}
|
// Unlike other methods invoked by grpc to push updates to the underlying
|
||||||
|
// balancer, this method cannot simply push the update onto the update channel
|
||||||
|
// and return. It needs to return the error returned by the underlying balancer
|
||||||
|
// back to grpc which propagates that to the resolver.
|
||||||
|
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
|
||||||
|
ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
|
||||||
|
|
||||||
func (ccb *ccBalancerWrapper) exitIdle() bool {
|
var res interface{}
|
||||||
if !ccb.hasExitIdle {
|
select {
|
||||||
return false
|
case res = <-ccb.resultCh.Get():
|
||||||
|
ccb.resultCh.Load()
|
||||||
|
case <-ccb.closed.Done():
|
||||||
|
// Return early if the balancer wrapper is closed while we are waiting for
|
||||||
|
// the underlying balancer to process a ClientConnState update.
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
ccb.updateCh.Put(exitIdle{})
|
// If the returned error is nil, attempting to type assert to error leads to
|
||||||
return true
|
// panic. So, this needs to handled separately.
|
||||||
|
if res == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return res.(error)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
|
// handleClientConnStateChange handles a ClientConnState update from the update
|
||||||
|
// channel and invokes the appropriate method on the underlying balancer.
|
||||||
|
//
|
||||||
|
// If the addresses specified in the update contain addresses of type "grpclb"
|
||||||
|
// and the selected LB policy is not "grpclb", these addresses will be filtered
|
||||||
|
// out and ccs will be modified with the updated address list.
|
||||||
|
func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) {
|
||||||
|
if ccb.curBalancerName != grpclbName {
|
||||||
|
// Filter any grpclb addresses since we don't have the grpclb balancer.
|
||||||
|
var addrs []resolver.Address
|
||||||
|
for _, addr := range ccs.ResolverState.Addresses {
|
||||||
|
if addr.Type == resolver.GRPCLB {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
addrs = append(addrs, addr)
|
||||||
|
}
|
||||||
|
ccs.ResolverState.Addresses = addrs
|
||||||
|
}
|
||||||
|
ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs))
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateSubConnState is invoked by grpc to push a subConn state update to the
|
||||||
|
// underlying balancer.
|
||||||
|
func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
|
||||||
// When updating addresses for a SubConn, if the address in use is not in
|
// When updating addresses for a SubConn, if the address in use is not in
|
||||||
// the new addresses, the old ac will be tearDown() and a new ac will be
|
// the new addresses, the old ac will be tearDown() and a new ac will be
|
||||||
// created. tearDown() generates a state change with Shutdown state, we
|
// created. tearDown() generates a state change with Shutdown state, we
|
||||||
|
@@ -161,44 +209,125 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
	})
}

// handleSubConnStateChange handles a SubConnState update from the update
// channel and invokes the appropriate method on the underlying balancer.
func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) {
	ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err})
}

func (ccb *ccBalancerWrapper) exitIdle() {
	ccb.updateCh.Put(&exitIdleUpdate{})
}

func (ccb *ccBalancerWrapper) handleExitIdle() {
	if ccb.cc.GetState() != connectivity.Idle {
		return
	}
	ccb.balancer.ExitIdle()
}

func (ccb *ccBalancerWrapper) resolverError(err error) {
	ccb.updateCh.Put(&resolverErrorUpdate{err: err})
}

func (ccb *ccBalancerWrapper) handleResolverError(err error) {
	ccb.balancer.ResolverError(err)
}

// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
// LB policy identified by name.
//
// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
// first good update from the name resolver, it determines the LB policy to use
// and invokes the switchTo() method. Upon receipt of every subsequent update
// from the name resolver, it invokes this method.
//
// the ccBalancerWrapper keeps track of the current LB policy name, and skips
// the graceful balancer switching process if the name does not change.
func (ccb *ccBalancerWrapper) switchTo(name string) {
	ccb.updateCh.Put(&switchToUpdate{name: name})
}

// handleSwitchTo handles a balancer switch update from the update channel. It
// calls the SwitchTo() method on the gracefulswitch.Balancer with a
// balancer.Builder corresponding to name. If no balancer.Builder is registered
// for the given name, it uses the default LB policy which is "pick_first".
func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
	// TODO: Other languages use case-insensitive balancer registries. We should
	// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
	if strings.EqualFold(ccb.curBalancerName, name) {
		return
	}

	// TODO: Ensure that name is a registered LB policy when we get here.
	// We currently only validate the `loadBalancingConfig` field. We need to do
	// the same for the `loadBalancingPolicy` field and reject the service config
	// if the specified policy is not registered.
	builder := balancer.Get(name)
	if builder == nil {
		channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
		builder = newPickfirstBuilder()
	} else {
		channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
	}

	if err := ccb.balancer.SwitchTo(builder); err != nil {
		channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
		return
	}
	ccb.curBalancerName = builder.Name()
}

// handleRemoveSubConn handles a request from the underlying balancer to remove
// a subConn.
//
// See comments in RemoveSubConn() for more details.
func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}

func (ccb *ccBalancerWrapper) close() {
	ccb.closed.Fire()
	<-ccb.done.Done()
}

func (ccb *ccBalancerWrapper) handleClose() {
	ccb.balancer.Close()
	ccb.done.Fire()
}

func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
	if len(addrs) <= 0 {
		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
	}
	ac, err := ccb.cc.newAddrConn(addrs, opts)
	if err != nil {
		channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
		return nil, err
	}
	acbw := &acBalancerWrapper{ac: ac}
	acbw.ac.mu.Lock()
	ac.acbw = acbw
	acbw.ac.mu.Unlock()
	return acbw, nil
}

func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
	// Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
	// was required to handle the RemoveSubConn() method asynchronously by pushing
	// the update onto the update channel. This was done to avoid a deadlock as
	// switchBalancer() was holding cc.mu when calling Close() on the old
	// balancer, which would in turn call RemoveSubConn().
	//
	// With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
	// asynchronously is probably not required anymore since the switchTo() method
	// handles the balancer switch by pushing the update onto the channel.
	// TODO(easwars): Handle this inline.
	acbw, ok := sc.(*acBalancerWrapper)
	if !ok {
		return
	}
	ccb.updateCh.Put(&subConnUpdate{acbw: acbw})
}

func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
@@ -210,11 +339,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
}

func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
	// Update picker before updating state. Even though the ordering here does
	// not matter, it can lead to multiple calls of Pick in the common start-up
	// case where we wait for ready and then perform an RPC. If the picker is
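The switchTo/handleSwitchTo pair above reduces to a case-insensitive name comparison plus a lookup in the global balancer registry, with pick_first as the fallback when the requested name is unknown. A minimal sketch of that lookup against the public registry; the resolveBuilder helper and the fallback name passed to it are illustrative, and the real wrapper builds pick_first through an internal constructor rather than the registry:

package main

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/balancer"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin"
)

// resolveBuilder returns the builder for the requested policy, falling back to
// the named default when the requested policy is not registered.
func resolveBuilder(current, requested, fallback string) balancer.Builder {
	if strings.EqualFold(current, requested) {
		return nil // same policy; nothing to switch
	}
	if b := balancer.Get(requested); b != nil {
		return b
	}
	return balancer.Get(fallback)
}

func main() {
	if b := resolveBuilder("pick_first", "round_robin", "round_robin"); b != nil {
		fmt.Println("switching to", b.Name()) // round_robin
	}
	if b := resolveBuilder("round_robin", "no_such_policy", "round_robin"); b != nil {
		fmt.Println("falling back to", b.Name()) // round_robin
	}
}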
@@ -0,0 +1,36 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package channelz exports internals of the channelz implementation as required
// by other gRPC packages.
//
// The implementation of the channelz spec as defined in
// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
// the `internal/channelz` package.
//
// Experimental
//
// Notice: All APIs in this package are experimental and may be removed in a
// later release.
package channelz

import "google.golang.org/grpc/internal/channelz"

// Identifier is an opaque identifier which uniquely identifies an entity in the
// channelz database.
type Identifier = channelz.Identifier
@@ -159,23 +159,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
		}
	}()

	pid := cc.dopts.channelzParentID
	cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target)
	ted := &channelz.TraceEventDesc{
		Desc:     "Channel created",
		Severity: channelz.CtInfo,
	}
	if cc.dopts.channelzParentID != nil {
		ted.Parent = &channelz.TraceEventDesc{
			Desc:     fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()),
			Severity: channelz.CtInfo,
		}
	}
	channelz.AddTraceEvent(logger, cc.channelzID, 1, ted)
	cc.csMgr.channelzID = cc.channelzID

	if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
		return nil, errNoTransportSecurity
@@ -281,7 +278,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
	if creds := cc.dopts.copts.TransportCredentials; creds != nil {
		credsClone = creds.Clone()
	}
	cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
		DialCreds:        credsClone,
		CredsBundle:      cc.dopts.copts.CredsBundle,
		Dialer:           cc.dopts.copts.Dialer,
@@ -289,7 +286,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
		CustomUserAgent:  cc.dopts.copts.UserAgent,
		ChannelzParentID: cc.channelzID,
		Target:           cc.parsedTarget,
	})

	// Build the resolver.
	rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
@@ -398,7 +395,7 @@ type connectivityStateManager struct {
	mu         sync.Mutex
	state      connectivity.State
	notifyChan chan struct{}
	channelzID *channelz.Identifier
}

// updateState updates the connectivity.State of ClientConn.
@@ -464,34 +461,36 @@ var _ ClientConnInterface = (*ClientConn)(nil)
// handshakes. It also handles errors on established connections by
// re-resolving the name and reconnecting.
type ClientConn struct {
	ctx    context.Context    // Initialized using the background context at dial time.
	cancel context.CancelFunc // Cancelled on close.

	// The following are initialized at dial time, and are read-only after that.
	target          string               // User's dial target.
	parsedTarget    resolver.Target      // See parseTargetAndFindResolver().
	authority       string               // See determineAuthority().
	dopts           dialOptions          // Default and user specified dial options.
	channelzID      *channelz.Identifier // Channelz identifier for the channel.
	balancerWrapper *ccBalancerWrapper   // Uses gracefulswitch.balancer underneath.

	// The following provide their own synchronization, and therefore don't
	// require cc.mu to be held to access them.
	csMgr              *connectivityStateManager
	blockingpicker     *pickerWrapper
	safeConfigSelector iresolver.SafeConfigSelector
	czData             *channelzData
	retryThrottler     atomic.Value // Updated from service config.

	// firstResolveEvent is used to track whether the name resolver sent us at
	// least one update. RPCs block on this event.
	firstResolveEvent *grpcsync.Event

	// mu protects the following fields.
	// TODO: split mu so the same mutex isn't used for everything.
	mu              sync.RWMutex
	resolverWrapper *ccResolverWrapper         // Initialized in Dial; cleared in Close.
	sc              *ServiceConfig             // Latest service config received from the resolver.
	conns           map[*addrConn]struct{}     // Set to nil on close.
	mkp             keepalive.ClientParameters // May be updated upon receipt of a GoAway.

	lceMu               sync.Mutex // protects lastConnectionError
	lastConnectionError error
@@ -536,14 +535,7 @@ func (cc *ClientConn) GetState() connectivity.State {
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
func (cc *ClientConn) Connect() {
	cc.balancerWrapper.exitIdle()
}

func (cc *ClientConn) scWatcher() {
@@ -623,9 +615,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
		// with the new addresses.
		cc.maybeApplyDefaultServiceConfig(nil)

		cc.balancerWrapper.resolverError(err)

		// No addresses are valid with err set; return early.
		cc.mu.Unlock()
@@ -653,16 +643,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
		cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses)
	} else {
		ret = balancer.ErrBadResolverState
		if cc.sc == nil {
			// Apply the failing LB only if we haven't received valid service config
			// from the name resolver in the past.
			cc.applyFailingLB(s.ServiceConfig)
			cc.mu.Unlock()
			return ret
		}
@@ -670,24 +654,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
	}

	var balCfg serviceconfig.LoadBalancingConfig
	if cc.sc != nil && cc.sc.lbConfig != nil {
		balCfg = cc.sc.lbConfig.cfg
	}
	bw := cc.balancerWrapper
	cc.mu.Unlock()

	uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
	if ret == nil {
		ret = uccsErr // prefer ErrBadResolver state since any other error is
@@ -696,56 +668,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
	return ret
}

// applyFailingLB is akin to configuring an LB policy on the channel which
// always fails RPCs. Here, an actual LB policy is not configured, but an always
// erroring picker is configured, which returns errors with information about
// what was invalid in the received service config. A config selector with no
// service config is configured, and the connectivity state of the channel is
// set to TransientFailure.
//
// Caller must hold cc.mu.
func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) {
	var err error
	if sc.Err != nil {
		err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
	} else {
		err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
	}
	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
	cc.blockingpicker.updatePicker(base.NewErrPicker(err))
	cc.csMgr.updateState(connectivity.TransientFailure)
}

func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
	cc.balancerWrapper.updateSubConnState(sc, s, err)
}

// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
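applyFailingLB boils down to installing a picker that rejects every RPC with a descriptive Unavailable error. A small self-contained sketch of that building block using the public balancer/base and status packages; the error text and method name are made up for illustration:

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Build the same kind of always-erroring picker applyFailingLB installs.
	err := status.Errorf(codes.Unavailable, "error parsing service config: %v", "invalid JSON")
	picker := base.NewErrPicker(err)

	// Every Pick call now fails with the Unavailable error above.
	_, pickErr := picker.Pick(balancer.PickInfo{FullMethodName: "/svc/Method"})
	fmt.Println(pickErr)
}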
@@ -768,17 +712,21 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
		cc.mu.Unlock()
		return nil, ErrClientConnClosing
	}

	var err error
	ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "")
	if err != nil {
		return nil, err
	}
	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
		Desc:     "Subchannel created",
		Severity: channelz.CtInfo,
		Parent: &channelz.TraceEventDesc{
			Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()),
			Severity: channelz.CtInfo,
		},
	})

	cc.conns[ac] = struct{}{}
	cc.mu.Unlock()
	return ac, nil
@@ -853,16 +801,31 @@ func (ac *addrConn) connect() error {
	return nil
}

func equalAddresses(a, b []resolver.Address) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if !v.Equal(b[i]) {
			return false
		}
	}
	return true
}

// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
//
// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
// addresses will be picked up by retry in the next iteration after backoff.
//
// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
//
// If the addresses is the same as the old list, it does nothing and returns
// true.
//
// If ac is Connecting, it returns false. The caller should tear down the ac and
// create a new one. Note that the backoff will be reset when this happens.
//
// If ac is Ready, it checks whether current connected address of ac is in the
// new addrs list.
//  - If true, it updates ac.addrs and returns true. The ac will keep using
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if equalAddresses(ac.addrs, addrs) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
if ac.state == connectivity.Connecting {
|
if ac.state == connectivity.Connecting {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
@ -959,14 +926,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||||
t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
|
return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
|
||||||
Ctx: ctx,
|
Ctx: ctx,
|
||||||
FullMethodName: method,
|
FullMethodName: method,
|
||||||
})
|
})
|
||||||
if err != nil {
|
|
||||||
return nil, nil, toRPCErr(err)
|
|
||||||
}
|
|
||||||
return t, done, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
|
func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
|
||||||
|
@ -991,35 +954,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel
|
||||||
cc.retryThrottler.Store((*retryThrottler)(nil))
|
cc.retryThrottler.Store((*retryThrottler)(nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
if cc.dopts.balancerBuilder == nil {
|
var newBalancerName string
|
||||||
// Only look at balancer types and switch balancer if balancer dial
|
if cc.sc != nil && cc.sc.lbConfig != nil {
|
||||||
// option is not set.
|
newBalancerName = cc.sc.lbConfig.name
|
||||||
var newBalancerName string
|
} else {
|
||||||
if cc.sc != nil && cc.sc.lbConfig != nil {
|
var isGRPCLB bool
|
||||||
newBalancerName = cc.sc.lbConfig.name
|
for _, a := range addrs {
|
||||||
} else {
|
if a.Type == resolver.GRPCLB {
|
||||||
var isGRPCLB bool
|
isGRPCLB = true
|
||||||
for _, a := range addrs {
|
break
|
||||||
if a.Type == resolver.GRPCLB {
|
|
||||||
isGRPCLB = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if isGRPCLB {
|
|
||||||
newBalancerName = grpclbName
|
|
||||||
} else if cc.sc != nil && cc.sc.LB != nil {
|
|
||||||
newBalancerName = *cc.sc.LB
|
|
||||||
} else {
|
|
||||||
newBalancerName = PickFirstBalancerName
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
cc.switchBalancer(newBalancerName)
|
if isGRPCLB {
|
||||||
} else if cc.balancerWrapper == nil {
|
newBalancerName = grpclbName
|
||||||
// Balancer dial option was set, and this is the first time handling
|
} else if cc.sc != nil && cc.sc.LB != nil {
|
||||||
// resolved addresses. Build a balancer with dopts.balancerBuilder.
|
newBalancerName = *cc.sc.LB
|
||||||
cc.curBalancerName = cc.dopts.balancerBuilder.Name()
|
} else {
|
||||||
cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
|
newBalancerName = PickFirstBalancerName
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
cc.balancerWrapper.switchTo(newBalancerName)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
|
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
|
||||||
|
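The balancer name selected above follows a fixed precedence: loadBalancingConfig from the service config first, then grpclb if any resolved address is a grpclb address, then the legacy loadBalancingPolicy string, and finally pick_first. A standalone sketch of just that precedence; chooseLBPolicy and its inputs are illustrative and not part of the diff:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

const (
	grpclbName    = "grpclb"
	pickFirstName = "pick_first"
)

// chooseLBPolicy mirrors the precedence used when applying a service config.
func chooseLBPolicy(lbConfigName string, legacyLB *string, addrs []resolver.Address) string {
	if lbConfigName != "" {
		return lbConfigName
	}
	for _, a := range addrs {
		if a.Type == resolver.GRPCLB {
			return grpclbName
		}
	}
	if legacyLB != nil {
		return *legacyLB
	}
	return pickFirstName
}

func main() {
	rr := "round_robin"
	fmt.Println(chooseLBPolicy("", &rr, nil))                                         // round_robin
	fmt.Println(chooseLBPolicy("", nil, []resolver.Address{{Type: resolver.GRPCLB}})) // grpclb
	fmt.Println(chooseLBPolicy("", nil, nil))                                         // pick_first
}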
@@ -1070,11 +1024,11 @@ func (cc *ClientConn) Close() error {
	rWrapper := cc.resolverWrapper
	cc.resolverWrapper = nil
	bWrapper := cc.balancerWrapper
	cc.mu.Unlock()

	// The order of closing matters here since the balancer wrapper assumes the
	// picker is closed before it is closed.
	cc.blockingpicker.close()

	if bWrapper != nil {
		bWrapper.close()
	}
@@ -1085,22 +1039,22 @@ func (cc *ClientConn) Close() error {
	for ac := range conns {
		ac.tearDown(ErrClientConnClosing)
	}
	ted := &channelz.TraceEventDesc{
		Desc:     "Channel deleted",
		Severity: channelz.CtInfo,
	}
	if cc.dopts.channelzParentID != nil {
		ted.Parent = &channelz.TraceEventDesc{
			Desc:     fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()),
			Severity: channelz.CtInfo,
		}
	}
	channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
	// trace reference to the entity being deleted, and thus prevent it from being
	// deleted right away.
	channelz.RemoveEntry(cc.channelzID)

	return nil
}
@@ -1130,7 +1084,7 @@ type addrConn struct {
	backoffIdx   int // Needs to be stateful for resetConnectBackoff.
	resetBackoff chan struct{}

	channelzID *channelz.Identifier
	czData     *channelzData
}
@@ -1284,6 +1238,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
	ac.mu.Lock()
	defer ac.mu.Unlock()
	defer connClosed.Fire()
	defer hcancel()
	if !hcStarted || hctx.Err() != nil {
		// We didn't start the health check or set the state to READY, so
		// no need to do anything else here.
@@ -1294,7 +1249,6 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
		// state, since there may be a new transport in this addrConn.
		return
	}
	ac.transport = nil
	// Refresh the name resolver
	ac.cc.resolveNow(resolver.ResolveNowOptions{})
@@ -1312,14 +1266,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne

	connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
	defer cancel()
	copts.ChannelzParentID = ac.channelzID

	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose)
	if err != nil {
		// newTr is either nil, or closed.
		hcancel()
		channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
		return err
	}
@@ -1332,7 +1285,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
		newTr.Close(transport.ErrConnClosing)
		if connectCtx.Err() == context.DeadlineExceeded {
			err := errors.New("failed to receive server preface within timeout")
			channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err)
			return err
		}
		return nil
@@ -1497,19 +1450,18 @@ func (ac *addrConn) tearDown(err error) {
		curTr.GracefulClose()
		ac.mu.Lock()
	}
	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
		Desc:     "Subchannel deleted",
		Severity: channelz.CtInfo,
		Parent: &channelz.TraceEventDesc{
			Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()),
			Severity: channelz.CtInfo,
		},
	})
	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
	// trace reference to the entity being deleted, and thus prevent it from
	// being deleted right away.
	channelz.RemoveEntry(ac.channelzID)
	ac.mu.Unlock()
}
@@ -20,12 +20,11 @@ package grpc

import (
	"context"
	"net"
	"time"

	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/channelz"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/internal"
@@ -45,19 +44,17 @@ type dialOptions struct {
	chainUnaryInts  []UnaryClientInterceptor
	chainStreamInts []StreamClientInterceptor

	cp                   Compressor
	dc                   Decompressor
	bs                   internalbackoff.Strategy
	block                bool
	returnLastError      bool
	timeout              time.Duration
	scChan               <-chan ServiceConfig
	authority            string
	copts                transport.ConnectOptions
	callOptions          []CallOption
	channelzParentID     *channelz.Identifier
	disableServiceConfig bool
	disableRetry         bool
	disableHealthCheck   bool
@@ -195,25 +192,6 @@ func WithDecompressor(dc Decompressor) DialOption {
	})
}

-// WithBalancerName sets the balancer that the ClientConn will be initialized
-// with. Balancer registered with balancerName will be used. This function
-// panics if no balancer was registered by balancerName.
-//
-// The balancer cannot be overridden by balancer option specified by service
-// config.
-//
-// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
-// instead. Will be removed in a future 1.x release.
-func WithBalancerName(balancerName string) DialOption {
-	builder := balancer.Get(balancerName)
-	if builder == nil {
-		panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
-	}
-	return newFuncDialOption(func(o *dialOptions) {
-		o.balancerBuilder = builder
-	})
-}
-
// WithServiceConfig returns a DialOption which has a channel to read the
// service configuration.
//
@@ -304,8 +282,8 @@ func WithReturnConnectionError() DialOption {
// WithCredentialsBundle or WithPerRPCCredentials) which require transport
// security is incompatible and will cause grpc.Dial() to fail.
//
// Deprecated: use WithTransportCredentials and insecure.NewCredentials()
// instead. Will be supported throughout 1.x.
func WithInsecure() DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.copts.TransportCredentials = insecure.NewCredentials()
@@ -498,7 +476,7 @@ func WithAuthority(a string) DialOption {
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithChannelzParentID(id *channelz.Identifier) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.channelzParentID = id
	})
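The WithInsecure deprecation note above points at explicit insecure transport credentials. A minimal sketch of the recommended replacement; the target address is a placeholder:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// Explicit insecure transport credentials instead of grpc.WithInsecure().
	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}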
@@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec)
// more details.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Codecs are
// registered with the same name, the one registered last will take effect.
func RegisterCodec(codec Codec) {
	if codec == nil {
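The corrected comment belongs to the codec registry, and registration is an init-time, last-one-wins operation. A hedged sketch of registering a custom codec, assuming the registry in question is the public google.golang.org/grpc/encoding package; the JSON codec and package name are illustrative:

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// codec implements encoding.Codec with JSON marshaling.
type codec struct{}

func (codec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (codec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (codec) Name() string                               { return "json" }

// Register at init time, as the NOTE above requires; the last registration
// for a given name wins.
func init() {
	encoding.RegisterCodec(codec{})
}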
382	vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go	generated vendored Normal file
@@ -0,0 +1,382 @@
/*
 *
 * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package gracefulswitch implements a graceful switch load balancer.
package gracefulswitch

import (
	"errors"
	"fmt"
	"sync"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
)

var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
var _ balancer.Balancer = (*Balancer)(nil)

// NewBalancer returns a graceful switch Balancer.
func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
	return &Balancer{
		cc:    cc,
		bOpts: opts,
	}
}

// Balancer is a utility to gracefully switch from one balancer to
// a new balancer. It implements the balancer.Balancer interface.
type Balancer struct {
	bOpts balancer.BuildOptions
	cc    balancer.ClientConn

	// mu protects the following fields and all fields within balancerCurrent
	// and balancerPending. mu does not need to be held when calling into the
	// child balancers, as all calls into these children happen only as a direct
	// result of a call into the gracefulSwitchBalancer, which are also
	// guaranteed to be synchronous. There is one exception: an UpdateState call
	// from a child balancer when current and pending are populated can lead to
	// calling Close() on the current. To prevent that racing with an
	// UpdateSubConnState from the channel, we hold currentMu during Close and
	// UpdateSubConnState calls.
	mu              sync.Mutex
	balancerCurrent *balancerWrapper
	balancerPending *balancerWrapper
	closed          bool // set to true when this balancer is closed

	// currentMu must be locked before mu. This mutex guards against this
	// sequence of events: UpdateSubConnState() called, finds the
	// balancerCurrent, gives up lock, updateState comes in, causes Close() on
	// balancerCurrent before the UpdateSubConnState is called on the
	// balancerCurrent.
	currentMu sync.Mutex
}

// swap swaps out the current lb with the pending lb and updates the ClientConn.
// The caller must hold gsb.mu.
func (gsb *Balancer) swap() {
	gsb.cc.UpdateState(gsb.balancerPending.lastState)
	cur := gsb.balancerCurrent
	gsb.balancerCurrent = gsb.balancerPending
	gsb.balancerPending = nil
	go func() {
		gsb.currentMu.Lock()
		defer gsb.currentMu.Unlock()
		cur.Close()
	}()
}

// Helper function that checks if the balancer passed in is current or pending.
// The caller must hold gsb.mu.
func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
	return bw == gsb.balancerCurrent || bw == gsb.balancerPending
}

// SwitchTo initializes the graceful switch process, which completes based on
// connectivity state changes on the current/pending balancer. Thus, the switch
// process is not complete when this method returns. This method must be called
// synchronously alongside the rest of the balancer.Balancer methods this
// Graceful Switch Balancer implements.
func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
	gsb.mu.Lock()
	if gsb.closed {
		gsb.mu.Unlock()
		return errBalancerClosed
	}
	bw := &balancerWrapper{
		gsb: gsb,
		lastState: balancer.State{
			ConnectivityState: connectivity.Connecting,
			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
		},
		subconns: make(map[balancer.SubConn]bool),
	}
	balToClose := gsb.balancerPending // nil if there is no pending balancer
	if gsb.balancerCurrent == nil {
		gsb.balancerCurrent = bw
	} else {
		gsb.balancerPending = bw
	}
	gsb.mu.Unlock()
	balToClose.Close()
	// This function takes a builder instead of a balancer because builder.Build
	// can call back inline, and this utility needs to handle the callbacks.
	newBalancer := builder.Build(bw, gsb.bOpts)
	if newBalancer == nil {
		// This is illegal and should never happen; we clear the balancerWrapper
		// we were constructing if it happens to avoid a potential panic.
		gsb.mu.Lock()
		if gsb.balancerPending != nil {
			gsb.balancerPending = nil
		} else {
			gsb.balancerCurrent = nil
		}
		gsb.mu.Unlock()
		return balancer.ErrBadResolverState
	}

	// This write doesn't need to take gsb.mu because this field never gets read
	// or written to on any calls from the current or pending. Calls from grpc
	// to this balancer are guaranteed to be called synchronously, so this
	// bw.Balancer field will never be forwarded to until this SwitchTo()
	// function returns.
	bw.Balancer = newBalancer
	return nil
}
// Returns nil if the graceful switch balancer is closed.
func (gsb *Balancer) latestBalancer() *balancerWrapper {
	gsb.mu.Lock()
	defer gsb.mu.Unlock()
	if gsb.balancerPending != nil {
		return gsb.balancerPending
	}
	return gsb.balancerCurrent
}

// UpdateClientConnState forwards the update to the latest balancer created.
func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
	// The resolver data is only relevant to the most recent LB Policy.
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return errBalancerClosed
	}
	// Perform this call without gsb.mu to prevent deadlocks if the child calls
	// back into the channel. The latest balancer can never be closed during a
	// call from the channel, even without gsb.mu held.
	return balToUpdate.UpdateClientConnState(state)
}

// ResolverError forwards the error to the latest balancer created.
func (gsb *Balancer) ResolverError(err error) {
	// The resolver data is only relevant to the most recent LB Policy.
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return
	}
	// Perform this call without gsb.mu to prevent deadlocks if the child calls
	// back into the channel. The latest balancer can never be closed during a
	// call from the channel, even without gsb.mu held.
	balToUpdate.ResolverError(err)
}

// ExitIdle forwards the call to the latest balancer created.
//
// If the latest balancer does not support ExitIdle, the subConns are
// re-connected to manually.
func (gsb *Balancer) ExitIdle() {
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return
	}
	// There is no need to protect this read with a mutex, as the write to the
	// Balancer field happens in SwitchTo, which completes before this can be
	// called.
	if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
		ei.ExitIdle()
		return
	}
	for sc := range balToUpdate.subconns {
		sc.Connect()
	}
}

// UpdateSubConnState forwards the update to the appropriate child.
func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
	gsb.currentMu.Lock()
	defer gsb.currentMu.Unlock()
	gsb.mu.Lock()
	// Forward update to the appropriate child. Even if there is a pending
	// balancer, the current balancer should continue to get SubConn updates to
	// maintain the proper state while the pending is still connecting.
	var balToUpdate *balancerWrapper
	if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
		balToUpdate = gsb.balancerCurrent
	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
		balToUpdate = gsb.balancerPending
	}
	gsb.mu.Unlock()
	if balToUpdate == nil {
		// SubConn belonged to a stale lb policy that has not yet fully closed,
		// or the balancer was already closed.
		return
	}
	balToUpdate.UpdateSubConnState(sc, state)
}

// Close closes any active child balancers.
func (gsb *Balancer) Close() {
	gsb.mu.Lock()
	gsb.closed = true
	currentBalancerToClose := gsb.balancerCurrent
	gsb.balancerCurrent = nil
	pendingBalancerToClose := gsb.balancerPending
	gsb.balancerPending = nil
	gsb.mu.Unlock()

	currentBalancerToClose.Close()
	pendingBalancerToClose.Close()
}
// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
|
||||||
|
// methods to help cleanup SubConns created by the wrapped balancer.
|
||||||
|
//
|
||||||
|
// It implements the balancer.ClientConn interface and is passed down in that
|
||||||
|
// capacity to the wrapped balancer. It maintains a set of subConns created by
|
||||||
|
// the wrapped balancer and calls from the latter to create/update/remove
|
||||||
|
// SubConns update this set before being forwarded to the parent ClientConn.
|
||||||
|
// State updates from the wrapped balancer can result in invocation of the
|
||||||
|
// graceful switch logic.
|
||||||
|
type balancerWrapper struct {
|
||||||
|
balancer.Balancer
|
||||||
|
gsb *Balancer
|
||||||
|
|
||||||
|
lastState balancer.State
|
||||||
|
subconns map[balancer.SubConn]bool // subconns created by this balancer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
||||||
|
if state.ConnectivityState == connectivity.Shutdown {
|
||||||
|
bw.gsb.mu.Lock()
|
||||||
|
delete(bw.subconns, sc)
|
||||||
|
bw.gsb.mu.Unlock()
|
||||||
|
}
|
||||||
|
  // There is no need to protect this read with a mutex, as the write to the
  // Balancer field happens in SwitchTo, which completes before this can be
  // called.
  bw.Balancer.UpdateSubConnState(sc, state)
}

// Close closes the underlying LB policy and removes the subconns it created. bw
// must not be referenced via balancerCurrent or balancerPending in gsb when
// called. gsb.mu must not be held. Does not panic with a nil receiver.
func (bw *balancerWrapper) Close() {
  // before Close is called.
  if bw == nil {
    return
  }
  // There is no need to protect this read with a mutex, as Close() is
  // impossible to be called concurrently with the write in SwitchTo(). The
  // callsites of Close() for this balancer in Graceful Switch Balancer will
  // never be called until SwitchTo() returns.
  bw.Balancer.Close()
  bw.gsb.mu.Lock()
  for sc := range bw.subconns {
    bw.gsb.cc.RemoveSubConn(sc)
  }
  bw.gsb.mu.Unlock()
}

func (bw *balancerWrapper) UpdateState(state balancer.State) {
  // Hold the mutex for this entire call to ensure it cannot occur
  // concurrently with other updateState() calls. This causes updates to
  // lastState and calls to cc.UpdateState to happen atomically.
  bw.gsb.mu.Lock()
  defer bw.gsb.mu.Unlock()
  bw.lastState = state

  if !bw.gsb.balancerCurrentOrPending(bw) {
    return
  }

  if bw == bw.gsb.balancerCurrent {
    // In the case that the current balancer exits READY, and there is a pending
    // balancer, you can forward the pending balancer's cached State up to
    // ClientConn and swap the pending into the current. This is because there
    // is no reason to gracefully switch from and keep using the old policy as
    // the ClientConn is not connected to any backends.
    if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
      bw.gsb.swap()
      return
    }
    // Even if there is a pending balancer waiting to be gracefully switched to,
    // continue to forward current balancer updates to the Client Conn. Ignoring
    // state + picker from the current would cause undefined behavior/cause the
    // system to behave incorrectly from the current LB policies perspective.
    // Also, the current LB is still being used by grpc to choose SubConns per
    // RPC, and thus should use the most updated form of the current balancer.
    bw.gsb.cc.UpdateState(state)
    return
  }
  // This method is now dealing with a state update from the pending balancer.
  // If the current balancer is currently in a state other than READY, the new
  // policy can be swapped into place immediately. This is because there is no
  // reason to gracefully switch from and keep using the old policy as the
  // ClientConn is not connected to any backends.
  if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
    bw.gsb.swap()
  }
}

func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
  bw.gsb.mu.Lock()
  if !bw.gsb.balancerCurrentOrPending(bw) {
    bw.gsb.mu.Unlock()
    return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
  }
  bw.gsb.mu.Unlock()

  sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
  if err != nil {
    return nil, err
  }
  bw.gsb.mu.Lock()
  if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
    bw.gsb.cc.RemoveSubConn(sc)
    bw.gsb.mu.Unlock()
    return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
  }
  bw.subconns[sc] = true
  bw.gsb.mu.Unlock()
  return sc, nil
}

func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
  // Ignore ResolveNow requests from anything other than the most recent
  // balancer, because older balancers were already removed from the config.
  if bw != bw.gsb.latestBalancer() {
    return
  }
  bw.gsb.cc.ResolveNow(opts)
}

func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
  bw.gsb.mu.Lock()
  if !bw.gsb.balancerCurrentOrPending(bw) {
    bw.gsb.mu.Unlock()
    return
  }
  bw.gsb.mu.Unlock()
  bw.gsb.cc.RemoveSubConn(sc)
}

func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
  bw.gsb.mu.Lock()
  if !bw.gsb.balancerCurrentOrPending(bw) {
    bw.gsb.mu.Unlock()
    return
  }
  bw.gsb.mu.Unlock()
  bw.gsb.cc.UpdateAddresses(sc, addrs)
}

func (bw *balancerWrapper) Target() string {
  return bw.gsb.cc.Target()
}
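The UpdateState logic above boils down to a small promotion rule for the pending balancer. The sketch below restates that rule outside of the wrapper; the function name shouldSwapToPending is illustrative only and is not part of grpc-go.

package main

import (
  "fmt"

  "google.golang.org/grpc/connectivity"
)

// shouldSwapToPending restates the rule applied above: promote the pending
// balancer once it leaves CONNECTING, or immediately if the current balancer
// is no longer READY (there is nothing worth draining gracefully).
func shouldSwapToPending(pending, current connectivity.State) bool {
  return pending != connectivity.Connecting || current != connectivity.Ready
}

func main() {
  fmt.Println(shouldSwapToPending(connectivity.Ready, connectivity.Ready))      // true: pending is usable now
  fmt.Println(shouldSwapToPending(connectivity.Connecting, connectivity.Ready)) // false: keep current while pending connects
}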
@ -31,7 +31,7 @@ import (
 // Logger is the global binary logger. It can be used to get binary logger for
 // each method.
 type Logger interface {
-  getMethodLogger(methodName string) *MethodLogger
+  GetMethodLogger(methodName string) MethodLogger
 }

 // binLogger is the global binary logger for the binary. One of this should be
@ -49,17 +49,24 @@ func SetLogger(l Logger) {
   binLogger = l
 }

+// GetLogger gets the binarg logger.
+//
+// Only call this at init time.
+func GetLogger() Logger {
+  return binLogger
+}
+
 // GetMethodLogger returns the methodLogger for the given methodName.
 //
 // methodName should be in the format of "/service/method".
 //
 // Each methodLogger returned by this method is a new instance. This is to
 // generate sequence id within the call.
-func GetMethodLogger(methodName string) *MethodLogger {
+func GetMethodLogger(methodName string) MethodLogger {
   if binLogger == nil {
     return nil
   }
-  return binLogger.getMethodLogger(methodName)
+  return binLogger.GetMethodLogger(methodName)
 }

 func init() {
@ -68,17 +75,29 @@ func init() {
   binLogger = NewLoggerFromConfigString(configStr)
 }

-type methodLoggerConfig struct {
+// MethodLoggerConfig contains the setting for logging behavior of a method
+// logger. Currently, it contains the max length of header and message.
+type MethodLoggerConfig struct {
   // Max length of header and message.
-  hdr, msg uint64
+  Header, Message uint64
+}
+
+// LoggerConfig contains the config for loggers to create method loggers.
+type LoggerConfig struct {
+  All      *MethodLoggerConfig
+  Services map[string]*MethodLoggerConfig
+  Methods  map[string]*MethodLoggerConfig
+
+  Blacklist map[string]struct{}
 }

 type logger struct {
-  all       *methodLoggerConfig
-  services  map[string]*methodLoggerConfig
-  methods   map[string]*methodLoggerConfig
-
-  blacklist map[string]struct{}
+  config LoggerConfig
+}
+
+// NewLoggerFromConfig builds a logger with the given LoggerConfig.
+func NewLoggerFromConfig(config LoggerConfig) Logger {
+  return &logger{config: config}
 }

 // newEmptyLogger creates an empty logger. The map fields need to be filled in
@ -88,57 +107,57 @@ func newEmptyLogger() *logger {
 }

 // Set method logger for "*".
-func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
-  if l.all != nil {
+func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
+  if l.config.All != nil {
     return fmt.Errorf("conflicting global rules found")
   }
-  l.all = ml
+  l.config.All = ml
   return nil
 }

 // Set method logger for "service/*".
 //
 // New methodLogger with same service overrides the old one.
-func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
-  if _, ok := l.services[service]; ok {
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
+  if _, ok := l.config.Services[service]; ok {
     return fmt.Errorf("conflicting service rules for service %v found", service)
   }
-  if l.services == nil {
-    l.services = make(map[string]*methodLoggerConfig)
+  if l.config.Services == nil {
+    l.config.Services = make(map[string]*MethodLoggerConfig)
   }
-  l.services[service] = ml
+  l.config.Services[service] = ml
   return nil
 }

 // Set method logger for "service/method".
 //
 // New methodLogger with same method overrides the old one.
-func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
-  if _, ok := l.blacklist[method]; ok {
+func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
+  if _, ok := l.config.Blacklist[method]; ok {
     return fmt.Errorf("conflicting blacklist rules for method %v found", method)
   }
-  if _, ok := l.methods[method]; ok {
+  if _, ok := l.config.Methods[method]; ok {
     return fmt.Errorf("conflicting method rules for method %v found", method)
   }
-  if l.methods == nil {
-    l.methods = make(map[string]*methodLoggerConfig)
+  if l.config.Methods == nil {
+    l.config.Methods = make(map[string]*MethodLoggerConfig)
   }
-  l.methods[method] = ml
+  l.config.Methods[method] = ml
   return nil
 }

 // Set blacklist method for "-service/method".
 func (l *logger) setBlacklist(method string) error {
-  if _, ok := l.blacklist[method]; ok {
+  if _, ok := l.config.Blacklist[method]; ok {
     return fmt.Errorf("conflicting blacklist rules for method %v found", method)
   }
-  if _, ok := l.methods[method]; ok {
+  if _, ok := l.config.Methods[method]; ok {
     return fmt.Errorf("conflicting method rules for method %v found", method)
   }
-  if l.blacklist == nil {
-    l.blacklist = make(map[string]struct{})
+  if l.config.Blacklist == nil {
+    l.config.Blacklist = make(map[string]struct{})
   }
-  l.blacklist[method] = struct{}{}
+  l.config.Blacklist[method] = struct{}{}
   return nil
 }

@ -148,23 +167,23 @@ func (l *logger) setBlacklist(method string) error {
 //
 // Each methodLogger returned by this method is a new instance. This is to
 // generate sequence id within the call.
-func (l *logger) getMethodLogger(methodName string) *MethodLogger {
+func (l *logger) GetMethodLogger(methodName string) MethodLogger {
   s, m, err := grpcutil.ParseMethod(methodName)
   if err != nil {
     grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
     return nil
   }
-  if ml, ok := l.methods[s+"/"+m]; ok {
-    return newMethodLogger(ml.hdr, ml.msg)
+  if ml, ok := l.config.Methods[s+"/"+m]; ok {
+    return newMethodLogger(ml.Header, ml.Message)
   }
-  if _, ok := l.blacklist[s+"/"+m]; ok {
+  if _, ok := l.config.Blacklist[s+"/"+m]; ok {
     return nil
   }
-  if ml, ok := l.services[s]; ok {
-    return newMethodLogger(ml.hdr, ml.msg)
+  if ml, ok := l.config.Services[s]; ok {
+    return newMethodLogger(ml.Header, ml.Message)
   }
-  if l.all == nil {
+  if l.config.All == nil {
     return nil
   }
-  return newMethodLogger(l.all.hdr, l.all.msg)
+  return newMethodLogger(l.config.All.Header, l.config.All.Message)
 }
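A minimal sketch of how the newly exported config types compose. Note that google.golang.org/grpc/internal/binarylog is internal to the grpc module, so this only compiles from inside it; the values and method name are arbitrary examples, not defaults.

package main

import (
  "fmt"

  "google.golang.org/grpc/internal/binarylog"
)

func main() {
  // Log every method, truncating headers to 64 bytes and messages to 1 KiB,
  // except the health-check method, which is blacklisted.
  cfg := binarylog.LoggerConfig{
    All: &binarylog.MethodLoggerConfig{Header: 64, Message: 1024},
    Blacklist: map[string]struct{}{
      "grpc.health.v1.Health/Check": {},
    },
  }
  l := binarylog.NewLoggerFromConfig(cfg)
  fmt.Println(l.GetMethodLogger("/grpc.health.v1.Health/Check") == nil) // true: blacklisted methods get no logger
}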
@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
   if err != nil {
     return fmt.Errorf("invalid config: %q, %v", config, err)
   }
-  if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+  if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
     return fmt.Errorf("invalid config: %v", err)
   }
   return nil
@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
     return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
   }
   if m == "*" {
-    if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+    if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
       return fmt.Errorf("invalid config: %v", err)
     }
   } else {
-    if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+    if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
       return fmt.Errorf("invalid config: %v", err)
     }
   }
@ -48,7 +48,11 @@ func (g *callIDGenerator) reset() {
 var idGen callIDGenerator

 // MethodLogger is the sub-logger for each method.
-type MethodLogger struct {
+type MethodLogger interface {
+  Log(LogEntryConfig)
+}
+
+type methodLogger struct {
   headerMaxLen, messageMaxLen uint64

   callID uint64
@ -57,8 +61,8 @@ type MethodLogger struct {
   sink Sink // TODO(blog): make this plugable.
 }

-func newMethodLogger(h, m uint64) *MethodLogger {
-  return &MethodLogger{
+func newMethodLogger(h, m uint64) *methodLogger {
+  return &methodLogger{
     headerMaxLen:  h,
     messageMaxLen: m,

@ -69,8 +73,10 @@ func newMethodLogger(h, m uint64) *MethodLogger {
   }
 }

-// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *MethodLogger) Log(c LogEntryConfig) {
+// Build is an internal only method for building the proto message out of the
+// input event. It's made public to enable other library to reuse as much logic
+// in methodLogger as possible.
+func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
   m := c.toProto()
   timestamp, _ := ptypes.TimestampProto(time.Now())
   m.Timestamp = timestamp
@ -85,11 +91,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) {
   case *pb.GrpcLogEntry_Message:
     m.PayloadTruncated = ml.truncateMessage(pay.Message)
   }
-  ml.sink.Write(m)
+  return m
 }

-func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *methodLogger) Log(c LogEntryConfig) {
+  ml.sink.Write(ml.Build(c))
+}
+
+func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
   if ml.headerMaxLen == maxUInt {
     return false
   }
@ -119,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
   return truncated
 }

-func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
+func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
   if ml.messageMaxLen == maxUInt {
     return false
   }
@ -25,6 +25,7 @@ package channelz

 import (
   "context"
+  "errors"
   "fmt"
   "sort"
   "sync"
@ -184,54 +185,77 @@ func GetServer(id int64) *ServerMetric {
   return db.get().GetServer(id)
 }

-// RegisterChannel registers the given channel c in channelz database with ref
-// as its reference name, and add it to the child list of its parent (identified
-// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
-// assigned to this channel.
-func RegisterChannel(c Channel, pid int64, ref string) int64 {
+// RegisterChannel registers the given channel c in the channelz database with
+// ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). pid == nil means no parent.
+//
+// Returns a unique channelz identifier assigned to this channel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
   id := idGen.genID()
+  var parent int64
+  isTopChannel := true
+  if pid != nil {
+    isTopChannel = false
+    parent = pid.Int()
+  }
+
+  if !IsOn() {
+    return newIdentifer(RefChannel, id, pid)
+  }
+
   cn := &channel{
     refName:     ref,
     c:           c,
     subChans:    make(map[int64]string),
     nestedChans: make(map[int64]string),
     id:          id,
-    pid:         pid,
+    pid:         parent,
     trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
   }
-  if pid == 0 {
-    db.get().addChannel(id, cn, true, pid)
-  } else {
-    db.get().addChannel(id, cn, false, pid)
-  }
-  return id
+  db.get().addChannel(id, cn, isTopChannel, parent)
+  return newIdentifer(RefChannel, id, pid)
 }

-// RegisterSubChannel registers the given channel c in channelz database with ref
-// as its reference name, and add it to the child list of its parent (identified
-// by pid). It returns the unique channelz tracking id assigned to this subchannel.
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
-  if pid == 0 {
-    logger.Error("a SubChannel's parent id cannot be 0")
-    return 0
+// RegisterSubChannel registers the given subChannel c in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid).
+//
+// Returns a unique channelz identifier assigned to this subChannel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
+  if pid == nil {
+    return nil, errors.New("a SubChannel's parent id cannot be nil")
   }
   id := idGen.genID()
+  if !IsOn() {
+    return newIdentifer(RefSubChannel, id, pid), nil
+  }
+
   sc := &subChannel{
     refName: ref,
     c:       c,
     sockets: make(map[int64]string),
     id:      id,
-    pid:     pid,
+    pid:     pid.Int(),
     trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
   }
-  db.get().addSubChannel(id, sc, pid)
-  return id
+  db.get().addSubChannel(id, sc, pid.Int())
+  return newIdentifer(RefSubChannel, id, pid), nil
 }

 // RegisterServer registers the given server s in channelz database. It returns
 // the unique channelz tracking id assigned to this server.
-func RegisterServer(s Server, ref string) int64 {
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterServer(s Server, ref string) *Identifier {
   id := idGen.genID()
+  if !IsOn() {
+    return newIdentifer(RefServer, id, nil)
+  }
+
   svr := &server{
     refName: ref,
     s:       s,
@ -240,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 {
     id:      id,
   }
   db.get().addServer(id, svr)
-  return id
+  return newIdentifer(RefServer, id, nil)
 }

 // RegisterListenSocket registers the given listen socket s in channelz database
 // with ref as its reference name, and add it to the child list of its parent
 // (identified by pid). It returns the unique channelz tracking id assigned to
 // this listen socket.
-func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
-  if pid == 0 {
-    logger.Error("a ListenSocket's parent id cannot be 0")
-    return 0
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+  if pid == nil {
+    return nil, errors.New("a ListenSocket's parent id cannot be 0")
   }
   id := idGen.genID()
-  ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
-  db.get().addListenSocket(id, ls, pid)
-  return id
+  if !IsOn() {
+    return newIdentifer(RefListenSocket, id, pid), nil
+  }
+
+  ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+  db.get().addListenSocket(id, ls, pid.Int())
+  return newIdentifer(RefListenSocket, id, pid), nil
 }

 // RegisterNormalSocket registers the given normal socket s in channelz database
-// with ref as its reference name, and add it to the child list of its parent
+// with ref as its reference name, and adds it to the child list of its parent
 // (identified by pid). It returns the unique channelz tracking id assigned to
 // this normal socket.
-func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
-  if pid == 0 {
-    logger.Error("a NormalSocket's parent id cannot be 0")
-    return 0
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+  if pid == nil {
+    return nil, errors.New("a NormalSocket's parent id cannot be 0")
   }
   id := idGen.genID()
-  ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
-  db.get().addNormalSocket(id, ns, pid)
-  return id
+  if !IsOn() {
+    return newIdentifer(RefNormalSocket, id, pid), nil
+  }
+
+  ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+  db.get().addNormalSocket(id, ns, pid.Int())
+  return newIdentifer(RefNormalSocket, id, pid), nil
 }

 // RemoveEntry removes an entry with unique channelz tracking id to be id from
 // channelz database.
-func RemoveEntry(id int64) {
-  db.get().removeEntry(id)
+//
+// If channelz is not turned ON, this function is a no-op.
+func RemoveEntry(id *Identifier) {
+  if !IsOn() {
+    return
+  }
+  db.get().removeEntry(id.Int())
 }

-// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
-// to the channel trace.
-// The Parent field is optional. It is used for event that will be recorded in the entity's parent
-// trace also.
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
+// the event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
 type TraceEventDesc struct {
   Desc     string
   Severity Severity
   Parent   *TraceEventDesc
 }

-// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
-  for d := desc; d != nil; d = d.Parent {
-    switch d.Severity {
-    case CtUnknown, CtInfo:
-      l.InfoDepth(depth+1, d.Desc)
-    case CtWarning:
-      l.WarningDepth(depth+1, d.Desc)
-    case CtError:
-      l.ErrorDepth(depth+1, d.Desc)
-    }
+// AddTraceEvent adds trace related to the entity with specified id, using the
+// provided TraceEventDesc.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
+  // Log only the trace description associated with the bottom most entity.
+  switch desc.Severity {
+  case CtUnknown, CtInfo:
+    l.InfoDepth(depth+1, withParens(id)+desc.Desc)
+  case CtWarning:
+    l.WarningDepth(depth+1, withParens(id)+desc.Desc)
+  case CtError:
+    l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
   }

   if getMaxTraceEntry() == 0 {
     return
   }
-  db.get().traceEvent(id, desc)
+  if IsOn() {
+    db.get().traceEvent(id.Int(), desc)
+  }
 }

 // channelMap is the storage data structure for channelz.
@ -0,0 +1,75 @@
/*
 *
 * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package channelz

import "fmt"

// Identifier is an opaque identifier which uniquely identifies an entity in the
// channelz database.
type Identifier struct {
  typ RefChannelType
  id  int64
  str string
  pid *Identifier
}

// Type returns the entity type corresponding to id.
func (id *Identifier) Type() RefChannelType {
  return id.typ
}

// Int returns the integer identifier corresponding to id.
func (id *Identifier) Int() int64 {
  return id.id
}

// String returns a string representation of the entity corresponding to id.
//
// This includes some information about the parent as well. Examples:
// Top-level channel: [Channel #channel-number]
// Nested channel: [Channel #parent-channel-number Channel #channel-number]
// Sub channel: [Channel #parent-channel SubChannel #subchannel-number]
func (id *Identifier) String() string {
  return id.str
}

// Equal returns true if other is the same as id.
func (id *Identifier) Equal(other *Identifier) bool {
  if (id != nil) != (other != nil) {
    return false
  }
  if id == nil && other == nil {
    return true
  }
  return id.typ == other.typ && id.id == other.id && id.pid == other.pid
}

// NewIdentifierForTesting returns a new opaque identifier to be used only for
// testing purposes.
func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier {
  return newIdentifer(typ, id, pid)
}

func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier {
  str := fmt.Sprintf("%s #%d", typ, id)
  if pid != nil {
    str = fmt.Sprintf("%s %s", pid, str)
  }
  return &Identifier{typ: typ, id: id, str: str, pid: pid}
}
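A short sketch of the new identifier type. NewIdentifierForTesting is the only exported constructor, so it is used here purely to show String() and Equal(); the package is internal to the grpc module, so this compiles only from inside it.

package main

import (
  "fmt"

  "google.golang.org/grpc/internal/channelz"
)

func main() {
  parent := channelz.NewIdentifierForTesting(channelz.RefChannel, 1, nil)
  sub := channelz.NewIdentifierForTesting(channelz.RefSubChannel, 7, parent)

  fmt.Println(parent)            // Channel #1
  fmt.Println(sub)               // Channel #1 SubChannel #7
  fmt.Println(sub.Equal(parent)) // false: different type and id
}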
@ -26,77 +26,54 @@ import (

 var logger = grpclog.Component("channelz")

+func withParens(id *Identifier) string {
+  return "[" + id.String() + "] "
+}
+
 // Info logs and adds a trace event if channelz is on.
-func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
-  if IsOn() {
-    AddTraceEvent(l, id, 1, &TraceEventDesc{
-      Desc:     fmt.Sprint(args...),
-      Severity: CtInfo,
-    })
-  } else {
-    l.InfoDepth(1, args...)
-  }
+func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+  AddTraceEvent(l, id, 1, &TraceEventDesc{
+    Desc:     fmt.Sprint(args...),
+    Severity: CtInfo,
+  })
 }

 // Infof logs and adds a trace event if channelz is on.
-func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
-  msg := fmt.Sprintf(format, args...)
-  if IsOn() {
-    AddTraceEvent(l, id, 1, &TraceEventDesc{
-      Desc:     msg,
-      Severity: CtInfo,
-    })
-  } else {
-    l.InfoDepth(1, msg)
-  }
+func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+  AddTraceEvent(l, id, 1, &TraceEventDesc{
+    Desc:     fmt.Sprintf(format, args...),
+    Severity: CtInfo,
+  })
 }

 // Warning logs and adds a trace event if channelz is on.
-func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
-  if IsOn() {
-    AddTraceEvent(l, id, 1, &TraceEventDesc{
-      Desc:     fmt.Sprint(args...),
-      Severity: CtWarning,
-    })
-  } else {
-    l.WarningDepth(1, args...)
-  }
+func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+  AddTraceEvent(l, id, 1, &TraceEventDesc{
+    Desc:     fmt.Sprint(args...),
+    Severity: CtWarning,
+  })
 }

 // Warningf logs and adds a trace event if channelz is on.
-func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
-  msg := fmt.Sprintf(format, args...)
-  if IsOn() {
-    AddTraceEvent(l, id, 1, &TraceEventDesc{
-      Desc:     msg,
-      Severity: CtWarning,
-    })
-  } else {
-    l.WarningDepth(1, msg)
-  }
+func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+  AddTraceEvent(l, id, 1, &TraceEventDesc{
+    Desc:     fmt.Sprintf(format, args...),
+    Severity: CtWarning,
+  })
 }

 // Error logs and adds a trace event if channelz is on.
-func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
-  if IsOn() {
-    AddTraceEvent(l, id, 1, &TraceEventDesc{
-      Desc:     fmt.Sprint(args...),
-      Severity: CtError,
-    })
-  } else {
-    l.ErrorDepth(1, args...)
-  }
+func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+  AddTraceEvent(l, id, 1, &TraceEventDesc{
+    Desc:     fmt.Sprint(args...),
+    Severity: CtError,
+  })
 }

 // Errorf logs and adds a trace event if channelz is on.
-func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
-  msg := fmt.Sprintf(format, args...)
-  if IsOn() {
-    AddTraceEvent(l, id, 1, &TraceEventDesc{
-      Desc:     msg,
-      Severity: CtError,
-    })
-  } else {
-    l.ErrorDepth(1, msg)
-  }
+func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+  AddTraceEvent(l, id, 1, &TraceEventDesc{
+    Desc:     fmt.Sprintf(format, args...),
+    Severity: CtError,
+  })
 }
@ -686,12 +686,33 @@ const (
 type RefChannelType int

 const (
+  // RefUnknown indicates an unknown entity type, the zero value for this type.
+  RefUnknown RefChannelType = iota
   // RefChannel indicates the referenced entity is a Channel.
-  RefChannel RefChannelType = iota
+  RefChannel
   // RefSubChannel indicates the referenced entity is a SubChannel.
   RefSubChannel
+  // RefServer indicates the referenced entity is a Server.
+  RefServer
+  // RefListenSocket indicates the referenced entity is a ListenSocket.
+  RefListenSocket
+  // RefNormalSocket indicates the referenced entity is a NormalSocket.
+  RefNormalSocket
 )

+var refChannelTypeToString = map[RefChannelType]string{
+  RefUnknown:      "Unknown",
+  RefChannel:      "Channel",
+  RefSubChannel:   "SubChannel",
+  RefServer:       "Server",
+  RefListenSocket: "ListenSocket",
+  RefNormalSocket: "NormalSocket",
+}
+
+func (r RefChannelType) String() string {
+  return refChannelTypeToString[r]
+}
+
 func (c *channelTrace) dumpData() *ChannelTrace {
   c.mu.Lock()
   ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
@ -85,3 +85,9 @@ const (
   // that supports backend returned by grpclb balancer.
   CredsBundleModeBackendFromBalancer = "backend-from-balancer"
 )
+
+// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
+//
+// It currently has an experimental suffix which would be removed once
+// end-to-end testing of the policy is completed.
+const RLSLoadBalancingPolicyName = "rls_experimental"
@ -22,6 +22,9 @@
 package metadata

 import (
+  "fmt"
+  "strings"
+
   "google.golang.org/grpc/metadata"
   "google.golang.org/grpc/resolver"
 )
@ -72,3 +75,46 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address {
   addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
   return addr
 }
+
+// Validate returns an error if the input md contains invalid keys or values.
+//
+// If the header is not a pseudo-header, the following items are checked:
+// - header names must contain one or more characters from this set [0-9 a-z _ - .].
+// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
+// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
+func Validate(md metadata.MD) error {
+  for k, vals := range md {
+    // pseudo-header will be ignored
+    if k[0] == ':' {
+      continue
+    }
+    // check key, for i that saving a conversion if not using for range
+    for i := 0; i < len(k); i++ {
+      r := k[i]
+      if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+        return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
+      }
+    }
+    if strings.HasSuffix(k, "-bin") {
+      continue
+    }
+    // check value
+    for _, val := range vals {
+      if hasNotPrintable(val) {
+        return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
+      }
+    }
+  }
+  return nil
+}
+
+// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E
+func hasNotPrintable(msg string) bool {
+  // for i that saving a conversion if not using for range
+  for i := 0; i < len(msg); i++ {
+    if msg[i] < 0x20 || msg[i] > 0x7E {
+      return true
+    }
+  }
+  return false
+}
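The validation rules above can be exercised directly. A small sketch follows; the keys and values are arbitrary examples, and the package is internal to the grpc module, so this only compiles from inside it.

package main

import (
  "fmt"

  imetadata "google.golang.org/grpc/internal/metadata"
  "google.golang.org/grpc/metadata"
)

func main() {
  good := metadata.MD{"x-request-id": {"abc123"}}
  badKey := metadata.MD{"bad key": {"v"}}           // space is outside [0-9a-z-_.]
  badVal := metadata.MD{"trace-id": {"line\nfeed"}} // \n is outside %x20-%x7E

  fmt.Println(imetadata.Validate(good))   // <nil>
  fmt.Println(imetadata.Validate(badKey)) // error: illegal characters in key
  fmt.Println(imetadata.Validate(badVal)) // error: non-printable ASCII in value
}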
@ -0,0 +1,82 @@
/*
 *
 * Copyright 2021 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package pretty defines helper functions to pretty-print structs for logging.
package pretty

import (
  "bytes"
  "encoding/json"
  "fmt"

  "github.com/golang/protobuf/jsonpb"
  protov1 "github.com/golang/protobuf/proto"
  "google.golang.org/protobuf/encoding/protojson"
  protov2 "google.golang.org/protobuf/proto"
)

const jsonIndent = " "

// ToJSON marshals the input into a json string.
//
// If marshal fails, it falls back to fmt.Sprintf("%+v").
func ToJSON(e interface{}) string {
  switch ee := e.(type) {
  case protov1.Message:
    mm := jsonpb.Marshaler{Indent: jsonIndent}
    ret, err := mm.MarshalToString(ee)
    if err != nil {
      // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
      // messages are not imported, and this will fail because the message
      // is not found.
      return fmt.Sprintf("%+v", ee)
    }
    return ret
  case protov2.Message:
    mm := protojson.MarshalOptions{
      Multiline: true,
      Indent:    jsonIndent,
    }
    ret, err := mm.Marshal(ee)
    if err != nil {
      // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
      // messages are not imported, and this will fail because the message
      // is not found.
      return fmt.Sprintf("%+v", ee)
    }
    return string(ret)
  default:
    ret, err := json.MarshalIndent(ee, "", jsonIndent)
    if err != nil {
      return fmt.Sprintf("%+v", ee)
    }
    return string(ret)
  }
}

// FormatJSON formats the input json bytes with indentation.
//
// If Indent fails, it returns the unchanged input as string.
func FormatJSON(b []byte) string {
  var out bytes.Buffer
  err := json.Indent(&out, b, "", jsonIndent)
  if err != nil {
    return string(b)
  }
  return out.String()
}
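Usage of the new helpers is straightforward; a brief sketch (again internal to the grpc module, shown only for illustration, with made-up values).

package main

import (
  "fmt"

  "google.golang.org/grpc/internal/pretty"
)

func main() {
  // Non-proto values fall through to encoding/json with indentation.
  fmt.Println(pretty.ToJSON(map[string]interface{}{"service": "origin", "port": 8080}))

  // Raw JSON bytes can be re-indented before being written to logs.
  fmt.Println(pretty.FormatJSON([]byte(`{"a":1,"b":[2,3]}`)))
}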
@ -137,6 +137,7 @@ type earlyAbortStream struct {
   streamID       uint32
   contentSubtype string
   status         *status.Status
+  rst            bool
 }

 func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
@ -786,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
   if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
     return err
   }
+  if eas.rst {
+    if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
+      return err
+    }
+  }
   return nil
 }
@ -132,7 +132,7 @@ type http2Client struct {
   kpDormant bool

   // Fields below are for channelz metric collection.
-  channelzID int64 // channelz unique identification number
+  channelzID *channelz.Identifier
   czData     *channelzData

   onGoAway func(GoAwayReason)
@ -351,8 +351,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
     }
     t.statsHandler.HandleConn(t.ctx, connBegin)
   }
-  if channelz.IsOn() {
-    t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
+  t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
+  if err != nil {
+    return nil, err
   }
   if t.keepaliveEnabled {
     t.kpDormancyCond = sync.NewCond(&t.mu)
@ -630,8 +631,8 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
 // the wire. However, there are two notable exceptions:
 //
 // 1. If the stream headers violate the max header list size allowed by the
-//    server. In this case there is no reason to retry at all, as it is
-//    assumed the RPC would continue to fail on subsequent attempts.
+//    server. It's possible this could succeed on another transport, even if
+//    it's unlikely, but do not transparently retry.
 // 2. If the credentials errored when requesting their headers. In this case,
 //    it's possible a retry can fix the problem, but indefinitely transparently
 //    retrying is not appropriate as it is likely the credentials, if they can
@ -639,8 +640,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
 type NewStreamError struct {
   Err error

-  DoNotRetry            bool
-  DoNotTransparentRetry bool
+  AllowTransparentRetry bool
 }

 func (e NewStreamError) Error() string {
@ -649,11 +649,11 @@ func (e NewStreamError) Error() string {

 // NewStream creates a stream and registers it into the transport as "active"
 // streams. All non-nil errors returned will be *NewStreamError.
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
   ctx = peer.NewContext(ctx, t.getPeer())
   headerFields, err := t.createHeaderFields(ctx, callHdr)
   if err != nil {
-    return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true}
+    return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
   }
   s := t.newStream(ctx, callHdr)
   cleanup := func(err error) {
@ -753,13 +753,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
       return true
     }, hdr)
     if err != nil {
-      return nil, &NewStreamError{Err: err}
+      // Connection closed.
+      return nil, &NewStreamError{Err: err, AllowTransparentRetry: true}
     }
     if success {
       break
     }
     if hdrListSizeErr != nil {
-      return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true}
+      return nil, &NewStreamError{Err: hdrListSizeErr}
     }
     firstTry = false
     select {
@ -767,9 +768,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
     case <-ctx.Done():
       return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
     case <-t.goAway:
-      return nil, &NewStreamError{Err: errStreamDrain}
+      return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true}
     case <-t.ctx.Done():
-      return nil, &NewStreamError{Err: ErrConnClosing}
+      return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
     }
   }
   if t.statsHandler != nil {
@ -898,9 +899,7 @@ func (t *http2Client) Close(err error) {
   t.controlBuf.finish()
   t.cancel()
   t.conn.Close()
-  if channelz.IsOn() {
-    channelz.RemoveEntry(t.channelzID)
-  }
+  channelz.RemoveEntry(t.channelzID)
   // Append info about previous goaways if there were any, since this may be important
   // for understanding the root cause for this connection to be closed.
   _, goAwayDebugMessage := t.GetGoAwayReason()
@ -21,7 +21,6 @@ package transport
 import (
   "bytes"
   "context"
-  "errors"
   "fmt"
   "io"
   "math"
@ -36,6 +35,7 @@ import (
   "golang.org/x/net/http2"
   "golang.org/x/net/http2/hpack"
   "google.golang.org/grpc/internal/grpcutil"
+  "google.golang.org/grpc/internal/syscall"

   "google.golang.org/grpc/codes"
   "google.golang.org/grpc/credentials"
@ -52,10 +52,10 @@ import (
 var (
   // ErrIllegalHeaderWrite indicates that setting header is illegal because of
   // the stream's state.
-  ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
+  ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times")
   // ErrHeaderListSizeLimitViolation indicates that the header list size is larger
   // than the limit set by peer.
-  ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
+  ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer")
 )

 // serverConnectionCounter counts the number of connections a server has seen
@ -117,7 +117,7 @@ type http2Server struct {
   idle time.Time

   // Fields below are for channelz metric collection.
-  channelzID int64 // channelz unique identification number
+  channelzID *channelz.Identifier
   czData     *channelzData
   bufferPool *bufferPool

@ -231,6 +231,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
   if kp.Timeout == 0 {
     kp.Timeout = defaultServerKeepaliveTimeout
   }
+  if kp.Time != infinity {
+    if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+      return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
+    }
+  }
   kep := config.KeepalivePolicy
   if kep.MinTime == 0 {
     kep.MinTime = defaultKeepalivePolicyMinTime
@ -275,12 +280,12 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
     connBegin := &stats.ConnBegin{}
     t.stats.HandleConn(t.ctx, connBegin)
   }
-  if channelz.IsOn() {
-    t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
+  t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
+  if err != nil {
+    return nil, err
   }

   t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)

   t.framer.writer.Flush()

   defer func() {
@ -443,6 +448,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
       streamID:       streamID,
       contentSubtype: s.contentSubtype,
       status:         status.New(codes.Internal, errMsg),
+      rst:            !frame.StreamEnded(),
     })
     return false
   }
@ -516,14 +522,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
   }
   if httpMethod != http.MethodPost {
     t.mu.Unlock()
+    errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
     if logger.V(logLevel) {
-      logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
+      logger.Infof("transport: %v", errMsg)
     }
-    t.controlBuf.put(&cleanupStream{
-      streamID: streamID,
-      rst:      true,
-      rstCode:  http2.ErrCodeProtocol,
-      onWrite:  func() {},
+    t.controlBuf.put(&earlyAbortStream{
+      httpStatus:     405,
+      streamID:       streamID,
+      contentSubtype: s.contentSubtype,
+      status:         status.New(codes.Internal, errMsg),
+      rst:            !frame.StreamEnded(),
     })
     s.cancel()
     return false
@ -544,6 +552,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
       streamID:       s.id,
       contentSubtype: s.contentSubtype,
       status:         stat,
+      rst:            !frame.StreamEnded(),
     })
     return false
   }
@ -925,11 +934,25 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
   return true
 }

+func (t *http2Server) streamContextErr(s *Stream) error {
+  select {
+  case <-t.done:
+    return ErrConnClosing
+  default:
+  }
+  return ContextErr(s.ctx.Err())
+}
+
 // WriteHeader sends the header metadata md back to the client.
 func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
-  if s.updateHeaderSent() || s.getState() == streamDone {
+  if s.updateHeaderSent() {
     return ErrIllegalHeaderWrite
   }
+
+  if s.getState() == streamDone {
+    return t.streamContextErr(s)
+  }
+
   s.hdrMu.Lock()
   if md.Len() > 0 {
     if s.header.Len() > 0 {
@ -940,7 +963,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
   }
   if err := t.writeHeaderLocked(s); err != nil {
     s.hdrMu.Unlock()
-    return err
+    return status.Convert(err).Err()
   }
   s.hdrMu.Unlock()
   return nil
@ -1056,23 +1079,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
   if !s.isHeaderSent() { // Headers haven't been written yet.
     if err := t.WriteHeader(s, nil); err != nil {
-      if _, ok := err.(ConnectionError); ok {
-        return err
-      }
-      // TODO(mmukhi, dfawley): Make sure this is the right code to return.
-      return status.Errorf(codes.Internal, "transport: %v", err)
+      return err
     }
   } else {
     // Writing headers checks for this condition.
     if s.getState() == streamDone {
-      // TODO(mmukhi, dfawley): Should the server write also return io.EOF?
-      s.cancel()
-      select {
-      case <-t.done:
-        return ErrConnClosing
-      default:
-      }
-      return ContextErr(s.ctx.Err())
+      return t.streamContextErr(s)
     }
   }
   df := &dataFrame{
@ -1082,12 +1094,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
     onEachWrite: t.setResetPingStrikes,
   }
   if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
-    select {
-    case <-t.done:
-      return ErrConnClosing
-    default:
-    }
-    return ContextErr(s.ctx.Err())
+    return t.streamContextErr(s)
   }
   return t.controlBuf.put(df)
 }
@ -1210,9 +1217,7 @@ func (t *http2Server) Close() {
   if err := t.conn.Close(); err != nil && logger.V(logLevel) {
     logger.Infof("transport: error closing conn during Close: %v", err)
   }
-  if channelz.IsOn() {
-    channelz.RemoveEntry(t.channelzID)
-  }
+  channelz.RemoveEntry(t.channelzID)
   // Cancel all active streams.
   for _, s := range streams {
     s.cancel()
@ -1225,10 +1230,6 @@ func (t *http2Server) Close() {

 // deleteStream deletes the stream s from transport's active streams.
 func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
-  // In case stream sending and receiving are invoked in separate
-  // goroutines (e.g., bi-directional streaming), cancel needs to be
-  // called to interrupt the potential blocking on other goroutines.
-  s.cancel()
-
   t.mu.Lock()
   if _, ok := t.activeStreams[s.id]; ok {
@ -1250,6 +1251,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {

 // finishStream closes the stream and puts the trailing headerFrame into controlbuf.
 func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
|
||||||
|
// In case stream sending and receiving are invoked in separate
|
||||||
|
// goroutines (e.g., bi-directional streaming), cancel needs to be
|
||||||
|
// called to interrupt the potential blocking on other goroutines.
|
||||||
|
s.cancel()
|
||||||
|
|
||||||
oldState := s.swapState(streamDone)
|
oldState := s.swapState(streamDone)
|
||||||
if oldState == streamDone {
|
if oldState == streamDone {
|
||||||
// If the stream was already done, return.
|
// If the stream was already done, return.
|
||||||
|
@ -1269,6 +1275,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h
|
||||||
|
|
||||||
// closeStream clears the footprint of a stream when the stream is not needed any more.
|
// closeStream clears the footprint of a stream when the stream is not needed any more.
|
||||||
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
|
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
|
||||||
|
// In case stream sending and receiving are invoked in separate
|
||||||
|
// goroutines (e.g., bi-directional streaming), cancel needs to be
|
||||||
|
// called to interrupt the potential blocking on other goroutines.
|
||||||
|
s.cancel()
|
||||||
|
|
||||||
s.swapState(streamDone)
|
s.swapState(streamDone)
|
||||||
t.deleteStream(s, eosReceived)
|
t.deleteStream(s, eosReceived)
|
||||||
|
|
||||||
|
|
|
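The hunks above repeatedly replace inline select-on-done blocks with the new streamContextErr helper, which prefers a connection-level "closing" error over the per-stream context error. A minimal, self-contained Go sketch of that selection pattern follows; the names (conn, errConnClosing, streamErr) are illustrative placeholders, not the vendored identifiers.

// Sketch of the "connection error wins over context error" pattern.
package main

import (
	"context"
	"errors"
	"fmt"
)

var errConnClosing = errors.New("transport is closing")

type conn struct {
	done chan struct{} // closed when the whole connection shuts down
}

// streamErr reports the connection-level error if the connection is already
// shutting down, and otherwise falls back to the stream's context error.
func (c *conn) streamErr(ctx context.Context) error {
	select {
	case <-c.done:
		return errConnClosing
	default:
	}
	return ctx.Err()
}

func main() {
	c := &conn{done: make(chan struct{})}
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(c.streamErr(ctx)) // context canceled

	close(c.done)
	fmt.Println(c.streamErr(ctx)) // transport is closing
}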
@@ -34,6 +34,7 @@ import (
 
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/resolver"
@@ -529,7 +530,7 @@ type ServerConfig struct {
 	InitialConnWindowSize int32
 	WriteBufferSize       int
 	ReadBufferSize        int
-	ChannelzParentID      int64
+	ChannelzParentID      *channelz.Identifier
 	MaxHeaderListSize     *uint32
 	HeaderTableSize       *uint32
 }
@@ -563,7 +564,7 @@ type ConnectOptions struct {
 	// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
 	ReadBufferSize int
 	// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
-	ChannelzParentID int64
+	ChannelzParentID *channelz.Identifier
 	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
 	MaxHeaderListSize *uint32
 	// UseProxy specifies if a proxy should be used.
@@ -188,7 +188,9 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
 		// map, and there's no guarantee that the MD attached to the context is
 		// created using our helper functions.
 		key := strings.ToLower(k)
-		out[key] = v
+		s := make([]string, len(v))
+		copy(s, v)
+		out[key] = s
 	}
 	return out, true
 }
@@ -226,7 +228,9 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) {
 		// map, and there's no guarantee that the MD attached to the context is
 		// created using our helper functions.
 		key := strings.ToLower(k)
-		out[key] = v
+		s := make([]string, len(v))
+		copy(s, v)
+		out[key] = s
 	}
 	for _, added := range raw.added {
 		if len(added)%2 == 1 {
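The two hunks above change FromIncomingContext and FromOutgoingContext to return a copy of each value slice instead of aliasing the slice stored in the context. A small Go sketch of why the copy matters; the md map type here is a stand-in for metadata.MD, not the grpc type itself.

package main

import "fmt"

type md map[string][]string

// snapshot returns a copy whose value slices do not alias the source,
// so callers can modify the result without corrupting the original.
func snapshot(src md) md {
	out := make(md, len(src))
	for k, v := range src {
		s := make([]string, len(v))
		copy(s, v)
		out[k] = s
	}
	return out
}

func main() {
	orig := md{"authorization": {"Bearer abc"}}
	cp := snapshot(orig)
	cp["authorization"][0] = "Bearer tampered"
	fmt.Println(orig["authorization"][0]) // still "Bearer abc"
}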
@@ -131,7 +131,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
 		}
 		if _, ok := status.FromError(err); ok {
 			// Status error: end the RPC unconditionally with this status.
-			return nil, nil, err
+			return nil, nil, dropError{error: err}
 		}
 		// For all other errors, wait for ready RPCs should block and other
 		// RPCs should fail with unavailable.
@@ -175,3 +175,9 @@ func (pw *pickerWrapper) close() {
 	pw.done = true
 	close(pw.blockingCh)
 }
+
+// dropError is a wrapper error that indicates the LB policy wishes to drop the
+// RPC and not retry it.
+type dropError struct {
+	error
+}
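dropError embeds the underlying error, so the original message is preserved while the concrete type carries the extra "drop, do not retry" meaning. A generic, self-contained sketch of that marker-wrapper pattern; the names are illustrative, not the vendored ones.

package main

import (
	"errors"
	"fmt"
)

// Marker wrapper: embedding the error keeps Error() behaviour intact while
// the concrete type signals how the caller should treat the failure.
type dropError struct {
	error
}

func pick() error {
	return dropError{error: errors.New("rpc dropped by picker")}
}

func main() {
	err := pick()
	var de dropError
	if errors.As(err, &de) {
		fmt.Println("drop without retry:", de.error)
	}
}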
@@ -44,79 +44,107 @@ func (*pickfirstBuilder) Name() string {
 }
 
 type pickfirstBalancer struct {
 	state connectivity.State
 	cc    balancer.ClientConn
-	sc    balancer.SubConn
+	subConn balancer.SubConn
 }
 
 func (b *pickfirstBalancer) ResolverError(err error) {
-	switch b.state {
-	case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
-		// Set a failing picker if we don't have a good picker.
-		b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
-			Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
-		})
-	}
 	if logger.V(2) {
 		logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
 	}
+	if b.subConn == nil {
+		b.state = connectivity.TransientFailure
+	}
+
+	if b.state != connectivity.TransientFailure {
+		// The picker will not change since the balancer does not currently
+		// report an error.
+		return
+	}
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
+	})
 }
 
-func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error {
-	if len(cs.ResolverState.Addresses) == 0 {
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	if len(state.ResolverState.Addresses) == 0 {
+		// The resolver reported an empty address list. Treat it like an error by
+		// calling b.ResolverError.
+		if b.subConn != nil {
+			// Remove the old subConn. All addresses were removed, so it is no longer
+			// valid.
+			b.cc.RemoveSubConn(b.subConn)
+			b.subConn = nil
+		}
 		b.ResolverError(errors.New("produced zero addresses"))
 		return balancer.ErrBadResolverState
 	}
-	if b.sc == nil {
-		var err error
-		b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{})
-		if err != nil {
-			if logger.V(2) {
-				logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
-			}
-			b.state = connectivity.TransientFailure
-			b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
-				Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
-			})
-			return balancer.ErrBadResolverState
-		}
-		b.state = connectivity.Idle
-		b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}})
-		b.sc.Connect()
-	} else {
-		b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses)
-		b.sc.Connect()
+
+	if b.subConn != nil {
+		b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses)
+		return nil
 	}
 
+	subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{})
+	if err != nil {
+		if logger.V(2) {
+			logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
+		}
+		b.state = connectivity.TransientFailure
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            &picker{err: fmt.Errorf("error creating connection: %v", err)},
+		})
+		return balancer.ErrBadResolverState
+	}
+	b.subConn = subConn
+	b.state = connectivity.Idle
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.Idle,
+		Picker:            &picker{result: balancer.PickResult{SubConn: b.subConn}},
+	})
+	b.subConn.Connect()
 	return nil
 }
 
-func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
 	if logger.V(2) {
-		logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s)
+		logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state)
 	}
-	if b.sc != sc {
+	if b.subConn != subConn {
 		if logger.V(2) {
-			logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
+			logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized")
 		}
 		return
 	}
-	b.state = s.ConnectivityState
-	if s.ConnectivityState == connectivity.Shutdown {
-		b.sc = nil
+	b.state = state.ConnectivityState
+	if state.ConnectivityState == connectivity.Shutdown {
+		b.subConn = nil
 		return
 	}
 
-	switch s.ConnectivityState {
+	switch state.ConnectivityState {
 	case connectivity.Ready:
-		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}})
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &picker{result: balancer.PickResult{SubConn: subConn}},
+		})
 	case connectivity.Connecting:
-		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
 	case connectivity.Idle:
-		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}})
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &idlePicker{subConn: subConn},
+		})
 	case connectivity.TransientFailure:
 		b.cc.UpdateState(balancer.State{
-			ConnectivityState: s.ConnectivityState,
-			Picker:            &picker{err: s.ConnectionError},
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &picker{err: state.ConnectionError},
		})
 	}
 }
@@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() {
 }
 
 func (b *pickfirstBalancer) ExitIdle() {
-	if b.sc != nil && b.state == connectivity.Idle {
-		b.sc.Connect()
+	if b.subConn != nil && b.state == connectivity.Idle {
+		b.subConn.Connect()
 	}
 }
 
@@ -135,18 +163,18 @@ type picker struct {
 	err    error
 }
 
-func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
 	return p.result, p.err
 }
 
 // idlePicker is used when the SubConn is IDLE and kicks the SubConn into
 // CONNECTING when Pick is called.
 type idlePicker struct {
-	sc balancer.SubConn
+	subConn balancer.SubConn
 }
 
-func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
-	i.sc.Connect()
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	i.subConn.Connect()
 	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
 }
 
@@ -27,6 +27,7 @@ import (
 
 	"google.golang.org/grpc/attributes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/pretty"
 	"google.golang.org/grpc/serviceconfig"
 )
 
@@ -139,13 +140,18 @@ type Address struct {
 
 // Equal returns whether a and o are identical. Metadata is compared directly,
 // not with any recursive introspection.
-func (a *Address) Equal(o Address) bool {
+func (a Address) Equal(o Address) bool {
 	return a.Addr == o.Addr && a.ServerName == o.ServerName &&
 		a.Attributes.Equal(o.Attributes) &&
 		a.BalancerAttributes.Equal(o.BalancerAttributes) &&
 		a.Type == o.Type && a.Metadata == o.Metadata
 }
 
+// String returns JSON formatted string representation of the address.
+func (a Address) String() string {
+	return pretty.ToJSON(a)
+}
+
 // BuildOptions includes additional information for the builder to create
 // the resolver.
 type BuildOptions struct {
@@ -19,7 +19,6 @@
 package grpc
 
 import (
-	"fmt"
 	"strings"
 	"sync"
 
@@ -27,6 +26,7 @@ import (
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/pretty"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/serviceconfig"
 )
@@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
 	if ccr.done.HasFired() {
 		return nil
 	}
-	channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s)
-	if channelz.IsOn() {
-		ccr.addChannelzTraceEvent(s)
-	}
+	ccr.addChannelzTraceEvent(s)
 	ccr.curState = s
 	if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
 		return balancer.ErrBadResolverState
@@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
 	if ccr.done.HasFired() {
 		return
 	}
-	channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs)
-	if channelz.IsOn() {
-		ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
-	}
+	ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
 	ccr.curState.Addresses = addrs
 	ccr.cc.updateResolverState(ccr.curState, nil)
 }
@@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
 	if ccr.done.HasFired() {
 		return
 	}
-	channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc)
+	channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc)
 	if ccr.cc.dopts.disableServiceConfig {
 		channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
 		return
@@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
 		channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
 		return
 	}
-	if channelz.IsOn() {
-		ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
-	}
+	ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
 	ccr.curState.ServiceConfig = scpr
 	ccr.cc.updateResolverState(ccr.curState, nil)
 }
@@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
 	} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
 		updates = append(updates, "resolver returned new addresses")
 	}
-	channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{
-		Desc:     fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
-		Severity: channelz.CtInfo,
-	})
+	channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
 }
@@ -134,7 +134,7 @@ type Server struct {
 	channelzRemoveOnce sync.Once
 	serveWG            sync.WaitGroup // counts active Serve goroutines for GracefulStop
 
-	channelzID int64 // channelz unique identification number
+	channelzID *channelz.Identifier
 	czData     *channelzData
 
 	serverWorkerChannels []chan *serverWorkerData
@@ -584,9 +584,8 @@ func NewServer(opt ...ServerOption) *Server {
 		s.initServerWorkers()
 	}
 
-	if channelz.IsOn() {
-		s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
-	}
+	s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
+	channelz.Info(logger, s.channelzID, "Server created")
 	return s
 }
 
@@ -712,7 +711,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped")
 
 type listenSocket struct {
 	net.Listener
-	channelzID int64
+	channelzID *channelz.Identifier
 }
 
 func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
@@ -724,9 +723,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
 
 func (l *listenSocket) Close() error {
 	err := l.Listener.Close()
-	if channelz.IsOn() {
-		channelz.RemoveEntry(l.channelzID)
-	}
+	channelz.RemoveEntry(l.channelzID)
+	channelz.Info(logger, l.channelzID, "ListenSocket deleted")
 	return err
 }
 
@@ -759,11 +757,6 @@ func (s *Server) Serve(lis net.Listener) error {
 	ls := &listenSocket{Listener: lis}
 	s.lis[ls] = true
 
-	if channelz.IsOn() {
-		ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
-	}
-	s.mu.Unlock()
-
 	defer func() {
 		s.mu.Lock()
 		if s.lis != nil && s.lis[ls] {
@@ -773,8 +766,16 @@ func (s *Server) Serve(lis net.Listener) error {
 		s.mu.Unlock()
 	}()
 
-	var tempDelay time.Duration // how long to sleep on accept failure
+	var err error
+	ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
+	if err != nil {
+		s.mu.Unlock()
+		return err
+	}
+	s.mu.Unlock()
+	channelz.Info(logger, ls.channelzID, "ListenSocket created")
 
+	var tempDelay time.Duration // how long to sleep on accept failure
 	for {
 		rawConn, err := lis.Accept()
 		if err != nil {
@@ -1709,11 +1710,7 @@ func (s *Server) Stop() {
 		s.done.Fire()
 	}()
 
-	s.channelzRemoveOnce.Do(func() {
-		if channelz.IsOn() {
-			channelz.RemoveEntry(s.channelzID)
-		}
-	})
+	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
 
 	s.mu.Lock()
 	listeners := s.lis
@@ -1751,11 +1748,7 @@ func (s *Server) GracefulStop() {
 	s.quit.Fire()
 	defer s.done.Fire()
 
-	s.channelzRemoveOnce.Do(func() {
-		if channelz.IsOn() {
-			channelz.RemoveEntry(s.channelzID)
-		}
-	})
+	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
 	s.mu.Lock()
 	if s.conns == nil {
 		s.mu.Unlock()
@@ -1808,12 +1801,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
 	return codec
 }
 
-// SetHeader sets the header metadata.
-// When called multiple times, all the provided metadata will be merged.
-// All the metadata will be sent out when one of the following happens:
-//  - grpc.SendHeader() is called;
-//  - The first response is sent out;
-//  - An RPC status is sent out (error or success).
+// SetHeader sets the header metadata to be sent from the server to the client.
+// The context provided must be the context passed to the server's handler.
+//
+// Streaming RPCs should prefer the SetHeader method of the ServerStream.
+//
+// When called multiple times, all the provided metadata will be merged. All
+// the metadata will be sent out when one of the following happens:
+//
+//  - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader.
+//  - The first response message is sent. For unary handlers, this occurs when
+//    the handler returns; for streaming handlers, this can happen when stream's
+//    SendMsg method is called.
+//  - An RPC status is sent out (error or success). This occurs when the handler
+//    returns.
+//
+// SetHeader will fail if called after any of the events above.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
 func SetHeader(ctx context.Context, md metadata.MD) error {
 	if md.Len() == 0 {
 		return nil
@@ -1825,8 +1832,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
 	return stream.SetHeader(md)
 }
 
-// SendHeader sends header metadata. It may be called at most once.
-// The provided md and headers set by SetHeader() will be sent.
+// SendHeader sends header metadata. It may be called at most once, and may not
+// be called after any event that causes headers to be sent (see SetHeader for
+// a complete list). The provided md and headers set by SetHeader() will be
+// sent.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
 func SendHeader(ctx context.Context, md metadata.MD) error {
 	stream := ServerTransportStreamFromContext(ctx)
 	if stream == nil {
@@ -1840,6 +1853,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
 
 // SetTrailer sets the trailer metadata that will be sent when an RPC returns.
 // When called more than once, all the provided metadata will be merged.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
 func SetTrailer(ctx context.Context, md metadata.MD) error {
 	if md.Len() == 0 {
 		return nil
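The expanded doc comments above spell out exactly when server-side header metadata is flushed. The following is a minimal unary-handler sketch of the documented call order; the echoServer, EchoRequest and EchoResponse names are assumed placeholders, not part of this repository, and the snippet is illustration only rather than wired into a running server.

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// Placeholder request/response types standing in for generated protobuf messages.
type EchoRequest struct{ Message string }
type EchoResponse struct{ Message string }

type echoServer struct{}

// Echo shows the documented ordering: SetHeader buffers metadata, SendHeader
// flushes headers (at most once), and SetTrailer is sent when the handler returns.
func (s *echoServer) Echo(ctx context.Context, in *EchoRequest) (*EchoResponse, error) {
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-request-id", "123")); err != nil {
		return nil, err
	}
	if err := grpc.SendHeader(ctx, metadata.Pairs("x-early", "true")); err != nil {
		return nil, err
	}
	if err := grpc.SetTrailer(ctx, metadata.Pairs("x-processed", "yes")); err != nil {
		return nil, err
	}
	return &EchoResponse{Message: in.Message}, nil
}

func main() {} // handler sketch only; not registered with a grpc.Server here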
@@ -381,6 +381,9 @@ func init() {
 //
 // If any of them is NOT *ServiceConfig, return false.
 func equalServiceConfig(a, b serviceconfig.Config) bool {
+	if a == nil && b == nil {
+		return true
+	}
 	aa, ok := a.(*ServiceConfig)
 	if !ok {
 		return false
@@ -36,6 +36,7 @@ import (
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/internal/grpcutil"
+	imetadata "google.golang.org/grpc/internal/metadata"
 	iresolver "google.golang.org/grpc/internal/resolver"
 	"google.golang.org/grpc/internal/serviceconfig"
 	"google.golang.org/grpc/internal/transport"
@@ -166,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 }
 
 func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+	if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+		if err := imetadata.Validate(md); err != nil {
+			return nil, status.Error(codes.Internal, err.Error())
+		}
+	}
 	if channelz.IsOn() {
 		cc.incrCallsStarted()
 		defer func() {
@@ -297,14 +303,28 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
 	}
 	cs.binlog = binarylog.GetMethodLogger(method)
 
-	if err := cs.newAttemptLocked(false /* isTransparent */); err != nil {
+	cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */)
+	if err != nil {
 		cs.finish(err)
 		return nil, err
 	}
 
-	op := func(a *csAttempt) error { return a.newStream() }
+	// Pick the transport to use and create a new stream on the transport.
+	// Assign cs.attempt upon success.
+	op := func(a *csAttempt) error {
+		if err := a.getTransport(); err != nil {
+			return err
+		}
+		if err := a.newStream(); err != nil {
+			return err
+		}
+		// Because this operation is always called either here (while creating
+		// the clientStream) or by the retry code while locked when replaying
+		// the operation, it is safe to access cs.attempt directly.
+		cs.attempt = a
+		return nil
+	}
 	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
-		cs.finish(err)
 		return nil, err
 	}
 
@@ -343,9 +363,15 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
 	return cs, nil
 }
 
-// newAttemptLocked creates a new attempt with a transport.
-// If it succeeds, then it replaces clientStream's attempt with this new attempt.
-func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
+// newAttemptLocked creates a new csAttempt without a transport or stream.
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) {
+	if err := cs.ctx.Err(); err != nil {
+		return nil, toRPCErr(err)
+	}
+	if err := cs.cc.ctx.Err(); err != nil {
+		return nil, ErrClientConnClosing
+	}
+
 	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
 	method := cs.callHdr.Method
 	sh := cs.cc.dopts.copts.StatsHandler
@@ -379,27 +405,6 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
 		ctx = trace.NewContext(ctx, trInfo.tr)
 	}
 
-	newAttempt := &csAttempt{
-		ctx:          ctx,
-		beginTime:    beginTime,
-		cs:           cs,
-		dc:           cs.cc.dopts.dc,
-		statsHandler: sh,
-		trInfo:       trInfo,
-	}
-	defer func() {
-		if retErr != nil {
-			// This attempt is not set in the clientStream, so it's finish won't
-			// be called. Call it here for stats and trace in case they are not
-			// nil.
-			newAttempt.finish(retErr)
-		}
-	}()
-
-	if err := ctx.Err(); err != nil {
-		return toRPCErr(err)
-	}
-
 	if cs.cc.parsedTarget.Scheme == "xds" {
 		// Add extra metadata (metadata that will be added by transport) to context
 		// so the balancer can see them.
@@ -407,16 +412,32 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
 			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
 		))
 	}
-	t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
+
+	return &csAttempt{
+		ctx:          ctx,
+		beginTime:    beginTime,
+		cs:           cs,
+		dc:           cs.cc.dopts.dc,
+		statsHandler: sh,
+		trInfo:       trInfo,
+	}, nil
+}
+
+func (a *csAttempt) getTransport() error {
+	cs := a.cs
+
+	var err error
+	a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
 	if err != nil {
+		if de, ok := err.(dropError); ok {
+			err = de.error
+			a.drop = true
+		}
 		return err
 	}
-	if trInfo != nil {
-		trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
+	if a.trInfo != nil {
+		a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
 	}
-	newAttempt.t = t
-	newAttempt.done = done
-	cs.attempt = newAttempt
 	return nil
 }
 
@@ -425,12 +446,21 @@ func (a *csAttempt) newStream() error {
 	cs.callHdr.PreviousAttempts = cs.numRetries
 	s, err := a.t.NewStream(a.ctx, cs.callHdr)
 	if err != nil {
-		// Return without converting to an RPC error so retry code can
-		// inspect.
-		return err
+		nse, ok := err.(*transport.NewStreamError)
+		if !ok {
+			// Unexpected.
+			return err
+		}
+
+		if nse.AllowTransparentRetry {
+			a.allowTransparentRetry = true
+		}
+
+		// Unwrap and convert error.
+		return toRPCErr(nse.Err)
 	}
-	cs.attempt.s = s
-	cs.attempt.p = &parser{r: s}
+	a.s = s
+	a.p = &parser{r: s}
 	return nil
 }
 
@@ -456,7 +486,7 @@ type clientStream struct {
 
 	retryThrottler *retryThrottler // The throttler active when the RPC began.
 
-	binlog *binarylog.MethodLogger // Binary logger, can be nil.
+	binlog binarylog.MethodLogger // Binary logger, can be nil.
 	// serverHeaderBinlogged is a boolean for whether server header has been
 	// logged. Server header will be logged when the first time one of those
 	// happens: stream.Header(), stream.Recv().
@@ -508,6 +538,11 @@ type csAttempt struct {
 
 	statsHandler stats.Handler
 	beginTime    time.Time
+
+	// set for newStream errors that may be transparently retried
+	allowTransparentRetry bool
+	// set for pick errors that are returned as a status
+	drop bool
 }
 
 func (cs *clientStream) commitAttemptLocked() {
@@ -527,41 +562,21 @@ func (cs *clientStream) commitAttempt() {
 // shouldRetry returns nil if the RPC should be retried; otherwise it returns
 // the error that should be returned by the operation. If the RPC should be
 // retried, the bool indicates whether it is being retried transparently.
-func (cs *clientStream) shouldRetry(err error) (bool, error) {
-	if cs.attempt.s == nil {
-		// Error from NewClientStream.
-		nse, ok := err.(*transport.NewStreamError)
-		if !ok {
-			// Unexpected, but assume no I/O was performed and the RPC is not
-			// fatal, so retry indefinitely.
-			return true, nil
-		}
-
-		// Unwrap and convert error.
-		err = toRPCErr(nse.Err)
-
-		// Never retry DoNotRetry errors, which indicate the RPC should not be
-		// retried due to max header list size violation, etc.
-		if nse.DoNotRetry {
-			return false, err
-		}
-
-		// In the event of a non-IO operation error from NewStream, we never
-		// attempted to write anything to the wire, so we can retry
-		// indefinitely.
-		if !nse.DoNotTransparentRetry {
-			return true, nil
-		}
-	}
-	if cs.finished || cs.committed {
-		// RPC is finished or committed; cannot retry.
+func (a *csAttempt) shouldRetry(err error) (bool, error) {
+	cs := a.cs
+
+	if cs.finished || cs.committed || a.drop {
+		// RPC is finished or committed or was dropped by the picker; cannot retry.
 		return false, err
 	}
+	if a.s == nil && a.allowTransparentRetry {
+		return true, nil
+	}
 	// Wait for the trailers.
 	unprocessed := false
-	if cs.attempt.s != nil {
-		<-cs.attempt.s.Done()
-		unprocessed = cs.attempt.s.Unprocessed()
+	if a.s != nil {
+		<-a.s.Done()
+		unprocessed = a.s.Unprocessed()
 	}
 	if cs.firstAttempt && unprocessed {
 		// First attempt, stream unprocessed: transparently retry.
@@ -573,14 +588,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) {
 
 	pushback := 0
 	hasPushback := false
-	if cs.attempt.s != nil {
-		if !cs.attempt.s.TrailersOnly() {
+	if a.s != nil {
+		if !a.s.TrailersOnly() {
 			return false, err
 		}
 
 		// TODO(retry): Move down if the spec changes to not check server pushback
 		// before considering this a failure for throttling.
-		sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
+		sps := a.s.Trailer()["grpc-retry-pushback-ms"]
 		if len(sps) == 1 {
 			var e error
 			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
@@ -597,10 +612,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) {
 	}
 
 	var code codes.Code
-	if cs.attempt.s != nil {
-		code = cs.attempt.s.Status().Code()
+	if a.s != nil {
+		code = a.s.Status().Code()
 	} else {
-		code = status.Convert(err).Code()
+		code = status.Code(err)
 	}
 
 	rp := cs.methodConfig.RetryPolicy
@@ -645,19 +660,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) {
 }
 
 // Returns nil if a retry was performed and succeeded; error otherwise.
-func (cs *clientStream) retryLocked(lastErr error) error {
+func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
 	for {
-		cs.attempt.finish(toRPCErr(lastErr))
-		isTransparent, err := cs.shouldRetry(lastErr)
+		attempt.finish(toRPCErr(lastErr))
+		isTransparent, err := attempt.shouldRetry(lastErr)
 		if err != nil {
 			cs.commitAttemptLocked()
 			return err
 		}
 		cs.firstAttempt = false
-		if err := cs.newAttemptLocked(isTransparent); err != nil {
+		attempt, err = cs.newAttemptLocked(isTransparent)
+		if err != nil {
+			// Only returns error if the clientconn is closed or the context of
+			// the stream is canceled.
 			return err
 		}
-		if lastErr = cs.replayBufferLocked(); lastErr == nil {
+		// Note that the first op in the replay buffer always sets cs.attempt
+		// if it is able to pick a transport and create a stream.
+		if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
 			return nil
 		}
 	}
@@ -667,7 +687,10 @@ func (cs *clientStream) Context() context.Context {
 	cs.commitAttempt()
 	// No need to lock before using attempt, since we know it is committed and
 	// cannot change.
-	return cs.attempt.s.Context()
+	if cs.attempt.s != nil {
+		return cs.attempt.s.Context()
+	}
+	return cs.ctx
 }
 
 func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
@@ -697,7 +720,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
 			cs.mu.Unlock()
 			return err
 		}
-		if err := cs.retryLocked(err); err != nil {
+		if err := cs.retryLocked(a, err); err != nil {
 			cs.mu.Unlock()
 			return err
 		}
@@ -728,7 +751,7 @@ func (cs *clientStream) Header() (metadata.MD, error) {
 		cs.binlog.Log(logEntry)
 		cs.serverHeaderBinlogged = true
 	}
-	return m, err
+	return m, nil
 }
 
 func (cs *clientStream) Trailer() metadata.MD {
@@ -746,10 +769,9 @@ func (cs *clientStream) Trailer() metadata.MD {
 	return cs.attempt.s.Trailer()
 }
 
-func (cs *clientStream) replayBufferLocked() error {
-	a := cs.attempt
+func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
 	for _, f := range cs.buffer {
-		if err := f(a); err != nil {
+		if err := f(attempt); err != nil {
 			return err
 		}
 	}
@@ -797,22 +819,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
 	if len(payload) > *cs.callInfo.maxSendMessageSize {
 		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
 	}
-	msgBytes := data // Store the pointer before setting to nil. For binary logging.
 	op := func(a *csAttempt) error {
-		err := a.sendMsg(m, hdr, payload, data)
-		// nil out the message and uncomp when replaying; they are only needed for
-		// stats which is disabled for subsequent attempts.
-		m, data = nil, nil
-		return err
+		return a.sendMsg(m, hdr, payload, data)
 	}
 	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
 	if cs.binlog != nil && err == nil {
 		cs.binlog.Log(&binarylog.ClientMessage{
 			OnClientSide: true,
-			Message:      msgBytes,
+			Message:      data,
 		})
 	}
-	return
+	return err
 }
 
 func (cs *clientStream) RecvMsg(m interface{}) error {
@@ -1364,8 +1381,10 @@ func (as *addrConnStream) finish(err error) {
 
 // ServerStream defines the server-side behavior of a streaming RPC.
 //
-// All errors returned from ServerStream methods are compatible with the
-// status package.
+// Errors returned from ServerStream methods are compatible with the status
+// package. However, the status code will often not match the RPC status as
+// seen by the client application, and therefore, should not be relied upon for
+// this purpose.
 type ServerStream interface {
 	// SetHeader sets the header metadata. It may be called multiple times.
 	// When call multiple times, all the provided metadata will be merged.
@@ -1428,7 +1447,7 @@ type serverStream struct {
 
 	statsHandler stats.Handler
 
-	binlog *binarylog.MethodLogger
+	binlog binarylog.MethodLogger
 	// serverHeaderBinlogged indicates whether server header has been logged. It
 	// will happen when one of the following two happens: stream.SendHeader(),
 	// stream.Send().
@@ -1448,11 +1467,20 @@ func (ss *serverStream) SetHeader(md metadata.MD) error {
 	if md.Len() == 0 {
 		return nil
 	}
+	err := imetadata.Validate(md)
+	if err != nil {
+		return status.Error(codes.Internal, err.Error())
+	}
 	return ss.s.SetHeader(md)
 }
 
 func (ss *serverStream) SendHeader(md metadata.MD) error {
-	err := ss.t.WriteHeader(ss.s, md)
+	err := imetadata.Validate(md)
+	if err != nil {
+		return status.Error(codes.Internal, err.Error())
+	}
+
+	err = ss.t.WriteHeader(ss.s, md)
 	if ss.binlog != nil && !ss.serverHeaderBinlogged {
 		h, _ := ss.s.Header()
 		ss.binlog.Log(&binarylog.ServerHeader{
@@ -1467,6 +1495,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
 	if md.Len() == 0 {
 		return
 	}
+	if err := imetadata.Validate(md); err != nil {
+		logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err)
+	}
 	ss.s.SetTrailer(md)
 }
 
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.45.0"
+const Version = "1.47.0"
@@ -84,6 +84,9 @@ github.com/coredns/coredns/plugin/pkg/transport
 github.com/coredns/coredns/plugin/pkg/uniq
 github.com/coredns/coredns/plugin/test
 github.com/coredns/coredns/request
+# github.com/coreos/go-oidc/v3 v3.4.0
+## explicit; go 1.14
+github.com/coreos/go-oidc/v3/oidc
 # github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
 ## explicit
 github.com/coreos/go-systemd/daemon
@@ -141,6 +144,7 @@ github.com/gobwas/ws/wsutil
 github.com/golang-collections/collections/queue
 # github.com/golang/protobuf v1.5.2
 ## explicit; go 1.9
+github.com/golang/protobuf/jsonpb
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
@@ -386,11 +390,11 @@ golang.org/x/net/ipv6
 golang.org/x/net/proxy
 golang.org/x/net/trace
 golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
-## explicit; go 1.11
+# golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094
+## explicit; go 1.17
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+# golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
 ## explicit
 golang.org/x/sync/errgroup
 # golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664
@@ -434,8 +438,8 @@ golang.org/x/tools/internal/imports
 golang.org/x/tools/internal/packagesinternal
 golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
-# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
-## explicit; go 1.11
+# golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f
+## explicit; go 1.17
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
 # google.golang.org/appengine v1.6.7
@@ -447,12 +451,12 @@ google.golang.org/appengine/internal/log
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb
-## explicit; go 1.11
+# google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90
+## explicit; go 1.15
 google.golang.org/genproto/googleapis/api/httpbody
 google.golang.org/genproto/googleapis/rpc/status
 google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/grpc v1.45.0
+# google.golang.org/grpc v1.47.0
 ## explicit; go 1.14
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -462,6 +466,7 @@ google.golang.org/grpc/balancer/base
 google.golang.org/grpc/balancer/grpclb/state
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
+google.golang.org/grpc/channelz
 google.golang.org/grpc/codes
 google.golang.org/grpc/connectivity
 google.golang.org/grpc/credentials
@@ -471,6 +476,7 @@ google.golang.org/grpc/encoding/proto
 google.golang.org/grpc/grpclog
 google.golang.org/grpc/internal
 google.golang.org/grpc/internal/backoff
+google.golang.org/grpc/internal/balancer/gracefulswitch
 google.golang.org/grpc/internal/balancerload
 google.golang.org/grpc/internal/binarylog
 google.golang.org/grpc/internal/buffer
@@ -482,6 +488,7 @@ google.golang.org/grpc/internal/grpcrand
 google.golang.org/grpc/internal/grpcsync
 google.golang.org/grpc/internal/grpcutil
 google.golang.org/grpc/internal/metadata
+google.golang.org/grpc/internal/pretty
 google.golang.org/grpc/internal/resolver
 google.golang.org/grpc/internal/resolver/dns
 google.golang.org/grpc/internal/resolver/passthrough