parent
abb3466c31
commit
d608a64cc5
29
go.mod
29
go.mod
|
@ -3,7 +3,7 @@ module github.com/cloudflare/cloudflared
|
|||
go 1.22
|
||||
|
||||
require (
|
||||
github.com/coredns/coredns v1.10.0
|
||||
github.com/coredns/coredns v1.11.3
|
||||
github.com/coreos/go-oidc/v3 v3.10.0
|
||||
github.com/coreos/go-systemd/v22 v22.5.0
|
||||
github.com/facebookgo/grace v0.0.0-20180706040059-75cf19382434
|
||||
|
@ -13,18 +13,17 @@ require (
|
|||
github.com/go-chi/chi/v5 v5.0.8
|
||||
github.com/go-chi/cors v1.2.1
|
||||
github.com/go-jose/go-jose/v4 v4.0.1
|
||||
github.com/gobwas/ws v1.0.4
|
||||
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
|
||||
github.com/gobwas/ws v1.2.1
|
||||
github.com/google/gopacket v1.1.19
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/websocket v1.4.2
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/mattn/go-colorable v0.1.13
|
||||
github.com/miekg/dns v1.1.50
|
||||
github.com/miekg/dns v1.1.58
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.19.1
|
||||
github.com/prometheus/client_model v0.5.0
|
||||
github.com/prometheus/client_model v0.6.0
|
||||
github.com/quic-go/quic-go v0.45.0
|
||||
github.com/rs/zerolog v1.20.0
|
||||
github.com/stretchr/testify v1.9.0
|
||||
|
@ -55,7 +54,7 @@ require (
|
|||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/coredns/caddy v1.1.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
|
||||
github.com/facebookgo/freeport v0.0.0-20150612182905-d4adf43b75b9 // indirect
|
||||
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
|
||||
|
@ -64,36 +63,36 @@ require (
|
|||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||
github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58 // indirect
|
||||
github.com/gobwas/httphead v0.1.0 // indirect
|
||||
github.com/gobwas/pool v0.2.1 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
|
||||
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
|
||||
github.com/klauspost/compress v1.15.11 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.16 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.13.0 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/common v0.48.0 // indirect
|
||||
github.com/prometheus/common v0.53.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.26.0 // indirect
|
||||
go.uber.org/mock v0.4.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/oauth2 v0.17.0 // indirect
|
||||
golang.org/x/oauth2 v0.18.0 // indirect
|
||||
golang.org/x/text v0.15.0 // indirect
|
||||
golang.org/x/tools v0.21.0 // indirect
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
|
||||
google.golang.org/grpc v1.63.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
|
||||
google.golang.org/grpc v1.63.2 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
)
|
||||
|
||||
|
|
91
go.sum
91
go.sum
|
@ -7,13 +7,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
|||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0=
|
||||
github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
|
||||
github.com/coredns/coredns v1.10.0 h1:jCfuWsBjTs0dapkkhISfPCzn5LqvSRtrFtaf/Tjj4DI=
|
||||
github.com/coredns/coredns v1.10.0/go.mod h1:CIfRU5TgpuoIiJBJ4XrofQzfFQpPFh32ERpUevrSlaw=
|
||||
github.com/coredns/coredns v1.11.3 h1:8RjnpZc42db5th84/QJKH2i137ecJdzZK1HJwhetSPk=
|
||||
github.com/coredns/coredns v1.11.3/go.mod h1:lqFkDsHjEUdY7LJ75Nib3lwqJGip6ewWOqNIf8OavIQ=
|
||||
github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
|
||||
github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
|
@ -24,8 +21,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng
|
|||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
|
||||
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
|
||||
github.com/facebookgo/freeport v0.0.0-20150612182905-d4adf43b75b9 h1:wWke/RUCl7VRjQhwPlR/v0glZXNYzBHdNUzf/Am2Nmg=
|
||||
|
@ -75,19 +73,18 @@ github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4
|
|||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
|
||||
github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58 h1:YyrUZvJaU8Q0QsoVo+xLFBgWDTam29PKea6GYmwvSiQ=
|
||||
github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
|
||||
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
|
||||
github.com/gobwas/ws v1.0.4 h1:5eXU1CZhpQdq5kXbKb+sECH5Ia5KiO6CYzIzdlVx6Bs=
|
||||
github.com/gobwas/ws v1.0.4/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
|
||||
github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk=
|
||||
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4=
|
||||
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
|
@ -102,8 +99,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
|
|||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo=
|
||||
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
|
@ -114,7 +111,6 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0Q
|
|||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ipostelnik/cli/v2 v2.3.1-0.20210324024421-b6ea8234fe3d h1:PRDnysJ9dF1vUMmEzBu6aHQeUluSQy4eWH3RsSSy/vI=
|
||||
github.com/ipostelnik/cli/v2 v2.3.1-0.20210324024421-b6ea8234fe3d/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
|
@ -140,10 +136,10 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
|
|||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
|
||||
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
|
||||
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
|
@ -152,16 +148,16 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
|
|||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
|
||||
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
|
||||
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
|
||||
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
|
||||
github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
|
||||
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
|
||||
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
|
||||
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
|
||||
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
|
||||
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
|
||||
github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ=
|
||||
github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
|
||||
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
@ -171,10 +167,10 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
|
|||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
|
||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
||||
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
|
||||
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
|
||||
github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
|
||||
github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/quic-go/quic-go v0.45.0 h1:OHmkQGM37luZITyTSu6ff03HP/2IrwDX1ZFiNEhSFUE=
|
||||
|
@ -195,14 +191,13 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ=
|
||||
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
|
||||
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
|
||||
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=
|
||||
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opentelemetry.io/contrib/propagators v0.22.0 h1:KGdv58M2//veiYLIhb31mofaI2LgkIPXXAZVeYVyfd8=
|
||||
go.opentelemetry.io/contrib/propagators v0.22.0/go.mod h1:xGOuXr6lLIF9BXipA4pm6UuOSI0M98U6tsI3khbOiwU=
|
||||
|
@ -233,39 +228,32 @@ golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJ
|
|||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
|
||||
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
|
||||
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
|
||||
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
|
@ -275,7 +263,6 @@ golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
|||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||
|
@ -287,24 +274,20 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
|
|||
golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
|
||||
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
|
||||
google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8=
|
||||
google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
|
||||
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
|
||||
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/coredns/caddy"
|
||||
"github.com/coredns/coredns/plugin"
|
||||
|
@ -53,6 +54,11 @@ type Config struct {
|
|||
// TLSConfig when listening for encrypted connections (gRPC, DNS-over-TLS).
|
||||
TLSConfig *tls.Config
|
||||
|
||||
// Timeouts for TCP, TLS and HTTPS servers.
|
||||
ReadTimeout time.Duration
|
||||
WriteTimeout time.Duration
|
||||
IdleTimeout time.Duration
|
||||
|
||||
// TSIG secrets, [name]key.
|
||||
TsigSecret map[string]string
|
||||
|
||||
|
|
|
@ -4,13 +4,11 @@ import (
|
|||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/coredns/coredns/plugin/pkg/nonwriter"
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
// DoHWriter is a nonwriter.Writer that adds more specific LocalAddr and RemoteAddr methods.
|
||||
// DoHWriter is a dns.ResponseWriter that adds more specific LocalAddr and RemoteAddr methods.
|
||||
type DoHWriter struct {
|
||||
nonwriter.Writer
|
||||
|
||||
// raddr is the remote's address. This can be optionally set.
|
||||
raddr net.Addr
|
||||
// laddr is our address. This can be optionally set.
|
||||
|
@ -18,13 +16,50 @@ type DoHWriter struct {
|
|||
|
||||
// request is the HTTP request we're currently handling.
|
||||
request *http.Request
|
||||
|
||||
// Msg is a response to be written to the client.
|
||||
Msg *dns.Msg
|
||||
}
|
||||
|
||||
// WriteMsg stores the message to be written to the client.
|
||||
func (d *DoHWriter) WriteMsg(m *dns.Msg) error {
|
||||
d.Msg = m
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write stores the message to be written to the client.
|
||||
func (d *DoHWriter) Write(b []byte) (int, error) {
|
||||
d.Msg = new(dns.Msg)
|
||||
return len(b), d.Msg.Unpack(b)
|
||||
}
|
||||
|
||||
// RemoteAddr returns the remote address.
|
||||
func (d *DoHWriter) RemoteAddr() net.Addr { return d.raddr }
|
||||
func (d *DoHWriter) RemoteAddr() net.Addr {
|
||||
return d.raddr
|
||||
}
|
||||
|
||||
// LocalAddr returns the local address.
|
||||
func (d *DoHWriter) LocalAddr() net.Addr { return d.laddr }
|
||||
func (d *DoHWriter) LocalAddr() net.Addr {
|
||||
return d.laddr
|
||||
}
|
||||
|
||||
// Request returns the HTTP request
|
||||
func (d *DoHWriter) Request() *http.Request { return d.request }
|
||||
// Request returns the HTTP request.
|
||||
func (d *DoHWriter) Request() *http.Request {
|
||||
return d.request
|
||||
}
|
||||
|
||||
// Close no-op implementation.
|
||||
func (d *DoHWriter) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TsigStatus no-op implementation.
|
||||
func (d *DoHWriter) TsigStatus() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TsigTimersOnly no-op implementation.
|
||||
func (d *DoHWriter) TsigTimersOnly(_ bool) {}
|
||||
|
||||
// Hijack no-op implementation.
|
||||
func (d *DoHWriter) Hijack() {}
|
||||
|
|
|
@ -0,0 +1,60 @@
|
|||
package dnsserver
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"net"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
"github.com/quic-go/quic-go"
|
||||
)
|
||||
|
||||
type DoQWriter struct {
|
||||
localAddr net.Addr
|
||||
remoteAddr net.Addr
|
||||
stream quic.Stream
|
||||
Msg *dns.Msg
|
||||
}
|
||||
|
||||
func (w *DoQWriter) Write(b []byte) (int, error) {
|
||||
b = AddPrefix(b)
|
||||
return w.stream.Write(b)
|
||||
}
|
||||
|
||||
func (w *DoQWriter) WriteMsg(m *dns.Msg) error {
|
||||
bytes, err := m.Pack()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = w.Write(bytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return w.Close()
|
||||
}
|
||||
|
||||
// Close sends the STREAM FIN signal.
|
||||
// The server MUST send the response(s) on the same stream and MUST
|
||||
// indicate, after the last response, through the STREAM FIN
|
||||
// mechanism that no further data will be sent on that stream.
|
||||
// See https://www.rfc-editor.org/rfc/rfc9250#section-4.2-7
|
||||
func (w *DoQWriter) Close() error {
|
||||
return w.stream.Close()
|
||||
}
|
||||
|
||||
// AddPrefix adds a 2-byte prefix with the DNS message length.
|
||||
func AddPrefix(b []byte) (m []byte) {
|
||||
m = make([]byte, 2+len(b))
|
||||
binary.BigEndian.PutUint16(m, uint16(len(b)))
|
||||
copy(m[2:], b)
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// These methods implement the dns.ResponseWriter interface from Go DNS.
|
||||
func (w *DoQWriter) TsigStatus() error { return nil }
|
||||
func (w *DoQWriter) TsigTimersOnly(b bool) {}
|
||||
func (w *DoQWriter) Hijack() {}
|
||||
func (w *DoQWriter) LocalAddr() net.Addr { return w.localAddr }
|
||||
func (w *DoQWriter) RemoteAddr() net.Addr { return w.remoteAddr }
|
|
@ -1,7 +1,6 @@
|
|||
package dnsserver
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
@ -17,12 +16,7 @@ import (
|
|||
|
||||
const serverType = "dns"
|
||||
|
||||
// Any flags defined here, need to be namespaced to the serverType other
|
||||
// wise they potentially clash with other server types.
|
||||
func init() {
|
||||
flag.StringVar(&Port, serverType+".port", DefaultPort, "Default port")
|
||||
flag.StringVar(&Port, "p", DefaultPort, "Default port")
|
||||
|
||||
caddy.RegisterServerType(serverType, caddy.ServerType{
|
||||
Directives: func() []string { return Directives },
|
||||
DefaultInput: func() caddy.Input {
|
||||
|
@ -88,6 +82,8 @@ func (h *dnsContext) InspectServerBlocks(sourceFile string, serverBlocks []caddy
|
|||
port = Port
|
||||
case transport.TLS:
|
||||
port = transport.TLSPort
|
||||
case transport.QUIC:
|
||||
port = transport.QUICPort
|
||||
case transport.GRPC:
|
||||
port = transport.GRPCPort
|
||||
case transport.HTTPS:
|
||||
|
@ -147,7 +143,12 @@ func (h *dnsContext) MakeServers() ([]caddy.Server, error) {
|
|||
c.ListenHosts = c.firstConfigInBlock.ListenHosts
|
||||
c.Debug = c.firstConfigInBlock.Debug
|
||||
c.Stacktrace = c.firstConfigInBlock.Stacktrace
|
||||
c.TLSConfig = c.firstConfigInBlock.TLSConfig
|
||||
|
||||
// Fork TLSConfig for each encrypted connection
|
||||
c.TLSConfig = c.firstConfigInBlock.TLSConfig.Clone()
|
||||
c.ReadTimeout = c.firstConfigInBlock.ReadTimeout
|
||||
c.WriteTimeout = c.firstConfigInBlock.WriteTimeout
|
||||
c.IdleTimeout = c.firstConfigInBlock.IdleTimeout
|
||||
c.TsigSecret = c.firstConfigInBlock.TsigSecret
|
||||
}
|
||||
|
||||
|
@ -175,6 +176,13 @@ func (h *dnsContext) MakeServers() ([]caddy.Server, error) {
|
|||
}
|
||||
servers = append(servers, s)
|
||||
|
||||
case transport.QUIC:
|
||||
s, err := NewServerQUIC(addr, group)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
servers = append(servers, s)
|
||||
|
||||
case transport.GRPC:
|
||||
s, err := NewServergRPC(addr, group)
|
||||
if err != nil {
|
||||
|
@ -221,7 +229,8 @@ func (c *Config) AddPlugin(m plugin.Plugin) {
|
|||
}
|
||||
|
||||
// registerHandler adds a handler to a site's handler registration. Handlers
|
||||
// use this to announce that they exist to other plugin.
|
||||
//
|
||||
// use this to announce that they exist to other plugin.
|
||||
func (c *Config) registerHandler(h plugin.Handler) {
|
||||
if c.registry == nil {
|
||||
c.registry = make(map[string]plugin.Handler)
|
||||
|
@ -287,7 +296,7 @@ func (h *dnsContext) validateZonesAndListeningAddresses() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// groupSiteConfigsByListenAddr groups site configs by their listen
|
||||
// groupConfigsByListenAddr groups site configs by their listen
|
||||
// (bind) address, so sites that use the same listener can be served
|
||||
// on the same server instance. The return value maps the listen
|
||||
// address (what you pass into net.Listen) to the list of site configs.
|
||||
|
|
|
@ -44,6 +44,9 @@ type Server struct {
|
|||
debug bool // disable recover()
|
||||
stacktrace bool // enable stacktrace in recover error log
|
||||
classChaos bool // allow non-INET class queries
|
||||
idleTimeout time.Duration // Idle timeout for TCP
|
||||
readTimeout time.Duration // Read timeout for TCP
|
||||
writeTimeout time.Duration // Write timeout for TCP
|
||||
|
||||
tsigSecret map[string]string
|
||||
}
|
||||
|
@ -60,6 +63,9 @@ func NewServer(addr string, group []*Config) (*Server, error) {
|
|||
Addr: addr,
|
||||
zones: make(map[string][]*Config),
|
||||
graceTimeout: 5 * time.Second,
|
||||
idleTimeout: 10 * time.Second,
|
||||
readTimeout: 3 * time.Second,
|
||||
writeTimeout: 5 * time.Second,
|
||||
tsigSecret: make(map[string]string),
|
||||
}
|
||||
|
||||
|
@ -81,6 +87,17 @@ func NewServer(addr string, group []*Config) (*Server, error) {
|
|||
// append the config to the zone's configs
|
||||
s.zones[site.Zone] = append(s.zones[site.Zone], site)
|
||||
|
||||
// set timeouts
|
||||
if site.ReadTimeout != 0 {
|
||||
s.readTimeout = site.ReadTimeout
|
||||
}
|
||||
if site.WriteTimeout != 0 {
|
||||
s.writeTimeout = site.WriteTimeout
|
||||
}
|
||||
if site.IdleTimeout != 0 {
|
||||
s.idleTimeout = site.IdleTimeout
|
||||
}
|
||||
|
||||
// copy tsig secrets
|
||||
for key, secret := range site.TsigSecret {
|
||||
s.tsigSecret[key] = secret
|
||||
|
@ -130,11 +147,22 @@ var _ caddy.GracefulServer = &Server{}
|
|||
// This implements caddy.TCPServer interface.
|
||||
func (s *Server) Serve(l net.Listener) error {
|
||||
s.m.Lock()
|
||||
s.server[tcp] = &dns.Server{Listener: l, Net: "tcp", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
|
||||
ctx := context.WithValue(context.Background(), Key{}, s)
|
||||
ctx = context.WithValue(ctx, LoopKey{}, 0)
|
||||
s.ServeDNS(ctx, w, r)
|
||||
}), TsigSecret: s.tsigSecret}
|
||||
|
||||
s.server[tcp] = &dns.Server{Listener: l,
|
||||
Net: "tcp",
|
||||
TsigSecret: s.tsigSecret,
|
||||
MaxTCPQueries: tcpMaxQueries,
|
||||
ReadTimeout: s.readTimeout,
|
||||
WriteTimeout: s.writeTimeout,
|
||||
IdleTimeout: func() time.Duration {
|
||||
return s.idleTimeout
|
||||
},
|
||||
Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
|
||||
ctx := context.WithValue(context.Background(), Key{}, s)
|
||||
ctx = context.WithValue(ctx, LoopKey{}, 0)
|
||||
s.ServeDNS(ctx, w, r)
|
||||
})}
|
||||
|
||||
s.m.Unlock()
|
||||
|
||||
return s.server[tcp].ActivateAndServe()
|
||||
|
@ -404,6 +432,8 @@ func errorAndMetricsFunc(server string, w dns.ResponseWriter, r *dns.Msg, rc int
|
|||
const (
|
||||
tcp = 0
|
||||
udp = 1
|
||||
|
||||
tcpMaxQueries = -1
|
||||
)
|
||||
|
||||
type (
|
||||
|
|
|
@ -75,9 +75,9 @@ func NewServerHTTPS(addr string, group []*Config) (*ServerHTTPS, error) {
|
|||
}
|
||||
|
||||
srv := &http.Server{
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
IdleTimeout: 120 * time.Second,
|
||||
ReadTimeout: s.readTimeout,
|
||||
WriteTimeout: s.writeTimeout,
|
||||
IdleTimeout: s.idleTimeout,
|
||||
ErrorLog: stdlog.New(&loggerAdapter{}, "", 0),
|
||||
}
|
||||
sh := &ServerHTTPS{
|
||||
|
|
|
@ -0,0 +1,346 @@
|
|||
package dnsserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
|
||||
"github.com/coredns/coredns/plugin/metrics/vars"
|
||||
clog "github.com/coredns/coredns/plugin/pkg/log"
|
||||
"github.com/coredns/coredns/plugin/pkg/reuseport"
|
||||
"github.com/coredns/coredns/plugin/pkg/transport"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
"github.com/quic-go/quic-go"
|
||||
)
|
||||
|
||||
const (
|
||||
// DoQCodeNoError is used when the connection or stream needs to be
|
||||
// closed, but there is no error to signal.
|
||||
DoQCodeNoError quic.ApplicationErrorCode = 0
|
||||
|
||||
// DoQCodeInternalError signals that the DoQ implementation encountered
|
||||
// an internal error and is incapable of pursuing the transaction or the
|
||||
// connection.
|
||||
DoQCodeInternalError quic.ApplicationErrorCode = 1
|
||||
|
||||
// DoQCodeProtocolError signals that the DoQ implementation encountered
|
||||
// a protocol error and is forcibly aborting the connection.
|
||||
DoQCodeProtocolError quic.ApplicationErrorCode = 2
|
||||
)
|
||||
|
||||
// ServerQUIC represents an instance of a DNS-over-QUIC server.
|
||||
type ServerQUIC struct {
|
||||
*Server
|
||||
listenAddr net.Addr
|
||||
tlsConfig *tls.Config
|
||||
quicConfig *quic.Config
|
||||
quicListener *quic.Listener
|
||||
}
|
||||
|
||||
// NewServerQUIC returns a new CoreDNS QUIC server and compiles all plugin in to it.
|
||||
func NewServerQUIC(addr string, group []*Config) (*ServerQUIC, error) {
|
||||
s, err := NewServer(addr, group)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// The *tls* plugin must make sure that multiple conflicting
|
||||
// TLS configuration returns an error: it can only be specified once.
|
||||
var tlsConfig *tls.Config
|
||||
for _, z := range s.zones {
|
||||
for _, conf := range z {
|
||||
// Should we error if some configs *don't* have TLS?
|
||||
tlsConfig = conf.TLSConfig
|
||||
}
|
||||
}
|
||||
|
||||
if tlsConfig != nil {
|
||||
tlsConfig.NextProtos = []string{"doq"}
|
||||
}
|
||||
|
||||
var quicConfig *quic.Config
|
||||
quicConfig = &quic.Config{
|
||||
MaxIdleTimeout: s.idleTimeout,
|
||||
MaxIncomingStreams: math.MaxUint16,
|
||||
MaxIncomingUniStreams: math.MaxUint16,
|
||||
// Enable 0-RTT by default for all connections on the server-side.
|
||||
Allow0RTT: true,
|
||||
}
|
||||
|
||||
return &ServerQUIC{Server: s, tlsConfig: tlsConfig, quicConfig: quicConfig}, nil
|
||||
}
|
||||
|
||||
// ServePacket implements caddy.UDPServer interface.
|
||||
func (s *ServerQUIC) ServePacket(p net.PacketConn) error {
|
||||
s.m.Lock()
|
||||
s.listenAddr = s.quicListener.Addr()
|
||||
s.m.Unlock()
|
||||
|
||||
return s.ServeQUIC()
|
||||
}
|
||||
|
||||
// ServeQUIC listens for incoming QUIC packets.
|
||||
func (s *ServerQUIC) ServeQUIC() error {
|
||||
for {
|
||||
conn, err := s.quicListener.Accept(context.Background())
|
||||
if err != nil {
|
||||
if s.isExpectedErr(err) {
|
||||
s.closeQUICConn(conn, DoQCodeNoError)
|
||||
return err
|
||||
}
|
||||
|
||||
s.closeQUICConn(conn, DoQCodeInternalError)
|
||||
return err
|
||||
}
|
||||
|
||||
go s.serveQUICConnection(conn)
|
||||
}
|
||||
}
|
||||
|
||||
// serveQUICConnection handles a new QUIC connection. It waits for new streams
|
||||
// and passes them to serveQUICStream.
|
||||
func (s *ServerQUIC) serveQUICConnection(conn quic.Connection) {
|
||||
for {
|
||||
// In DoQ, one query consumes one stream.
|
||||
// The client MUST select the next available client-initiated bidirectional
|
||||
// stream for each subsequent query on a QUIC connection.
|
||||
stream, err := conn.AcceptStream(context.Background())
|
||||
if err != nil {
|
||||
if s.isExpectedErr(err) {
|
||||
s.closeQUICConn(conn, DoQCodeNoError)
|
||||
return
|
||||
}
|
||||
|
||||
s.closeQUICConn(conn, DoQCodeInternalError)
|
||||
return
|
||||
}
|
||||
|
||||
go s.serveQUICStream(stream, conn)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ServerQUIC) serveQUICStream(stream quic.Stream, conn quic.Connection) {
|
||||
buf, err := readDOQMessage(stream)
|
||||
|
||||
// io.EOF does not really mean that there's any error, it is just
|
||||
// the STREAM FIN indicating that there will be no data to read
|
||||
// anymore from this stream.
|
||||
if err != nil && err != io.EOF {
|
||||
s.closeQUICConn(conn, DoQCodeProtocolError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
req := &dns.Msg{}
|
||||
err = req.Unpack(buf)
|
||||
if err != nil {
|
||||
clog.Debugf("unpacking quic packet: %s", err)
|
||||
s.closeQUICConn(conn, DoQCodeProtocolError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if !validRequest(req) {
|
||||
// If a peer encounters such an error condition, it is considered a
|
||||
// fatal error. It SHOULD forcibly abort the connection using QUIC's
|
||||
// CONNECTION_CLOSE mechanism and SHOULD use the DoQ error code
|
||||
// DOQ_PROTOCOL_ERROR.
|
||||
// See https://www.rfc-editor.org/rfc/rfc9250#section-4.3.3-3
|
||||
s.closeQUICConn(conn, DoQCodeProtocolError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
w := &DoQWriter{
|
||||
localAddr: conn.LocalAddr(),
|
||||
remoteAddr: conn.RemoteAddr(),
|
||||
stream: stream,
|
||||
Msg: req,
|
||||
}
|
||||
|
||||
dnsCtx := context.WithValue(stream.Context(), Key{}, s.Server)
|
||||
dnsCtx = context.WithValue(dnsCtx, LoopKey{}, 0)
|
||||
s.ServeDNS(dnsCtx, w, req)
|
||||
s.countResponse(DoQCodeNoError)
|
||||
}
|
||||
|
||||
// ListenPacket implements caddy.UDPServer interface.
|
||||
func (s *ServerQUIC) ListenPacket() (net.PacketConn, error) {
|
||||
p, err := reuseport.ListenPacket("udp", s.Addr[len(transport.QUIC+"://"):])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
|
||||
s.quicListener, err = quic.Listen(p, s.tlsConfig, s.quicConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// OnStartupComplete lists the sites served by this server
|
||||
// and any relevant information, assuming Quiet is false.
|
||||
func (s *ServerQUIC) OnStartupComplete() {
|
||||
if Quiet {
|
||||
return
|
||||
}
|
||||
|
||||
out := startUpZones(transport.QUIC+"://", s.Addr, s.zones)
|
||||
if out != "" {
|
||||
fmt.Print(out)
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops the server non-gracefully. It blocks until the server is totally stopped.
|
||||
func (s *ServerQUIC) Stop() error {
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
|
||||
if s.quicListener != nil {
|
||||
return s.quicListener.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serve implements caddy.TCPServer interface.
|
||||
func (s *ServerQUIC) Serve(l net.Listener) error { return nil }
|
||||
|
||||
// Listen implements caddy.TCPServer interface.
|
||||
func (s *ServerQUIC) Listen() (net.Listener, error) { return nil, nil }
|
||||
|
||||
// closeQUICConn quietly closes the QUIC connection.
|
||||
func (s *ServerQUIC) closeQUICConn(conn quic.Connection, code quic.ApplicationErrorCode) {
|
||||
if conn == nil {
|
||||
return
|
||||
}
|
||||
|
||||
clog.Debugf("closing quic conn %s with code %d", conn.LocalAddr(), code)
|
||||
err := conn.CloseWithError(code, "")
|
||||
if err != nil {
|
||||
clog.Debugf("failed to close quic connection with code %d: %s", code, err)
|
||||
}
|
||||
|
||||
// DoQCodeNoError metrics are already registered after s.ServeDNS()
|
||||
if code != DoQCodeNoError {
|
||||
s.countResponse(code)
|
||||
}
|
||||
}
|
||||
|
||||
// validRequest checks for protocol errors in the unpacked DNS message.
|
||||
// See https://www.rfc-editor.org/rfc/rfc9250.html#name-protocol-errors
|
||||
func validRequest(req *dns.Msg) (ok bool) {
|
||||
// 1. a client or server receives a message with a non-zero Message ID.
|
||||
if req.Id != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// 2. an implementation receives a message containing the edns-tcp-keepalive
|
||||
// EDNS(0) Option [RFC7828].
|
||||
if opt := req.IsEdns0(); opt != nil {
|
||||
for _, option := range opt.Option {
|
||||
if option.Option() == dns.EDNS0TCPKEEPALIVE {
|
||||
clog.Debug("client sent EDNS0 TCP keepalive option")
|
||||
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 3. the client or server does not indicate the expected STREAM FIN after
|
||||
// sending requests or responses.
|
||||
//
|
||||
// This is quite problematic to validate this case since this would imply
|
||||
// we have to wait until STREAM FIN is arrived before we start processing
|
||||
// the message. So we're consciously ignoring this case in this
|
||||
// implementation.
|
||||
|
||||
// 4. a server receives a "replayable" transaction in 0-RTT data
|
||||
//
|
||||
// The information necessary to validate this is not exposed by quic-go.
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// readDOQMessage reads a DNS over QUIC (DOQ) message from the given stream
|
||||
// and returns the message bytes.
|
||||
// Drafts of the RFC9250 did not require the 2-byte prefixed message length.
|
||||
// Thus, we are only supporting the official version (DoQ v1).
|
||||
func readDOQMessage(r io.Reader) ([]byte, error) {
|
||||
// All DNS messages (queries and responses) sent over DoQ connections MUST
|
||||
// be encoded as a 2-octet length field followed by the message content as
|
||||
// specified in [RFC1035].
|
||||
// See https://www.rfc-editor.org/rfc/rfc9250.html#section-4.2-4
|
||||
sizeBuf := make([]byte, 2)
|
||||
_, err := io.ReadFull(r, sizeBuf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
size := binary.BigEndian.Uint16(sizeBuf)
|
||||
|
||||
if size == 0 {
|
||||
return nil, fmt.Errorf("message size is 0: probably unsupported DoQ version")
|
||||
}
|
||||
|
||||
buf := make([]byte, size)
|
||||
_, err = io.ReadFull(r, buf)
|
||||
|
||||
// A client or server receives a STREAM FIN before receiving all the bytes
|
||||
// for a message indicated in the 2-octet length field.
|
||||
// See https://www.rfc-editor.org/rfc/rfc9250#section-4.3.3-2.2
|
||||
if size != uint16(len(buf)) {
|
||||
return nil, fmt.Errorf("message size does not match 2-byte prefix")
|
||||
}
|
||||
|
||||
return buf, err
|
||||
}
|
||||
|
||||
// isExpectedErr returns true if err is an expected error, likely related to
|
||||
// the current implementation.
|
||||
func (s *ServerQUIC) isExpectedErr(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// This error is returned when the QUIC listener was closed by us. As
|
||||
// graceful shutdown is not implemented, the connection will be abruptly
|
||||
// closed but there is no error to signal.
|
||||
if errors.Is(err, quic.ErrServerClosed) {
|
||||
return true
|
||||
}
|
||||
|
||||
// This error happens when the connection was closed due to a DoQ
|
||||
// protocol error but there's still something to read in the closed stream.
|
||||
// For example, when the message was sent without the prefixed length.
|
||||
var qAppErr *quic.ApplicationError
|
||||
if errors.As(err, &qAppErr) && qAppErr.ErrorCode == 2 {
|
||||
return true
|
||||
}
|
||||
|
||||
// When a connection hits the idle timeout, quic.AcceptStream() returns
|
||||
// an IdleTimeoutError. In this, case, we should just drop the connection
|
||||
// with DoQCodeNoError.
|
||||
var qIdleErr *quic.IdleTimeoutError
|
||||
return errors.As(err, &qIdleErr)
|
||||
}
|
||||
|
||||
func (s *ServerQUIC) countResponse(code quic.ApplicationErrorCode) {
|
||||
switch code {
|
||||
case DoQCodeNoError:
|
||||
vars.QUICResponsesCount.WithLabelValues(s.Addr, "0x0").Inc()
|
||||
case DoQCodeInternalError:
|
||||
vars.QUICResponsesCount.WithLabelValues(s.Addr, "0x1").Inc()
|
||||
case DoQCodeProtocolError:
|
||||
vars.QUICResponsesCount.WithLabelValues(s.Addr, "0x2").Inc()
|
||||
}
|
||||
}
|
|
@ -5,6 +5,7 @@ import (
|
|||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/coredns/caddy"
|
||||
"github.com/coredns/coredns/plugin/pkg/reuseport"
|
||||
|
@ -50,11 +51,20 @@ func (s *ServerTLS) Serve(l net.Listener) error {
|
|||
}
|
||||
|
||||
// Only fill out the TCP server for this one.
|
||||
s.server[tcp] = &dns.Server{Listener: l, Net: "tcp-tls", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
|
||||
ctx := context.WithValue(context.Background(), Key{}, s.Server)
|
||||
ctx = context.WithValue(ctx, LoopKey{}, 0)
|
||||
s.ServeDNS(ctx, w, r)
|
||||
})}
|
||||
s.server[tcp] = &dns.Server{Listener: l,
|
||||
Net: "tcp-tls",
|
||||
MaxTCPQueries: tlsMaxQueries,
|
||||
ReadTimeout: s.readTimeout,
|
||||
WriteTimeout: s.writeTimeout,
|
||||
IdleTimeout: func() time.Duration {
|
||||
return s.idleTimeout
|
||||
},
|
||||
Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
|
||||
ctx := context.WithValue(context.Background(), Key{}, s.Server)
|
||||
ctx = context.WithValue(ctx, LoopKey{}, 0)
|
||||
s.ServeDNS(ctx, w, r)
|
||||
})}
|
||||
|
||||
s.m.Unlock()
|
||||
|
||||
return s.server[tcp].ActivateAndServe()
|
||||
|
@ -87,3 +97,7 @@ func (s *ServerTLS) OnStartupComplete() {
|
|||
fmt.Print(out)
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
tlsMaxQueries = -1
|
||||
)
|
||||
|
|
|
@ -10,14 +10,15 @@ package dnsserver
|
|||
// (after) them during a request, but they must not
|
||||
// care what plugin above them are doing.
|
||||
var Directives = []string{
|
||||
"root",
|
||||
"metadata",
|
||||
"geoip",
|
||||
"cancel",
|
||||
"tls",
|
||||
"timeouts",
|
||||
"reload",
|
||||
"nsid",
|
||||
"bufsize",
|
||||
"root",
|
||||
"bind",
|
||||
"debug",
|
||||
"trace",
|
||||
|
|
|
@ -28,6 +28,9 @@ func init() {
|
|||
caddy.RegisterCaddyfileLoader("flag", caddy.LoaderFunc(confLoader))
|
||||
caddy.SetDefaultCaddyfileLoader("default", caddy.LoaderFunc(defaultLoader))
|
||||
|
||||
flag.StringVar(&dnsserver.Port, serverType+".port", dnsserver.DefaultPort, "Default port")
|
||||
flag.StringVar(&dnsserver.Port, "p", dnsserver.DefaultPort, "Default port")
|
||||
|
||||
caddy.AppName = coreName
|
||||
caddy.AppVersion = CoreVersion
|
||||
}
|
||||
|
@ -42,7 +45,7 @@ func Run() {
|
|||
}
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(0) // Set to 0 because we're doing our own time, with timezone
|
||||
log.SetFlags(LogFlags)
|
||||
|
||||
if version {
|
||||
showVersion()
|
||||
|
@ -166,10 +169,14 @@ var (
|
|||
conf string
|
||||
version bool
|
||||
plugins bool
|
||||
|
||||
// LogFlags are initially set to 0 for no extra output
|
||||
LogFlags int
|
||||
)
|
||||
|
||||
// Build information obtained with the help of -ldflags
|
||||
var (
|
||||
// nolint
|
||||
appVersion = "(untracked dev build)" // inferred at startup
|
||||
devBuild = true // inferred at startup
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@ package coremain
|
|||
|
||||
// Various CoreDNS constants.
|
||||
const (
|
||||
CoreVersion = "1.10.0"
|
||||
CoreVersion = "1.11.3"
|
||||
coreName = "CoreDNS"
|
||||
serverType = "dns"
|
||||
)
|
||||
|
|
|
@ -10,8 +10,7 @@ With *cache* enabled, all records except zone transfers and metadata records wil
|
|||
3600s. Caching is mostly useful in a scenario when fetching data from the backend (upstream,
|
||||
database, etc.) is expensive.
|
||||
|
||||
*Cache* will change the query to enable DNSSEC (DNSSEC OK; DO) if it passes through the plugin. If
|
||||
the client didn't request any DNSSEC (records), these are filtered out when replying.
|
||||
*Cache* will pass DNSSEC (DNSSEC OK; DO) options through the plugin for upstream queries.
|
||||
|
||||
This plugin can only be used once per Server Block.
|
||||
|
||||
|
@ -40,6 +39,7 @@ cache [TTL] [ZONES...] {
|
|||
serve_stale [DURATION] [REFRESH_MODE]
|
||||
servfail DURATION
|
||||
disable success|denial [ZONES...]
|
||||
keepttl
|
||||
}
|
||||
~~~
|
||||
|
||||
|
@ -70,6 +70,11 @@ cache [TTL] [ZONES...] {
|
|||
greater than 5 minutes.
|
||||
* `disable` disable the success or denial cache for the listed **ZONES**. If no **ZONES** are given, the specified
|
||||
cache will be disabled for all zones.
|
||||
* `keepttl` do not age TTL when serving responses from cache. The entry will still be removed from cache
|
||||
when the TTL expires as normal, but until it expires responses will include the original TTL instead
|
||||
of the remaining TTL. This can be useful if CoreDNS is used as an authoritative server and you want
|
||||
to serve a consistent TTL to downstream clients. This is **NOT** recommended when CoreDNS is caching
|
||||
records it is not authoritative for because it could result in downstream clients using stale answers.
|
||||
|
||||
## Capacity and Eviction
|
||||
|
||||
|
@ -136,4 +141,4 @@ example.org {
|
|||
disable denial sub.example.org
|
||||
}
|
||||
}
|
||||
~~~
|
||||
~~~
|
||||
|
|
|
@@ -48,6 +48,9 @@ type Cache struct {
pexcept []string
nexcept []string

// Keep ttl option
keepttl bool

// Testing.
now func() time.Time
}

@@ -76,7 +79,7 @@ func New() *Cache {
// key returns key under which we store the item, -1 will be returned if we don't store the message.
// Currently we do not cache Truncated, errors, zone transfers or dynamic update messages.
// qname holds the already lowercased qname.
func key(qname string, m *dns.Msg, t response.Type) (bool, uint64) {
func key(qname string, m *dns.Msg, t response.Type, do, cd bool) (bool, uint64) {
// We don't store truncated responses.
if m.Truncated {
return false, 0

@@ -86,11 +89,27 @@ func key(qname string, m *dns.Msg, t response.Type) (bool, uint64) {
return false, 0
}

return true, hash(qname, m.Question[0].Qtype)
return true, hash(qname, m.Question[0].Qtype, do, cd)
}

func hash(qname string, qtype uint16) uint64 {
var one = []byte("1")
var zero = []byte("0")

func hash(qname string, qtype uint16, do, cd bool) uint64 {
h := fnv.New64()

if do {
h.Write(one)
} else {
h.Write(zero)
}

if cd {
h.Write(one)
} else {
h.Write(zero)
}

h.Write([]byte{byte(qtype >> 8)})
h.Write([]byte{byte(qtype)})
h.Write([]byte(qname))
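For illustration, a standalone sketch that mirrors the new hashing above (the `cacheKey` helper below is hypothetical, not the plugin's API) and shows why the DO and CD bits now produce distinct cache entries for the same question:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// cacheKey mirrors the new hash(qname, qtype, do, cd) logic from the diff:
// the DO and CD bits are folded into the FNV-64 key, so the same question
// cached with and without DNSSEC ends up in two distinct cache entries.
func cacheKey(qname string, qtype uint16, do, cd bool) uint64 {
	one, zero := []byte("1"), []byte("0")
	h := fnv.New64()
	if do {
		h.Write(one)
	} else {
		h.Write(zero)
	}
	if cd {
		h.Write(one)
	} else {
		h.Write(zero)
	}
	h.Write([]byte{byte(qtype >> 8)})
	h.Write([]byte{byte(qtype)})
	h.Write([]byte(qname))
	return h.Sum64()
}

func main() {
	const qname, qtype = "example.org.", 1 // A record
	fmt.Println(cacheKey(qname, qtype, false, false))
	fmt.Println(cacheKey(qname, qtype, true, false)) // different key: DO bit set
	fmt.Println(cacheKey(qname, qtype, true, true))  // different again: CD bit set
}
```

Because the bits are written into the FNV-64 state before the qtype and qname, a DNSSEC (DO) lookup and a plain lookup can no longer collide on one cache slot.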
@ -116,6 +135,7 @@ type ResponseWriter struct {
|
|||
server string // Server handling the request.
|
||||
|
||||
do bool // When true the original request had the DO bit set.
|
||||
cd bool // When true the original request had the CD bit set.
|
||||
ad bool // When true the original request had the AD bit set.
|
||||
prefetch bool // When true write nothing back to the client.
|
||||
remoteAddr net.Addr
|
||||
|
@ -145,6 +165,8 @@ func newPrefetchResponseWriter(server string, state request.Request, c *Cache) *
|
|||
Cache: c,
|
||||
state: state,
|
||||
server: server,
|
||||
do: state.Do(),
|
||||
cd: state.Req.CheckingDisabled,
|
||||
prefetch: true,
|
||||
remoteAddr: addr,
|
||||
}
|
||||
|
@ -163,7 +185,7 @@ func (w *ResponseWriter) WriteMsg(res *dns.Msg) error {
|
|||
mt, _ := response.Typify(res, w.now().UTC())
|
||||
|
||||
// key returns empty string for anything we don't want to cache.
|
||||
hasKey, key := key(w.state.Name(), res, mt)
|
||||
hasKey, key := key(w.state.Name(), res, mt, w.do, w.cd)
|
||||
|
||||
msgTTL := dnsutil.MinimalTTL(res, mt)
|
||||
var duration time.Duration
|
||||
|
@ -191,11 +213,10 @@ func (w *ResponseWriter) WriteMsg(res *dns.Msg) error {
|
|||
}
|
||||
|
||||
// Apply capped TTL to this reply to avoid jarring TTL experience 1799 -> 8 (e.g.)
|
||||
// We also may need to filter out DNSSEC records, see toMsg() for similar code.
|
||||
ttl := uint32(duration.Seconds())
|
||||
res.Answer = filterRRSlice(res.Answer, ttl, w.do, false)
|
||||
res.Ns = filterRRSlice(res.Ns, ttl, w.do, false)
|
||||
res.Extra = filterRRSlice(res.Extra, ttl, w.do, false)
|
||||
res.Answer = filterRRSlice(res.Answer, ttl, false)
|
||||
res.Ns = filterRRSlice(res.Ns, ttl, false)
|
||||
res.Extra = filterRRSlice(res.Extra, ttl, false)
|
||||
|
||||
if !w.do && !w.ad {
|
||||
// unset AD bit if requester is not OK with DNSSEC
|
||||
|
|
|
@ -2,35 +2,13 @@ package cache
|
|||
|
||||
import "github.com/miekg/dns"
|
||||
|
||||
// isDNSSEC returns true if r is a DNSSEC record. NSEC,NSEC3,DS and RRSIG/SIG
|
||||
// are DNSSEC records. DNSKEYs is not in this list on the assumption that the
|
||||
// client explicitly asked for it.
|
||||
func isDNSSEC(r dns.RR) bool {
|
||||
switch r.Header().Rrtype {
|
||||
case dns.TypeNSEC:
|
||||
return true
|
||||
case dns.TypeNSEC3:
|
||||
return true
|
||||
case dns.TypeDS:
|
||||
return true
|
||||
case dns.TypeRRSIG:
|
||||
return true
|
||||
case dns.TypeSIG:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// filterRRSlice filters rrs and removes DNSSEC RRs when do is false. In the returned slice
|
||||
// the TTLs are set to ttl. If dup is true the RRs in rrs are _copied_ into the slice that is
|
||||
// filterRRSlice filters out OPT RRs, and sets all RR TTLs to ttl.
|
||||
// If dup is true the RRs in rrs are _copied_ into the slice that is
|
||||
// returned.
|
||||
func filterRRSlice(rrs []dns.RR, ttl uint32, do, dup bool) []dns.RR {
|
||||
func filterRRSlice(rrs []dns.RR, ttl uint32, dup bool) []dns.RR {
|
||||
j := 0
|
||||
rs := make([]dns.RR, len(rrs))
|
||||
for _, r := range rrs {
|
||||
if !do && isDNSSEC(r) {
|
||||
continue
|
||||
}
|
||||
if r.Header().Rrtype == dns.TypeOPT {
|
||||
continue
|
||||
}
|
||||
|
|
|
@@ -18,6 +18,7 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
rc := r.Copy() // We potentially modify r, to prevent other plugins from seeing this (r is a pointer), copy r into rc.
state := request.Request{W: w, Req: rc}
do := state.Do()
cd := r.CheckingDisabled
ad := r.AuthenticatedData

zone := plugin.Zones(c.Zones).Matches(state.Name())

@@ -28,17 +29,15 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
now := c.now().UTC()
server := metrics.WithServer(ctx)

// On cache miss, if the request has the OPT record and the DO bit set we leave the message as-is. If there isn't a DO bit
// set we will modify the request to _add_ one. This means we will always do DNSSEC lookups on cache misses.
// When writing to cache, any DNSSEC RRs in the response are written to cache with the response.
// When sending a response to a non-DNSSEC client, we remove DNSSEC RRs from the response. We use a 2048 buffer size, which is
// less than 4096 (and older default) and more than 1024 which may be too small. We might need to tweaks this
// value to be smaller still to prevent UDP fragmentation?
// On cache refresh, we will just use the DO bit from the incoming query for the refresh since we key our cache
// with the query DO bit. That means two separate cache items for the query DO bit true or false. In the situation
// in which upstream doesn't support DNSSEC, the two cache items will effectively be the same. Regardless, any
// DNSSEC RRs in the response are written to cache with the response.

ttl := 0
i := c.getIgnoreTTL(now, state, server)
if i == nil {
crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do, ad: ad,
crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do, ad: ad, cd: cd,
nexcept: c.nexcept, pexcept: c.pexcept, wildcardFunc: wildcardFunc(ctx)}
return c.doRefresh(ctx, state, crr)
}
@ -46,7 +45,7 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
|
|||
if ttl < 0 {
|
||||
// serve stale behavior
|
||||
if c.verifyStale {
|
||||
crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do}
|
||||
crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do, cd: cd}
|
||||
cw := newVerifyStaleResponseWriter(crr)
|
||||
ret, err := c.doRefresh(ctx, state, cw)
|
||||
if cw.refreshed {
|
||||
|
@@ -73,6 +72,11 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
})
}

if c.keepttl {
// If keepttl is enabled we fake the current time to the stored
// one so that we always get the original TTL
now = i.stored
}
resp := i.toMsg(r, now, do, ad)
w.WriteMsg(resp)
return dns.RcodeSuccess, nil
@ -101,9 +105,6 @@ func (c *Cache) doPrefetch(ctx context.Context, state request.Request, cw *Respo
|
|||
}
|
||||
|
||||
func (c *Cache) doRefresh(ctx context.Context, state request.Request, cw dns.ResponseWriter) (int, error) {
|
||||
if !state.Do() {
|
||||
setDo(state.Req)
|
||||
}
|
||||
return plugin.NextOrFailure(c.Name(), c.Next, ctx, cw, state.Req)
|
||||
}
|
||||
|
||||
|
@ -121,7 +122,7 @@ func (c *Cache) Name() string { return "cache" }
|
|||
|
||||
// getIgnoreTTL unconditionally returns an item if it exists in the cache.
|
||||
func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string) *item {
|
||||
k := hash(state.Name(), state.QType())
|
||||
k := hash(state.Name(), state.QType(), state.Do(), state.Req.CheckingDisabled)
|
||||
cacheRequests.WithLabelValues(server, c.zonesMetricLabel, c.viewMetricLabel).Inc()
|
||||
|
||||
if i, ok := c.ncache.Get(k); ok {
|
||||
|
@ -145,7 +146,7 @@ func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string
|
|||
}
|
||||
|
||||
func (c *Cache) exists(state request.Request) *item {
|
||||
k := hash(state.Name(), state.QType())
|
||||
k := hash(state.Name(), state.QType(), state.Do(), state.Req.CheckingDisabled)
|
||||
if i, ok := c.ncache.Get(k); ok {
|
||||
return i.(*item)
|
||||
}
|
||||
|
@ -154,22 +155,3 @@ func (c *Cache) exists(state request.Request) *item {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setDo sets the DO bit and UDP buffer size in the message m.
|
||||
func setDo(m *dns.Msg) {
|
||||
o := m.IsEdns0()
|
||||
if o != nil {
|
||||
o.SetDo()
|
||||
o.SetUDPSize(defaultUDPBufSize)
|
||||
return
|
||||
}
|
||||
|
||||
o = &dns.OPT{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeOPT}}
|
||||
o.SetDo()
|
||||
o.SetUDPSize(defaultUDPBufSize)
|
||||
m.Extra = append(m.Extra, o)
|
||||
}
|
||||
|
||||
// defaultUDPBufsize is the bufsize the cache plugin uses on outgoing requests that don't
|
||||
// have an OPT RR.
|
||||
const defaultUDPBufSize = 2048
|
||||
|
|
|
@ -87,9 +87,9 @@ func (i *item) toMsg(m *dns.Msg, now time.Time, do bool, ad bool) *dns.Msg {
|
|||
m1.Extra = make([]dns.RR, len(i.Extra))
|
||||
|
||||
ttl := uint32(i.ttl(now))
|
||||
m1.Answer = filterRRSlice(i.Answer, ttl, do, true)
|
||||
m1.Ns = filterRRSlice(i.Ns, ttl, do, true)
|
||||
m1.Extra = filterRRSlice(i.Extra, ttl, do, true)
|
||||
m1.Answer = filterRRSlice(i.Answer, ttl, true)
|
||||
m1.Ns = filterRRSlice(i.Ns, ttl, true)
|
||||
m1.Extra = filterRRSlice(i.Extra, ttl, true)
|
||||
|
||||
return m1
|
||||
}
|
||||
|
|
|
@ -240,6 +240,12 @@ func cacheParse(c *caddy.Controller) (*Cache, error) {
|
|||
default:
|
||||
return nil, fmt.Errorf("cache type for disable must be %q or %q", Success, Denial)
|
||||
}
|
||||
case "keepttl":
|
||||
args := c.RemainingArgs()
|
||||
if len(args) != 0 {
|
||||
return nil, c.ArgErr()
|
||||
}
|
||||
ca.keepttl = true
|
||||
default:
|
||||
return nil, c.ArgErr()
|
||||
}
|
||||
|
|
|
@ -8,33 +8,32 @@
|
|||
//
|
||||
// Implement the Provider interface for a plugin p:
|
||||
//
|
||||
// func (p P) Metadata(ctx context.Context, state request.Request) context.Context {
|
||||
// metadata.SetValueFunc(ctx, "test/something", func() string { return "myvalue" })
|
||||
// return ctx
|
||||
// }
|
||||
// func (p P) Metadata(ctx context.Context, state request.Request) context.Context {
|
||||
// metadata.SetValueFunc(ctx, "test/something", func() string { return "myvalue" })
|
||||
// return ctx
|
||||
// }
|
||||
//
|
||||
// Basic example with caching:
|
||||
//
|
||||
// func (p P) Metadata(ctx context.Context, state request.Request) context.Context {
|
||||
// cached := ""
|
||||
// f := func() string {
|
||||
// if cached != "" {
|
||||
// return cached
|
||||
// }
|
||||
// cached = expensiveFunc()
|
||||
// return cached
|
||||
// }
|
||||
// metadata.SetValueFunc(ctx, "test/something", f)
|
||||
// return ctx
|
||||
// }
|
||||
// func (p P) Metadata(ctx context.Context, state request.Request) context.Context {
|
||||
// cached := ""
|
||||
// f := func() string {
|
||||
// if cached != "" {
|
||||
// return cached
|
||||
// }
|
||||
// cached = expensiveFunc()
|
||||
// return cached
|
||||
// }
|
||||
// metadata.SetValueFunc(ctx, "test/something", f)
|
||||
// return ctx
|
||||
// }
|
||||
//
|
||||
// If you need access to this metadata from another plugin:
|
||||
//
|
||||
// // ...
|
||||
// valueFunc := metadata.ValueFunc(ctx, "test/something")
|
||||
// value := valueFunc()
|
||||
// // use 'value'
|
||||
//
|
||||
// // ...
|
||||
// valueFunc := metadata.ValueFunc(ctx, "test/something")
|
||||
// value := valueFunc()
|
||||
// // use 'value'
|
||||
package metadata
|
||||
|
||||
import (
|
||||
|
|
|
@@ -21,6 +21,7 @@ the following metrics are exported:
* `coredns_dns_response_size_bytes{server, zone, view, proto}` - response size in bytes.
* `coredns_dns_responses_total{server, zone, view, rcode, plugin}` - response per zone, rcode and plugin.
* `coredns_dns_https_responses_total{server, status}` - responses per server and http status code.
* `coredns_dns_quic_responses_total{server, status}` - responses per server and QUIC application code.
* `coredns_plugin_enabled{server, zone, view, name}` - indicates whether a plugin is enabled on per server, zone and view basis.

Almost every counter has a label `zone` which is the zonename used for the request/response.
|
@ -17,19 +17,21 @@ var (
|
|||
}, []string{"server", "zone", "view", "proto", "family", "type"})
|
||||
|
||||
RequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "request_duration_seconds",
|
||||
Buckets: plugin.TimeBuckets,
|
||||
Help: "Histogram of the time (in seconds) each request took per zone.",
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "request_duration_seconds",
|
||||
Buckets: plugin.TimeBuckets,
|
||||
NativeHistogramBucketFactor: plugin.NativeHistogramBucketFactor,
|
||||
Help: "Histogram of the time (in seconds) each request took per zone.",
|
||||
}, []string{"server", "zone", "view"})
|
||||
|
||||
RequestSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "request_size_bytes",
|
||||
Help: "Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol.",
|
||||
Buckets: []float64{0, 100, 200, 300, 400, 511, 1023, 2047, 4095, 8291, 16e3, 32e3, 48e3, 64e3},
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "request_size_bytes",
|
||||
Help: "Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol.",
|
||||
Buckets: []float64{0, 100, 200, 300, 400, 511, 1023, 2047, 4095, 8291, 16e3, 32e3, 48e3, 64e3},
|
||||
NativeHistogramBucketFactor: plugin.NativeHistogramBucketFactor,
|
||||
}, []string{"server", "zone", "view", "proto"})
|
||||
|
||||
RequestDo = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
|
@ -40,11 +42,12 @@ var (
|
|||
}, []string{"server", "zone", "view"})
|
||||
|
||||
ResponseSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "response_size_bytes",
|
||||
Help: "Size of the returned response in bytes.",
|
||||
Buckets: []float64{0, 100, 200, 300, 400, 511, 1023, 2047, 4095, 8291, 16e3, 32e3, 48e3, 64e3},
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "response_size_bytes",
|
||||
Help: "Size of the returned response in bytes.",
|
||||
Buckets: []float64{0, 100, 200, 300, 400, 511, 1023, 2047, 4095, 8291, 16e3, 32e3, 48e3, 64e3},
|
||||
NativeHistogramBucketFactor: plugin.NativeHistogramBucketFactor,
|
||||
}, []string{"server", "zone", "view", "proto"})
|
||||
|
||||
ResponseRcode = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
|
@ -72,6 +75,13 @@ var (
|
|||
Name: "https_responses_total",
|
||||
Help: "Counter of DoH responses per server and http status code.",
|
||||
}, []string{"server", "status"})
|
||||
|
||||
QUICResponsesCount = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "quic_responses_total",
|
||||
Help: "Counter of DoQ responses per server and QUIC application code.",
|
||||
}, []string{"server", "status"})
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
@ -48,5 +48,6 @@ const (
|
|||
// MinimalDefaultTTL is the absolute lowest TTL we use in CoreDNS.
|
||||
MinimalDefaultTTL = 5 * time.Second
|
||||
// MaximumDefaulTTL is the maximum TTL we use on RRsets in CoreDNS.
|
||||
// TODO: rename as MaximumDefaultTTL
|
||||
MaximumDefaulTTL = 1 * time.Hour
|
||||
)
|
||||
|
|
|
@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"net/http"
"strings"

"github.com/miekg/dns"
)

@@ -16,18 +17,30 @@ const MimeType = "application/dns-message"
// Path is the URL path that should be used.
const Path = "/dns-query"

// NewRequest returns a new DoH request given a method, URL (without any paths, so exclude /dns-query) and dns.Msg.
// NewRequest returns a new DoH request given a HTTP method, URL and dns.Msg.
//
// The URL should not have a path, so please exclude /dns-query. The URL will
// be prefixed with https:// by default, unless it's already prefixed with
// either http:// or https://.
func NewRequest(method, url string, m *dns.Msg) (*http.Request, error) {
buf, err := m.Pack()
if err != nil {
return nil, err
}

if !strings.HasPrefix(url, "http://") && !strings.HasPrefix(url, "https://") {
url = fmt.Sprintf("https://%s", url)
}

switch method {
case http.MethodGet:
b64 := base64.RawURLEncoding.EncodeToString(buf)

req, err := http.NewRequest(http.MethodGet, "https://"+url+Path+"?dns="+b64, nil)
req, err := http.NewRequest(
http.MethodGet,
fmt.Sprintf("%s%s?dns=%s", url, Path, b64),
nil,
)
if err != nil {
return req, err
}

@@ -37,7 +50,11 @@ func NewRequest(method, url string, m *dns.Msg) (*http.Request, error) {
return req, nil

case http.MethodPost:
req, err := http.NewRequest(http.MethodPost, "https://"+url+Path+"?bla=foo:443", bytes.NewReader(buf))
req, err := http.NewRequest(
http.MethodPost,
fmt.Sprintf("%s%s?bla=foo:443", url, Path),
bytes.NewReader(buf),
)
if err != nil {
return req, err
}
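A usage sketch for the function above, not part of this change; the import path and the resolver address are assumptions made for illustration:

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	"github.com/coredns/coredns/plugin/pkg/doh" // assumed import path for the package in this diff
	"github.com/miekg/dns"
)

func main() {
	// Build a query for example.org/A.
	q := new(dns.Msg)
	q.SetQuestion("example.org.", dns.TypeA)

	// NewRequest prefixes the URL with https:// when no scheme is given
	// and appends doh.Path ("/dns-query") itself.
	req, err := doh.NewRequest(http.MethodPost, "dns.example.net", q) // hypothetical resolver address
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", doh.MimeType)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	answer := new(dns.Msg)
	if err := answer.Unpack(body); err != nil {
		panic(err)
	}
	fmt.Println(answer.Answer)
}
```

POST keeps the packed message in the body; the GET branch shown above would instead base64url-encode it into the `dns` query parameter.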
|
@ -36,8 +36,7 @@ func SupportedOption(option uint16) bool {
|
|||
|
||||
// Version checks the EDNS version in the request. If error
|
||||
// is nil everything is OK and we can invoke the plugin. If non-nil, the
|
||||
// returned Msg is valid to be returned to the client (and should). For some
|
||||
// reason this response should not contain a question RR in the question section.
|
||||
// returned Msg is valid to be returned to the client (and should).
|
||||
func Version(req *dns.Msg) (*dns.Msg, error) {
|
||||
opt := req.IsEdns0()
|
||||
if opt == nil {
|
||||
|
@ -48,8 +47,6 @@ func Version(req *dns.Msg) (*dns.Msg, error) {
|
|||
}
|
||||
m := new(dns.Msg)
|
||||
m.SetReply(req)
|
||||
// zero out question section, wtf.
|
||||
m.Question = nil
|
||||
|
||||
o := new(dns.OPT)
|
||||
o.Hdr.Name = "."
|
||||
|
|
|
@ -13,7 +13,7 @@ import (
|
|||
"io"
|
||||
golog "log"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// D controls whether we should output debug logs. If true, we do, once set
|
||||
|
@ -21,30 +21,22 @@ import (
|
|||
var D = &d{}
|
||||
|
||||
type d struct {
|
||||
on bool
|
||||
sync.RWMutex
|
||||
on atomic.Bool
|
||||
}
|
||||
|
||||
// Set enables debug logging.
|
||||
func (d *d) Set() {
|
||||
d.Lock()
|
||||
d.on = true
|
||||
d.Unlock()
|
||||
d.on.Store(true)
|
||||
}
|
||||
|
||||
// Clear disables debug logging.
|
||||
func (d *d) Clear() {
|
||||
d.Lock()
|
||||
d.on = false
|
||||
d.Unlock()
|
||||
d.on.Store(false)
|
||||
}
|
||||
|
||||
// Value returns if debug logging is enabled.
|
||||
func (d *d) Value() bool {
|
||||
d.RLock()
|
||||
b := d.on
|
||||
d.RUnlock()
|
||||
return b
|
||||
return d.on.Load()
|
||||
}
|
||||
|
||||
// logf calls log.Printf prefixed with level.
|
||||
|
|
|
@ -1,21 +0,0 @@
|
|||
// Package nonwriter implements a dns.ResponseWriter that never writes, but captures the dns.Msg being written.
|
||||
package nonwriter
|
||||
|
||||
import (
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
// Writer is a type of ResponseWriter that captures the message, but never writes to the client.
|
||||
type Writer struct {
|
||||
dns.ResponseWriter
|
||||
Msg *dns.Msg
|
||||
}
|
||||
|
||||
// New makes and returns a new NonWriter.
|
||||
func New(w dns.ResponseWriter) *Writer { return &Writer{ResponseWriter: w} }
|
||||
|
||||
// WriteMsg records the message, but doesn't write it itself.
|
||||
func (w *Writer) WriteMsg(res *dns.Msg) error {
|
||||
w.Msg = res
|
||||
return nil
|
||||
}
|
|
@@ -33,6 +33,14 @@ func HostPortOrFile(s ...string) ([]string, error) {
var servers []string
for _, h := range s {
trans, host := Transport(h)
if len(host) == 0 {
return servers, fmt.Errorf("invalid address: %q", h)
}

if trans == transport.UNIX {
servers = append(servers, trans+"://"+host)
continue
}

addr, _, err := net.SplitHostPort(host)

@@ -53,6 +61,8 @@ func HostPortOrFile(s ...string) ([]string, error) {
ss = net.JoinHostPort(host, transport.Port)
case transport.TLS:
ss = transport.TLS + "://" + net.JoinHostPort(host, transport.TLSPort)
case transport.QUIC:
ss = transport.QUIC + "://" + net.JoinHostPort(host, transport.QUICPort)
case transport.GRPC:
ss = transport.GRPC + "://" + net.JoinHostPort(host, transport.GRPCPort)
case transport.HTTPS:

@@ -89,7 +99,7 @@ func tryFile(s string) ([]string, error) {

servers := []string{}
for _, s := range c.Servers {
servers = append(servers, net.JoinHostPort(s, c.Port))
servers = append(servers, net.JoinHostPort(stripZone(s), c.Port))
}
return servers, nil
}

@@ -19,6 +19,10 @@ func Transport(s string) (trans string, addr string) {
s = s[len(transport.DNS+"://"):]
return transport.DNS, s

case strings.HasPrefix(s, transport.QUIC+"://"):
s = s[len(transport.QUIC+"://"):]
return transport.QUIC, s

case strings.HasPrefix(s, transport.GRPC+"://"):
s = s[len(transport.GRPC+"://"):]
return transport.GRPC, s

@@ -27,6 +31,9 @@ func Transport(s string) (trans string, addr string) {
s = s[len(transport.HTTPS+"://"):]

return transport.HTTPS, s
case strings.HasPrefix(s, transport.UNIX+"://"):
s = s[len(transport.UNIX+"://"):]
return transport.UNIX, s
}

return transport.DNS, s

@@ -4,8 +4,10 @@ package transport
const (
DNS = "dns"
TLS = "tls"
QUIC = "quic"
GRPC = "grpc"
HTTPS = "https"
UNIX = "unix"
)

// Port numbers for the various transports.

@@ -14,6 +16,8 @@ const (
Port = "53"
// TLSPort is the default port for DNS-over-TLS.
TLSPort = "853"
// QUICPort is the default port for DNS-over-QUIC.
QUICPort = "853"
// GRPCPort is the default port for DNS-over-gRPC.
GRPCPort = "443"
// HTTPSPort is the default port for DNS-over-HTTPS.
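A short sketch of how these helpers behave with the newly recognised `quic://` scheme; the import paths are assumed and the upstream address is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/coredns/coredns/plugin/pkg/parse"     // assumed import path
	"github.com/coredns/coredns/plugin/pkg/transport" // assumed import path
)

func main() {
	// Transport splits the scheme off an upstream address; with this change
	// quic:// is recognised alongside dns://, tls://, grpc://, https:// and unix://.
	trans, addr := parse.Transport("quic://dns.example.net:853") // hypothetical upstream
	fmt.Println(trans == transport.QUIC, addr)                   // true dns.example.net:853

	// HostPortOrFile fills in the default port for the scheme, e.g.
	// transport.QUICPort ("853") when none is given.
	servers, err := parse.HostPortOrFile("quic://dns.example.net")
	if err != nil {
		panic(err)
	}
	fmt.Println(servers) // [quic://dns.example.net:853]
}
```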
@@ -108,5 +108,9 @@ var TimeBuckets = prometheus.ExponentialBuckets(0.00025, 2, 16) // from 0.25ms t
// SlimTimeBuckets is low cardinality set of duration buckets.
var SlimTimeBuckets = prometheus.ExponentialBuckets(0.00025, 10, 5) // from 0.25ms to 2.5 seconds

// NativeHistogramBucketFactor controls the resolution of Prometheus native histogram buckets.
// See: https://pkg.go.dev/github.com/prometheus/client_golang@v1.19.0/prometheus#section-readme
var NativeHistogramBucketFactor = 1.05

// ErrOnce is returned when a plugin doesn't support multiple setups per server.
var ErrOnce = errors.New("this plugin can only be used once per Server Block")
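A sketch of how a plugin histogram might opt into the new variable, mirroring the `HistogramOpts` fields used in the metrics changes above; the `example` subsystem and label set are made up for illustration:

```go
package example

import (
	"github.com/coredns/coredns/plugin"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// requestDuration is a hypothetical per-plugin histogram. Setting
// NativeHistogramBucketFactor (1.05 via the new plugin variable) lets
// Prometheus servers with native histograms enabled collect sparse buckets,
// while the classic plugin.TimeBuckets keep working for older servers.
var requestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
	Namespace:                   plugin.Namespace,
	Subsystem:                   "example",
	Name:                        "request_duration_seconds",
	Buckets:                     plugin.TimeBuckets,
	NativeHistogramBucketFactor: plugin.NativeHistogramBucketFactor,
	Help:                        "Histogram of the time each request took.",
}, []string{"server", "zone"})
```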
|
@ -3,6 +3,7 @@ package test
|
|||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TempFile will create a temporary file on disk and returns the name and a cleanup function to remove it later.
|
||||
|
@ -18,12 +19,9 @@ func TempFile(dir, content string) (string, func(), error) {
|
|||
return f.Name(), rmFunc, nil
|
||||
}
|
||||
|
||||
// WritePEMFiles creates a tmp dir with ca.pem, cert.pem, and key.pem and the func to remove it
|
||||
func WritePEMFiles(dir string) (string, func(), error) {
|
||||
tempDir, err := os.MkdirTemp(dir, "go-test-pemfiles")
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
// WritePEMFiles creates a tmp dir with ca.pem, cert.pem, and key.pem
|
||||
func WritePEMFiles(t *testing.T) (string, error) {
|
||||
tempDir := t.TempDir()
|
||||
|
||||
data := `-----BEGIN CERTIFICATE-----
|
||||
MIIC9zCCAd+gAwIBAgIJALGtqdMzpDemMA0GCSqGSIb3DQEBCwUAMBIxEDAOBgNV
|
||||
|
@ -45,7 +43,7 @@ I1rs/VUGKzcJGVIWbHrgjP68CTStGAvKgbsTqw7aLXTSqtPw88N9XVSyRg==
|
|||
-----END CERTIFICATE-----`
|
||||
path := filepath.Join(tempDir, "ca.pem")
|
||||
if err := os.WriteFile(path, []byte(data), 0644); err != nil {
|
||||
return "", nil, err
|
||||
return "", err
|
||||
}
|
||||
data = `-----BEGIN CERTIFICATE-----
|
||||
MIICozCCAYsCCQCRlf5BrvPuqjANBgkqhkiG9w0BAQsFADASMRAwDgYDVQQDDAdr
|
||||
|
@ -65,8 +63,8 @@ zhDEPP4FhY+Sz+y1yWirphl7A1aZwhXVPcfWIGqpQ3jzNwUeocbH27kuLh+U4hQo
|
|||
qeg10RdFnw==
|
||||
-----END CERTIFICATE-----`
|
||||
path = filepath.Join(tempDir, "cert.pem")
|
||||
if err = os.WriteFile(path, []byte(data), 0644); err != nil {
|
||||
return "", nil, err
|
||||
if err := os.WriteFile(path, []byte(data), 0644); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
data = `-----BEGIN RSA PRIVATE KEY-----
|
||||
|
@ -97,10 +95,9 @@ E/WObVJXDnBdViu0L9abE9iaTToBVri4cmlDlZagLuKVR+TFTCN/DSlVZTDkqkLI
|
|||
8chzqtkH6b2b2R73hyRysWjsomys34ma3mEEPTX/aXeAF2MSZ/EWT9yL
|
||||
-----END RSA PRIVATE KEY-----`
|
||||
path = filepath.Join(tempDir, "key.pem")
|
||||
if err = os.WriteFile(path, []byte(data), 0644); err != nil {
|
||||
return "", nil, err
|
||||
if err := os.WriteFile(path, []byte(data), 0644); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
rmFunc := func() { os.RemoveAll(tempDir) }
|
||||
return tempDir, rmFunc, nil
|
||||
return tempDir, nil
|
||||
}
|
||||
|
|
|
@@ -29,15 +29,19 @@ func (p RRSet) Less(i, j int) bool { return p[i].String() < p[j].String() }
// Case represents a test case that encapsulates various data from a query and response.
// Note that if the TTL of a record is 303 we don't compare it with the TTL.
type Case struct {
Qname string
Qtype uint16
Rcode int
Do bool
AuthenticatedData bool
Answer []dns.RR
Ns []dns.RR
Extra []dns.RR
Error error
Qname string
Qtype uint16
Rcode int
Do bool
CheckingDisabled bool
RecursionAvailable bool
AuthenticatedData bool
Authoritative bool
Truncated bool
Answer []dns.RR
Ns []dns.RR
Extra []dns.RR
Error error
}

// Msg returns a *dns.Msg embedded in c.
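A sketch of a table-driven test entry using the new fields; the `plugin/test` import path and the `test.A` helper are assumptions based on the surrounding package:

```go
package example

import (
	"testing"

	"github.com/coredns/coredns/plugin/test" // assumed import path for this package
	"github.com/miekg/dns"
)

func TestCaseSketch(t *testing.T) {
	tc := test.Case{
		Qname:            "example.org.",
		Qtype:            dns.TypeA,
		Rcode:            dns.RcodeSuccess,
		Do:               true,
		CheckingDisabled: true, // new field: the query carries the CD bit
		Answer: []dns.RR{
			test.A("example.org. 303 IN A 127.0.0.1"), // TTL 303 is ignored in comparisons, per the comment above
		},
	}

	m := tc.Msg() // build the query message described by the case
	if m.Question[0].Name != "example.org." {
		t.Fatalf("unexpected qname %q", m.Question[0].Name)
	}
}
```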
|
|
@ -19,7 +19,6 @@
|
|||
//
|
||||
// result := Scrape("http://localhost:9153/metrics")
|
||||
// v := MetricValue("coredns_cache_capacity", result)
|
||||
//
|
||||
package test
|
||||
|
||||
import (
|
||||
|
@ -217,7 +216,7 @@ func makeBuckets(m *dto.Metric) map[string]string {
|
|||
|
||||
func fetchMetricFamilies(url string, ch chan<- *dto.MetricFamily) {
|
||||
defer close(ch)
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
|
|
@ -1,25 +0,0 @@
|
|||
sudo: required
|
||||
|
||||
language: go
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
os:
|
||||
- linux
|
||||
- windows
|
||||
|
||||
go:
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- 1.x
|
||||
|
||||
install:
|
||||
- go get github.com/gobwas/pool
|
||||
- go get github.com/gobwas/httphead
|
||||
|
||||
script:
|
||||
- if [ "$TRAVIS_OS_NAME" = "windows" ]; then go test ./...; fi
|
||||
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then make test autobahn; fi
|
|
@ -1,6 +1,6 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2017-2018 Sergey Kamardin <gobwas@gmail.com>
|
||||
Copyright (c) 2017-2021 Sergey Kamardin <gobwas@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
|
@ -13,15 +13,22 @@ bin/gocovmerge:
|
|||
|
||||
.PHONY: autobahn
|
||||
autobahn: clean bin/reporter
|
||||
./autobahn/script/test.sh --build
|
||||
./autobahn/script/test.sh --build --follow-logs
|
||||
bin/reporter $(PWD)/autobahn/report/index.json
|
||||
|
||||
.PHONY: autobahn/report
|
||||
autobahn/report: bin/reporter
|
||||
./bin/reporter -http localhost:5555 ./autobahn/report/index.json
|
||||
|
||||
test:
|
||||
go test -coverprofile=ws.coverage .
|
||||
go test -coverprofile=wsutil.coverage ./wsutil
|
||||
go test -coverprofile=wsfalte.coverage ./wsflate
|
||||
# No statements to cover in ./tests (there are only tests).
|
||||
go test ./tests
|
||||
|
||||
cover: bin/gocovmerge test autobahn
|
||||
bin/gocovmerge ws.coverage wsutil.coverage autobahn/report/server.coverage > total.coverage
|
||||
bin/gocovmerge ws.coverage wsutil.coverage wsflate.coverage autobahn/report/server.coverage > total.coverage
|
||||
|
||||
benchcmp: BENCH_BRANCH=$(shell git rev-parse --abbrev-ref HEAD)
|
||||
benchcmp: BENCH_OLD:=$(shell mktemp -t old.XXXX)
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
# ws
|
||||
|
||||
[![GoDoc][godoc-image]][godoc-url]
|
||||
[![Travis][travis-image]][travis-url]
|
||||
[![CI][ci-badge]][ci-url]
|
||||
|
||||
> [RFC6455][rfc-url] WebSocket implementation in Go.
|
||||
|
||||
|
@ -351,10 +351,191 @@ func main() {
|
|||
}
|
||||
```
|
||||
|
||||
# Compression
|
||||
|
||||
There is a `ws/wsflate` package to support [Permessage-Deflate Compression
|
||||
Extension][rfc-pmce].
|
||||
|
||||
It provides minimalistic I/O wrappers to be used in conjunction with any
|
||||
deflate implementation (for example, the standard library's
|
||||
[compress/flate][compress/flate]).
|
||||
|
||||
It is also compatible with `wsutil`'s reader and writer by providing
|
||||
`wsflate.MessageState` type, which implements `wsutil.SendExtension` and
|
||||
`wsutil.RecvExtension` interfaces.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"net"
|
||||
|
||||
"github.com/gobwas/ws"
|
||||
"github.com/gobwas/ws/wsflate"
|
||||
)
|
||||
|
||||
func main() {
|
||||
ln, err := net.Listen("tcp", "localhost:8080")
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
e := wsflate.Extension{
|
||||
// We are using default parameters here since we use
|
||||
// wsflate.{Compress,Decompress}Frame helpers below in the code.
|
||||
// This assumes that we use standard compress/flate package as flate
|
||||
// implementation.
|
||||
Parameters: wsflate.DefaultParameters,
|
||||
}
|
||||
u := ws.Upgrader{
|
||||
Negotiate: e.Negotiate,
|
||||
}
|
||||
for {
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Reset extension after previous upgrades.
|
||||
e.Reset()
|
||||
|
||||
_, err = u.Upgrade(conn)
|
||||
if err != nil {
|
||||
log.Printf("upgrade error: %s", err)
|
||||
continue
|
||||
}
|
||||
if _, ok := e.Accepted(); !ok {
|
||||
log.Printf("didn't negotiate compression for %s", conn.RemoteAddr())
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer conn.Close()
|
||||
for {
|
||||
frame, err := ws.ReadFrame(conn)
|
||||
if err != nil {
|
||||
// Handle error.
|
||||
return
|
||||
}
|
||||
|
||||
frame = ws.UnmaskFrameInPlace(frame)
|
||||
|
||||
if wsflate.IsCompressed(frame.Header) {
|
||||
// Note that even after successful negotiation of
|
||||
// compression extension, both sides are able to send
|
||||
// non-compressed messages.
|
||||
frame, err = wsflate.DecompressFrame(frame)
|
||||
if err != nil {
|
||||
// Handle error.
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Do something with frame...
|
||||
|
||||
ack := ws.NewTextFrame([]byte("this is an acknowledgement"))
|
||||
|
||||
// Compress response unconditionally.
|
||||
ack, err = wsflate.CompressFrame(ack)
|
||||
if err != nil {
|
||||
// Handle error.
|
||||
return
|
||||
}
|
||||
if err = ws.WriteFrame(conn, ack); err != nil {
|
||||
// Handle error.
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
You can use compression with `wsutil` package this way:
|
||||
|
||||
```go
|
||||
// Upgrade somehow and negotiate compression to get the conn...
|
||||
|
||||
// Initialize flate reader. We are using nil as a source io.Reader because
|
||||
// we will Reset() it in the message i/o loop below.
|
||||
fr := wsflate.NewReader(nil, func(r io.Reader) wsflate.Decompressor {
|
||||
return flate.NewReader(r)
|
||||
})
|
||||
// Initialize flate writer. We are using nil as a destination io.Writer
|
||||
// because we will Reset() it in the message i/o loop below.
|
||||
fw := wsflate.NewWriter(nil, func(w io.Writer) wsflate.Compressor {
|
||||
f, _ := flate.NewWriter(w, 9)
|
||||
return f
|
||||
})
|
||||
|
||||
// Declare compression message state variable.
|
||||
//
|
||||
// It has two goals:
|
||||
// - Allow users to check whether received message is compressed or not.
|
||||
// - Help wsutil.Reader and wsutil.Writer to set/unset appropriate
|
||||
// WebSocket header bits while writing next frame to the wire (it
|
||||
// implements wsutil.RecvExtension and wsutil.SendExtension).
|
||||
var msg wsflate.MessageState
|
||||
|
||||
// Initialize WebSocket reader as previously.
|
||||
// Please note the use of Reader.Extensions field as well as
|
||||
// of ws.StateExtended flag.
|
||||
rd := &wsutil.Reader{
|
||||
Source: conn,
|
||||
State: ws.StateServerSide | ws.StateExtended,
|
||||
Extensions: []wsutil.RecvExtension{
|
||||
&msg,
|
||||
},
|
||||
}
|
||||
|
||||
// Initialize WebSocket writer with ws.StateExtended flag as well.
|
||||
wr := wsutil.NewWriter(conn, ws.StateServerSide|ws.StateExtended, 0)
|
||||
// Use the message state as wsutil.SendExtension.
|
||||
wr.SetExtensions(&msg)
|
||||
|
||||
for {
|
||||
h, err := rd.NextFrame()
|
||||
if err != nil {
|
||||
// handle error.
|
||||
}
|
||||
if h.OpCode.IsControl() {
|
||||
// handle control frame.
|
||||
}
|
||||
if !msg.IsCompressed() {
|
||||
// handle uncompressed frame (skipped for the sake of example
|
||||
// simplicity).
|
||||
}
|
||||
|
||||
// Reset the writer to echo same op code.
|
||||
wr.Reset(h.OpCode)
|
||||
|
||||
// Reset both flate reader and writer to start the new round of i/o.
|
||||
fr.Reset(rd)
|
||||
fw.Reset(wr)
|
||||
|
||||
// Copy whole message from reader to writer decompressing it and
|
||||
// compressing again.
|
||||
if _, err := io.Copy(fw, fr); err != nil {
|
||||
// handle error.
|
||||
}
|
||||
// Flush any remaining buffers from flate writer to WebSocket writer.
|
||||
if err := fw.Close(); err != nil {
|
||||
// handle error.
|
||||
}
|
||||
// Flush the whole WebSocket message to the wire.
|
||||
if err := wr.Flush(); err != nil {
|
||||
// handle error.
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
[rfc-url]: https://tools.ietf.org/html/rfc6455
|
||||
[rfc-pmce]: https://tools.ietf.org/html/rfc7692#section-7
|
||||
[godoc-image]: https://godoc.org/github.com/gobwas/ws?status.svg
|
||||
[godoc-url]: https://godoc.org/github.com/gobwas/ws
|
||||
[travis-image]: https://travis-ci.org/gobwas/ws.svg?branch=master
|
||||
[travis-url]: https://travis-ci.org/gobwas/ws
|
||||
[compress/flate]: https://golang.org/pkg/compress/flate/
|
||||
[ci-badge]: https://github.com/gobwas/ws/workflows/CI/badge.svg
|
||||
[ci-url]: https://github.com/gobwas/ws/actions?query=workflow%3ACI
|
||||
|
|
|
@ -36,7 +36,7 @@ func Cipher(payload []byte, mask [4]byte, offset int) {
|
|||
}
|
||||
|
||||
// NOTE: we use here binary.LittleEndian regardless of what is real
|
||||
// endianess on machine is. To do so, we have to use binary.LittleEndian in
|
||||
// endianness on machine is. To do so, we have to use binary.LittleEndian in
|
||||
// the masking loop below as well.
|
||||
var (
|
||||
m = binary.LittleEndian.Uint32(mask[:])
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -145,7 +146,7 @@ type Dialer struct {
|
|||
func (d Dialer) Dial(ctx context.Context, urlstr string) (conn net.Conn, br *bufio.Reader, hs Handshake, err error) {
|
||||
u, err := url.ParseRequestURI(urlstr)
|
||||
if err != nil {
|
||||
return
|
||||
return nil, nil, hs, err
|
||||
}
|
||||
|
||||
// Prepare context to dial with. Initially it is the same as original, but
|
||||
|
@ -163,7 +164,7 @@ func (d Dialer) Dial(ctx context.Context, urlstr string) (conn net.Conn, br *buf
|
|||
}
|
||||
}
|
||||
if conn, err = d.dial(dialctx, u); err != nil {
|
||||
return
|
||||
return conn, nil, hs, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
|
@ -189,7 +190,7 @@ func (d Dialer) Dial(ctx context.Context, urlstr string) (conn net.Conn, br *buf
|
|||
|
||||
br, hs, err = d.Upgrade(conn, u)
|
||||
|
||||
return
|
||||
return conn, br, hs, err
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -204,7 +205,7 @@ func tlsDefaultConfig() *tls.Config {
|
|||
return &tlsEmptyConfig
|
||||
}
|
||||
|
||||
func hostport(host string, defaultPort string) (hostname, addr string) {
|
||||
func hostport(host, defaultPort string) (hostname, addr string) {
|
||||
var (
|
||||
colon = strings.LastIndexByte(host, ':')
|
||||
bracket = strings.IndexByte(host, ']')
|
||||
|
@ -228,7 +229,7 @@ func (d Dialer) dial(ctx context.Context, u *url.URL) (conn net.Conn, err error)
|
|||
hostname, addr := hostport(u.Host, ":443")
|
||||
conn, err = dial(ctx, "tcp", addr)
|
||||
if err != nil {
|
||||
return
|
||||
return nil, err
|
||||
}
|
||||
tlsClient := d.TLSClient
|
||||
if tlsClient == nil {
|
||||
|
@ -241,7 +242,7 @@ func (d Dialer) dial(ctx context.Context, u *url.URL) (conn net.Conn, err error)
|
|||
if wrap := d.WrapConn; wrap != nil {
|
||||
conn = wrap(conn)
|
||||
}
|
||||
return
|
||||
return conn, err
|
||||
}
|
||||
|
||||
func (d Dialer) tlsClient(conn net.Conn, hostname string) net.Conn {
|
||||
|
@ -310,29 +311,29 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha
|
|||
initNonce(nonce)
|
||||
|
||||
httpWriteUpgradeRequest(bw, u, nonce, d.Protocols, d.Extensions, d.Header)
|
||||
if err = bw.Flush(); err != nil {
|
||||
return
|
||||
if err := bw.Flush(); err != nil {
|
||||
return br, hs, err
|
||||
}
|
||||
|
||||
// Read HTTP status line like "HTTP/1.1 101 Switching Protocols".
|
||||
sl, err := readLine(br)
|
||||
if err != nil {
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
// Begin validation of the response.
|
||||
// See https://tools.ietf.org/html/rfc6455#section-4.2.2
|
||||
// Parse request line data like HTTP version, uri and method.
|
||||
resp, err := httpParseResponseLine(sl)
|
||||
if err != nil {
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
// Even if RFC says "1.1 or higher" without mentioning the part of the
|
||||
// version, we apply it only to minor part.
|
||||
if resp.major != 1 || resp.minor < 1 {
|
||||
err = ErrHandshakeBadProtocol
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
if resp.status != 101 {
|
||||
if resp.status != http.StatusSwitchingProtocols {
|
||||
err = StatusError(resp.status)
|
||||
if onStatusError := d.OnStatusError; onStatusError != nil {
|
||||
// Invoke callback with multireader of status-line bytes br.
|
||||
|
@ -344,7 +345,7 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha
|
|||
),
|
||||
)
|
||||
}
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
// If response status is 101 then we expect all technical headers to be
|
||||
// valid. If not, then we stop processing response without giving user
|
||||
|
@ -355,7 +356,7 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha
|
|||
line, e := readLine(br)
|
||||
if e != nil {
|
||||
err = e
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
if len(line) == 0 {
|
||||
// Blank line, no more lines to read.
|
||||
|
@ -365,7 +366,7 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha
|
|||
k, v, ok := httpParseHeaderLine(line)
|
||||
if !ok {
|
||||
err = ErrMalformedResponse
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
|
||||
switch btsToString(k) {
|
||||
|
@ -373,7 +374,7 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha
|
|||
headerSeen |= headerSeenUpgrade
|
||||
if !bytes.Equal(v, specHeaderValueUpgrade) && !bytes.EqualFold(v, specHeaderValueUpgrade) {
|
||||
err = ErrHandshakeBadUpgrade
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
|
||||
case headerConnectionCanonical:
|
||||
|
@ -384,14 +385,14 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha
|
|||
// multiple tokens. But in response it must contain exactly one.
|
||||
if !bytes.Equal(v, specHeaderValueConnection) && !bytes.EqualFold(v, specHeaderValueConnection) {
|
||||
err = ErrHandshakeBadConnection
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
|
||||
case headerSecAcceptCanonical:
|
||||
headerSeen |= headerSeenSecAccept
|
||||
if !checkAcceptFromNonce(v, nonce) {
|
||||
err = ErrHandshakeBadSecAccept
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
|
||||
case headerSecProtocolCanonical:
|
||||
|
@ -409,20 +410,20 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha
|
|||
// Server echoed subprotocol that is not present in client
|
||||
// requested protocols.
|
||||
err = ErrHandshakeBadSubProtocol
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
|
||||
case headerSecExtensionsCanonical:
|
||||
hs.Extensions, err = matchSelectedExtensions(v, d.Extensions, hs.Extensions)
|
||||
if err != nil {
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
|
||||
default:
|
||||
if onHeader := d.OnHeader; onHeader != nil {
|
||||
if e := onHeader(k, v); e != nil {
|
||||
err = e
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -439,7 +440,7 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha
|
|||
panic("unknown headers state")
|
||||
}
|
||||
}
|
||||
return
|
||||
return br, hs, err
|
||||
}
|
||||
|
||||
// PutReader returns bufio.Reader instance to the inner reuse pool.
|
||||
|
@ -474,10 +475,19 @@ func matchSelectedExtensions(selected []byte, wanted, received []httphead.Option
|
|||
index = -1
|
||||
match := func() (ok bool) {
|
||||
for _, want := range wanted {
|
||||
if option.Equal(want) {
|
||||
// A server accepts one or more extensions by including a
|
||||
// |Sec-WebSocket-Extensions| header field containing one or more
|
||||
// extensions that were requested by the client.
|
||||
//
|
||||
// The interpretation of any extension parameters, and what
|
||||
// constitutes a valid response by a server to a requested set of
|
||||
// parameters by a client, will be defined by each such extension.
|
||||
if bytes.Equal(option.Name, want.Name) {
|
||||
// Check parsed extension to be present in client
|
||||
// requested extensions. We move matched extension
|
||||
// from client list to avoid allocation.
|
||||
// from client list to avoid allocation of httphead.Option.Name,
|
||||
// httphead.Option.Parameters have to be copied from the header
|
||||
want.Parameters, _ = option.Parameters.Copy(make([]byte, option.Parameters.Size()))
|
||||
received = append(received, want)
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
//go:build go1.8
|
||||
// +build go1.8
|
||||
|
||||
package ws
|
||||
|
|
|
@ -11,70 +11,70 @@ Upgrade to WebSocket (or WebSocket handshake) can be done in two ways.
|
|||
|
||||
The first way is to use `net/http` server:
|
||||
|
||||
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
conn, _, _, err := ws.UpgradeHTTP(r, w)
|
||||
})
|
||||
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
conn, _, _, err := ws.UpgradeHTTP(r, w)
|
||||
})
|
||||
|
||||
The second and much more efficient way is so-called "zero-copy upgrade". It
|
||||
avoids redundant allocations and copying of unused headers or other request
data. The user decides which data should be copied.
|
||||
|
||||
ln, err := net.Listen("tcp", ":8080")
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
ln, err := net.Listen("tcp", ":8080")
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
|
||||
handshake, err := ws.Upgrade(conn)
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
handshake, err := ws.Upgrade(conn)
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
|
||||
For customization details see `ws.Upgrader` documentation.
|
||||
|
||||
After WebSocket handshake you can work with the connection in multiple ways.
That is, `ws` does not force only one way of how to work with WebSocket:
|
||||
|
||||
header, err := ws.ReadHeader(conn)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
header, err := ws.ReadHeader(conn)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
|
||||
buf := make([]byte, header.Length)
|
||||
_, err := io.ReadFull(conn, buf)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
buf := make([]byte, header.Length)
|
||||
_, err := io.ReadFull(conn, buf)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
|
||||
resp := ws.NewBinaryFrame([]byte("hello, world!"))
|
||||
if err := ws.WriteFrame(conn, frame); err != nil {
|
||||
// handle err
|
||||
}
|
||||
resp := ws.NewBinaryFrame([]byte("hello, world!"))
|
||||
if err := ws.WriteFrame(conn, frame); err != nil {
|
||||
// handle err
|
||||
}
|
||||
|
||||
As you can see, it is stream friendly:
|
||||
|
||||
const N = 42
|
||||
const N = 42
|
||||
|
||||
ws.WriteHeader(ws.Header{
|
||||
Fin: true,
|
||||
Length: N,
|
||||
OpCode: ws.OpBinary,
|
||||
})
|
||||
ws.WriteHeader(ws.Header{
|
||||
Fin: true,
|
||||
Length: N,
|
||||
OpCode: ws.OpBinary,
|
||||
})
|
||||
|
||||
io.CopyN(conn, rand.Reader, N)
|
||||
io.CopyN(conn, rand.Reader, N)
|
||||
|
||||
Or:
|
||||
|
||||
header, err := ws.ReadHeader(conn)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
header, err := ws.ReadHeader(conn)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
|
||||
io.CopyN(ioutil.Discard, conn, header.Length)
|
||||
io.CopyN(ioutil.Discard, conn, header.Length)
|
||||
|
||||
For more info see the documentation.
|
||||
*/
|
||||
|
|
|
@@ -2,12 +2,12 @@ package ws

// RejectOption represents an option used to control the way connection is
// rejected.
type RejectOption func(*rejectConnectionError)
type RejectOption func(*ConnectionRejectedError)

// RejectionReason returns an option that makes connection to be rejected with
// given reason.
func RejectionReason(reason string) RejectOption {
return func(err *rejectConnectionError) {
return func(err *ConnectionRejectedError) {
err.reason = reason
}
}

@@ -15,7 +15,7 @@ func RejectionReason(reason string) RejectOption {
// RejectionStatus returns an option that makes connection to be rejected with
// given HTTP status code.
func RejectionStatus(code int) RejectOption {
return func(err *rejectConnectionError) {
return func(err *ConnectionRejectedError) {
err.code = code
}
}

@@ -23,32 +23,37 @@ func RejectionStatus(code int) RejectOption {
// RejectionHeader returns an option that makes connection to be rejected with
// given HTTP headers.
func RejectionHeader(h HandshakeHeader) RejectOption {
return func(err *rejectConnectionError) {
return func(err *ConnectionRejectedError) {
err.header = h
}
}

// RejectConnectionError constructs an error that could be used to control the way
// handshake is rejected by Upgrader.
// RejectConnectionError constructs an error that could be used to control the
// way handshake is rejected by Upgrader.
func RejectConnectionError(options ...RejectOption) error {
err := new(rejectConnectionError)
err := new(ConnectionRejectedError)
for _, opt := range options {
opt(err)
}
return err
}

// rejectConnectionError represents a rejection of upgrade error.
// ConnectionRejectedError represents a rejection of connection during
// WebSocket handshake error.
//
// It can be returned by Upgrader's On* hooks to control the way WebSocket
// handshake is rejected.
type rejectConnectionError struct {
// It can be returned by Upgrader's On* hooks to indicate that WebSocket
// handshake should be rejected.
type ConnectionRejectedError struct {
reason string
code int
header HandshakeHeader
}

// Error implements error interface.
func (r *rejectConnectionError) Error() string {
func (r *ConnectionRejectedError) Error() string {
return r.reason
}

func (r *ConnectionRejectedError) StatusCode() int {
return r.code
}
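A sketch of how a server might use the now-exported type; it assumes the Upgrader's `OnRequest` hook and that `Upgrade` surfaces the rejection error to its caller:

```go
package main

import (
	"errors"
	"log"
	"net"

	"github.com/gobwas/ws"
)

func main() {
	ln, err := net.Listen("tcp", "localhost:8080")
	if err != nil {
		log.Fatal(err)
	}
	u := ws.Upgrader{
		// OnRequest rejects anything that is not /ws; RejectConnectionError
		// builds the (now exported) *ws.ConnectionRejectedError.
		OnRequest: func(uri []byte) error {
			if string(uri) != "/ws" {
				return ws.RejectConnectionError(
					ws.RejectionStatus(404),
					ws.RejectionReason("no such endpoint"),
				)
			}
			return nil
		},
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		_, err = u.Upgrade(conn)
		var rej *ws.ConnectionRejectedError
		if errors.As(err, &rej) {
			// With the rename, rejection details are reachable by callers.
			log.Printf("rejected handshake: %d %s", rej.StatusCode(), rej.Error())
			conn.Close()
			continue
		}
		if err != nil {
			log.Printf("upgrade error: %v", err)
			conn.Close()
			continue
		}
		// ... serve the WebSocket connection ...
		conn.Close()
	}
}
```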
|
|
@@ -206,6 +206,28 @@ func (h Header) Rsv2() bool { return h.Rsv&bit6 != 0 }
// Rsv3 reports whether the header has third rsv bit set.
func (h Header) Rsv3() bool { return h.Rsv&bit7 != 0 }

// Rsv creates rsv byte representation from bits.
func Rsv(r1, r2, r3 bool) (rsv byte) {
if r1 {
rsv |= bit5
}
if r2 {
rsv |= bit6
}
if r3 {
rsv |= bit7
}
return rsv
}

// RsvBits returns rsv bits from bytes representation.
func RsvBits(rsv byte) (r1, r2, r3 bool) {
r1 = rsv&bit5 != 0
r2 = rsv&bit6 != 0
r3 = rsv&bit7 != 0
return r1, r2, r3
}

// Frame represents websocket frame.
// See https://tools.ietf.org/html/rfc6455#section-5.2
type Frame struct {

@@ -319,6 +341,29 @@ func MaskFrameInPlace(f Frame) Frame {
return MaskFrameInPlaceWith(f, NewMask())
}

var zeroMask [4]byte

// UnmaskFrame unmasks frame and returns frame with unmasked payload and Mask
// header's field cleared.
// Note that it copies f payload.
func UnmaskFrame(f Frame) Frame {
p := make([]byte, len(f.Payload))
copy(p, f.Payload)
f.Payload = p
return UnmaskFrameInPlace(f)
}

// UnmaskFrameInPlace unmasks frame and returns frame with unmasked payload and
// Mask header's field cleared.
// Note that it applies xor cipher to f.Payload without copying, that is, it
// modifies f.Payload inplace.
func UnmaskFrameInPlace(f Frame) Frame {
Cipher(f.Payload, f.Header.Mask, 0)
f.Header.Masked = false
f.Header.Mask = zeroMask
return f
}
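A brief sketch exercising the newly added helpers (`RsvBits`, `UnmaskFrame`) together with the existing frame constructors from this package:

```go
package main

import (
	"fmt"

	"github.com/gobwas/ws"
)

func main() {
	// Rsv packs the three reserved bits into the header byte; RsvBits is the
	// inverse, newly added in this change.
	rsv := ws.Rsv(true, false, false) // e.g. RSV1 set by permessage-deflate
	r1, r2, r3 := ws.RsvBits(rsv)
	fmt.Println(r1, r2, r3) // true false false

	// UnmaskFrame copies the payload before unmasking, so the original
	// (masked) frame stays untouched; UnmaskFrameInPlace would modify it.
	masked := ws.MaskFrameInPlace(ws.NewTextFrame([]byte("hello")))
	plain := ws.UnmaskFrame(masked)
	fmt.Printf("%s\n", plain.Payload) // hello
}
```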
// MaskFrameInPlaceWith masks frame with given mask and returns frame
|
||||
// with masked payload and Mask header's field set.
|
||||
// Note that it applies xor cipher to f.Payload without copying, that is, it
|
||||
|
@ -333,7 +378,7 @@ func MaskFrameInPlaceWith(f Frame, m [4]byte) Frame {
|
|||
// NewMask creates new random mask.
|
||||
func NewMask() (ret [4]byte) {
|
||||
binary.BigEndian.PutUint32(ret[:], rand.Uint32())
|
||||
return
|
||||
return ret
|
||||
}
|
||||
|
||||
// CompileFrame returns byte representation of given frame.
|
||||
|
@ -343,7 +388,7 @@ func CompileFrame(f Frame) (bts []byte, err error) {
|
|||
buf := bytes.NewBuffer(make([]byte, 0, 16))
|
||||
err = WriteFrame(buf, f)
|
||||
bts = buf.Bytes()
|
||||
return
|
||||
return bts, err
|
||||
}
|
||||
|
||||
// MustCompileFrame is like CompileFrame but panics if frame can not be
|
||||
|
@ -356,20 +401,6 @@ func MustCompileFrame(f Frame) []byte {
|
|||
return bts
|
||||
}
|
||||
|
||||
// Rsv creates rsv byte representation.
|
||||
func Rsv(r1, r2, r3 bool) (rsv byte) {
|
||||
if r1 {
|
||||
rsv |= bit5
|
||||
}
|
||||
if r2 {
|
||||
rsv |= bit6
|
||||
}
|
||||
if r3 {
|
||||
rsv |= bit7
|
||||
}
|
||||
return rsv
|
||||
}
|
||||
|
||||
func makeCloseFrame(code StatusCode) Frame {
|
||||
return NewCloseFrame(NewCloseFrameBody(code, ""))
|
||||
}
|
||||
|
|
|
@ -5,7 +5,6 @@ import (
|
|||
"bytes"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
|
@ -38,7 +37,8 @@ var (
|
|||
textTailErrUpgradeRequired = errorText(ErrHandshakeUpgradeRequired)
|
||||
)
|
||||
|
||||
var (
|
||||
const (
|
||||
// Every new header must be added to TestHeaderNames test.
|
||||
headerHost = "Host"
|
||||
headerUpgrade = "Upgrade"
|
||||
headerConnection = "Connection"
|
||||
|
@ -48,14 +48,14 @@ var (
|
|||
headerSecKey = "Sec-WebSocket-Key"
|
||||
headerSecAccept = "Sec-WebSocket-Accept"
|
||||
|
||||
headerHostCanonical = textproto.CanonicalMIMEHeaderKey(headerHost)
|
||||
headerUpgradeCanonical = textproto.CanonicalMIMEHeaderKey(headerUpgrade)
|
||||
headerConnectionCanonical = textproto.CanonicalMIMEHeaderKey(headerConnection)
|
||||
headerSecVersionCanonical = textproto.CanonicalMIMEHeaderKey(headerSecVersion)
|
||||
headerSecProtocolCanonical = textproto.CanonicalMIMEHeaderKey(headerSecProtocol)
|
||||
headerSecExtensionsCanonical = textproto.CanonicalMIMEHeaderKey(headerSecExtensions)
|
||||
headerSecKeyCanonical = textproto.CanonicalMIMEHeaderKey(headerSecKey)
|
||||
headerSecAcceptCanonical = textproto.CanonicalMIMEHeaderKey(headerSecAccept)
|
||||
headerHostCanonical = headerHost
|
||||
headerUpgradeCanonical = headerUpgrade
|
||||
headerConnectionCanonical = headerConnection
|
||||
headerSecVersionCanonical = "Sec-Websocket-Version"
|
||||
headerSecProtocolCanonical = "Sec-Websocket-Protocol"
|
||||
headerSecExtensionsCanonical = "Sec-Websocket-Extensions"
|
||||
headerSecKeyCanonical = "Sec-Websocket-Key"
|
||||
headerSecAcceptCanonical = "Sec-Websocket-Accept"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -91,10 +91,8 @@ func httpParseRequestLine(line []byte) (req httpRequestLine, err error) {
|
|||
req.major, req.minor, ok = httpParseVersion(proto)
|
||||
if !ok {
|
||||
err = ErrMalformedRequest
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
return req, err
|
||||
}
|
||||
|
||||
func httpParseResponseLine(line []byte) (resp httpResponseLine, err error) {
|
||||
|
@ -128,25 +126,25 @@ func httpParseVersion(bts []byte) (major, minor int, ok bool) {
|
|||
case bytes.Equal(bts, httpVersion1_1):
|
||||
return 1, 1, true
|
||||
case len(bts) < 8:
|
||||
return
|
||||
return 0, 0, false
|
||||
case !bytes.Equal(bts[:5], httpVersionPrefix):
|
||||
return
|
||||
return 0, 0, false
|
||||
}
|
||||
|
||||
bts = bts[5:]
|
||||
|
||||
dot := bytes.IndexByte(bts, '.')
|
||||
if dot == -1 {
|
||||
return
|
||||
return 0, 0, false
|
||||
}
|
||||
var err error
|
||||
major, err = asciiToInt(bts[:dot])
|
||||
if err != nil {
|
||||
return
|
||||
return major, 0, false
|
||||
}
|
||||
minor, err = asciiToInt(bts[dot+1:])
|
||||
if err != nil {
|
||||
return
|
||||
return major, minor, false
|
||||
}
|
||||
|
||||
return major, minor, true
|
||||
|
@ -157,7 +155,7 @@ func httpParseVersion(bts []byte) (major, minor int, ok bool) {
|
|||
func httpParseHeaderLine(line []byte) (k, v []byte, ok bool) {
|
||||
colon := bytes.IndexByte(line, ':')
|
||||
if colon == -1 {
|
||||
return
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
k = btrim(line[:colon])
|
||||
|
@ -198,8 +196,9 @@ func strSelectProtocol(h string, check func(string) bool) (ret string, ok bool)
|
|||
}
|
||||
return true
|
||||
})
|
||||
return
|
||||
return ret, ok
|
||||
}
|
||||
|
||||
func btsSelectProtocol(h []byte, check func([]byte) bool) (ret string, ok bool) {
|
||||
var selected []byte
|
||||
ok = httphead.ScanTokens(h, func(v []byte) bool {
|
||||
|
@ -212,21 +211,57 @@ func btsSelectProtocol(h []byte, check func([]byte) bool) (ret string, ok bool)
|
|||
if ok && selected != nil {
|
||||
return string(selected), true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func strSelectExtensions(h string, selected []httphead.Option, check func(httphead.Option) bool) ([]httphead.Option, bool) {
|
||||
return btsSelectExtensions(strToBytes(h), selected, check)
|
||||
return ret, ok
|
||||
}
|
||||
|
||||
func btsSelectExtensions(h []byte, selected []httphead.Option, check func(httphead.Option) bool) ([]httphead.Option, bool) {
|
||||
s := httphead.OptionSelector{
|
||||
Flags: httphead.SelectUnique | httphead.SelectCopy,
|
||||
Flags: httphead.SelectCopy,
|
||||
Check: check,
|
||||
}
|
||||
return s.Select(h, selected)
|
||||
}
|
||||
|
||||
func negotiateMaybe(in httphead.Option, dest []httphead.Option, f func(httphead.Option) (httphead.Option, error)) ([]httphead.Option, error) {
|
||||
if in.Size() == 0 {
|
||||
return dest, nil
|
||||
}
|
||||
opt, err := f(in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.Size() > 0 {
|
||||
dest = append(dest, opt)
|
||||
}
|
||||
return dest, nil
|
||||
}
|
||||
|
||||
func negotiateExtensions(
|
||||
h []byte, dest []httphead.Option,
|
||||
f func(httphead.Option) (httphead.Option, error),
|
||||
) (_ []httphead.Option, err error) {
|
||||
index := -1
|
||||
var current httphead.Option
|
||||
ok := httphead.ScanOptions(h, func(i int, name, attr, val []byte) httphead.Control {
|
||||
if i != index {
|
||||
dest, err = negotiateMaybe(current, dest, f)
|
||||
if err != nil {
|
||||
return httphead.ControlBreak
|
||||
}
|
||||
index = i
|
||||
current = httphead.Option{Name: name}
|
||||
}
|
||||
if attr != nil {
|
||||
current.Parameters.Set(attr, val)
|
||||
}
|
||||
return httphead.ControlContinue
|
||||
})
|
||||
if !ok {
|
||||
return nil, ErrMalformedRequest
|
||||
}
|
||||
return negotiateMaybe(current, dest, f)
|
||||
}
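A brief sketch of how this negotiation path behaves (the extension name, parameter, and header value below are illustrative assumptions, not taken from this change): the callback accepts one known offer by building a fresh option and skips everything else by returning a zero Option.

accept := func(opt httphead.Option) (httphead.Option, error) {
	if string(opt.Name) == "permessage-deflate" { // hypothetical extension name
		// Accept the offer, dropping any offered parameters.
		return httphead.NewOption("permessage-deflate", nil), nil
	}
	return httphead.Option{}, nil // zero option: not negotiated, no error
}
exts, err := negotiateExtensions([]byte("permessage-deflate; server_no_context_takeover, x-unknown"), nil, accept)
// exts now holds a single permessage-deflate option; x-unknown was skipped.
_, _ = exts, err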
func httpWriteHeader(bw *bufio.Writer, key, value string) {
|
||||
httpWriteHeaderKey(bw, key)
|
||||
bw.WriteString(value)
|
||||
|
|
|
@ -65,8 +65,6 @@ func initAcceptFromNonce(accept, nonce []byte) {
|
|||
|
||||
sum := sha1.Sum(p)
|
||||
base64.StdEncoding.Encode(accept, sum[:])
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func writeAccept(bw *bufio.Writer, nonce []byte) (int, error) {
|
||||
|
|
|
@ -24,7 +24,7 @@ func ReadHeader(r io.Reader) (h Header, err error) {
|
|||
// Prepare to hold first 2 bytes to choose size of next read.
|
||||
_, err = io.ReadFull(r, bts)
|
||||
if err != nil {
|
||||
return
|
||||
return h, err
|
||||
}
|
||||
|
||||
h.Fin = bts[0]&bit0 != 0
|
||||
|
@ -51,11 +51,11 @@ func ReadHeader(r io.Reader) (h Header, err error) {
|
|||
|
||||
default:
|
||||
err = ErrHeaderLengthUnexpected
|
||||
return
|
||||
return h, err
|
||||
}
|
||||
|
||||
if extra == 0 {
|
||||
return
|
||||
return h, err
|
||||
}
|
||||
|
||||
// Increase len of bts to extra bytes need to read.
|
||||
|
@ -63,7 +63,7 @@ func ReadHeader(r io.Reader) (h Header, err error) {
|
|||
bts = bts[:extra]
|
||||
_, err = io.ReadFull(r, bts)
|
||||
if err != nil {
|
||||
return
|
||||
return h, err
|
||||
}
|
||||
|
||||
switch {
|
||||
|
@ -74,7 +74,7 @@ func ReadHeader(r io.Reader) (h Header, err error) {
|
|||
case length == 127:
|
||||
if bts[0]&0x80 != 0 {
|
||||
err = ErrHeaderLengthMSB
|
||||
return
|
||||
return h, err
|
||||
}
|
||||
h.Length = int64(binary.BigEndian.Uint64(bts[:8]))
|
||||
bts = bts[8:]
|
||||
|
@ -84,7 +84,7 @@ func ReadHeader(r io.Reader) (h Header, err error) {
|
|||
copy(h.Mask[:], bts)
|
||||
}
|
||||
|
||||
return
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// ReadFrame reads a frame from r.
|
||||
|
@ -95,7 +95,7 @@ func ReadHeader(r io.Reader) (h Header, err error) {
|
|||
func ReadFrame(r io.Reader) (f Frame, err error) {
|
||||
f.Header, err = ReadHeader(r)
|
||||
if err != nil {
|
||||
return
|
||||
return f, err
|
||||
}
|
||||
|
||||
if f.Header.Length > 0 {
|
||||
|
@ -105,7 +105,7 @@ func ReadFrame(r io.Reader) (f Frame, err error) {
|
|||
_, err = io.ReadFull(r, f.Payload)
|
||||
}
|
||||
|
||||
return
|
||||
return f, err
|
||||
}
|
||||
|
||||
// MustReadFrame is like ReadFrame but panics if frame can not be read.
|
||||
|
@ -128,20 +128,20 @@ func ParseCloseFrameData(payload []byte) (code StatusCode, reason string) {
|
|||
// In other words, we are ignoring this rule [RFC6455:7.1.5]:
|
||||
// If this Close control frame contains no status code, _The WebSocket
|
||||
// Connection Close Code_ is considered to be 1005.
|
||||
return
|
||||
return code, reason
|
||||
}
|
||||
code = StatusCode(binary.BigEndian.Uint16(payload))
|
||||
reason = string(payload[2:])
|
||||
return
|
||||
return code, reason
|
||||
}
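For reference, a close payload is a 2-byte big-endian status code followed by an optional UTF-8 reason. A minimal sketch of building and parsing one (the 1000/"bye" values are illustrative):

payload := make([]byte, 2+len("bye"))
binary.BigEndian.PutUint16(payload, 1000) // 1000: normal closure
copy(payload[2:], "bye")

code, reason := ParseCloseFrameData(payload)
// code == StatusCode(1000), reason == "bye"
_, _ = code, reason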
// ParseCloseFrameDataUnsafe is like ParseCloseFrameData except that it does
// not copy payload bytes into reason, but prepares an unsafe cast instead.
|
||||
func ParseCloseFrameDataUnsafe(payload []byte) (code StatusCode, reason string) {
|
||||
if len(payload) < 2 {
|
||||
return
|
||||
return code, reason
|
||||
}
|
||||
code = StatusCode(binary.BigEndian.Uint16(payload))
|
||||
reason = btsToString(payload[2:])
|
||||
return
|
||||
return code, reason
|
||||
}
|
||||
|
|
|
@ -24,11 +24,11 @@ const (
|
|||
var (
|
||||
ErrHandshakeBadProtocol = RejectConnectionError(
|
||||
RejectionStatus(http.StatusHTTPVersionNotSupported),
|
||||
RejectionReason(fmt.Sprintf("handshake error: bad HTTP protocol version")),
|
||||
RejectionReason("handshake error: bad HTTP protocol version"),
|
||||
)
|
||||
ErrHandshakeBadMethod = RejectConnectionError(
|
||||
RejectionStatus(http.StatusMethodNotAllowed),
|
||||
RejectionReason(fmt.Sprintf("handshake error: bad HTTP request method")),
|
||||
RejectionReason("handshake error: bad HTTP request method"),
|
||||
)
|
||||
ErrHandshakeBadHost = RejectConnectionError(
|
||||
RejectionStatus(http.StatusBadRequest),
|
||||
|
@ -129,7 +129,22 @@ type HTTPUpgrader struct {
|
|||
// Extension is the select function that is used to select extensions from
|
||||
// list requested by the client. If this field is set, then all matched
|
||||
// extensions are sent to a client as negotiated.
|
||||
//
|
||||
// Deprecated: use Negotiate instead.
|
||||
Extension func(httphead.Option) bool
|
||||
|
||||
// Negotiate is the callback that is used to negotiate extensions from
|
||||
// the client's offer. If this field is set, then the returned non-zero
|
||||
// extensions are sent to the client as accepted extensions in the
|
||||
// response.
|
||||
//
|
||||
// The argument is only valid until the Negotiate callback returns.
|
||||
//
|
||||
// If returned error is non-nil then connection is rejected and response is
|
||||
// sent with appropriate HTTP error code and body set to error message.
|
||||
//
|
||||
// RejectConnectionError could be used to get more control on response.
|
||||
Negotiate func(httphead.Option) (httphead.Option, error)
|
||||
}
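A caller-side sketch of wiring the Negotiate field declared above (the extension names are illustrative; the net/http, github.com/gobwas/ws, and github.com/gobwas/httphead imports and the httphead.NewOption helper are assumptions of this sketch):

u := ws.HTTPUpgrader{
	Negotiate: func(opt httphead.Option) (httphead.Option, error) {
		switch string(opt.Name) {
		case "permessage-deflate": // accept this offer without parameters
			return httphead.NewOption("permessage-deflate", nil), nil
		case "x-banned": // hypothetical: abort the whole handshake
			return httphead.Option{}, ws.RejectConnectionError(
				ws.RejectionStatus(http.StatusBadRequest),
				ws.RejectionReason("extension not allowed"),
			)
		default:
			return httphead.Option{}, nil // unknown extensions are not negotiated
		}
	},
}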
// Upgrade upgrades http connection to the websocket connection.
|
||||
|
@ -148,7 +163,7 @@ func (u HTTPUpgrader) Upgrade(r *http.Request, w http.ResponseWriter) (conn net.
|
|||
}
|
||||
if err != nil {
|
||||
httpError(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
return conn, rw, hs, err
|
||||
}
|
||||
|
||||
// See https://tools.ietf.org/html/rfc6455#section-4.1
|
||||
|
@ -200,11 +215,20 @@ func (u HTTPUpgrader) Upgrade(r *http.Request, w http.ResponseWriter) (conn net.
|
|||
}
|
||||
}
|
||||
}
|
||||
if check := u.Extension; err == nil && check != nil {
|
||||
if f := u.Negotiate; err == nil && f != nil {
|
||||
for _, h := range r.Header[headerSecExtensionsCanonical] {
|
||||
hs.Extensions, err = negotiateExtensions(strToBytes(h), hs.Extensions, f)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// DEPRECATED path.
|
||||
if check := u.Extension; err == nil && check != nil && u.Negotiate == nil {
|
||||
xs := r.Header[headerSecExtensionsCanonical]
|
||||
for i := 0; i < len(xs) && err == nil; i++ {
|
||||
var ok bool
|
||||
hs.Extensions, ok = strSelectExtensions(xs[i], hs.Extensions, check)
|
||||
hs.Extensions, ok = btsSelectExtensions(strToBytes(xs[i]), hs.Extensions, check)
|
||||
if !ok {
|
||||
err = ErrMalformedRequest
|
||||
}
|
||||
|
@ -227,7 +251,7 @@ func (u HTTPUpgrader) Upgrade(r *http.Request, w http.ResponseWriter) (conn net.
|
|||
err = rw.Writer.Flush()
|
||||
} else {
|
||||
var code int
|
||||
if rej, ok := err.(*rejectConnectionError); ok {
|
||||
if rej, ok := err.(*ConnectionRejectedError); ok {
|
||||
code = rej.code
|
||||
header[1] = rej.header
|
||||
}
|
||||
|
@ -236,9 +260,9 @@ func (u HTTPUpgrader) Upgrade(r *http.Request, w http.ResponseWriter) (conn net.
|
|||
}
|
||||
httpWriteResponseError(rw.Writer, err, code, header.WriteTo)
|
||||
// Do not store Flush() error to not override already existing one.
|
||||
rw.Writer.Flush()
|
||||
_ = rw.Writer.Flush()
|
||||
}
|
||||
return
|
||||
return conn, rw, hs, err
|
||||
}
|
||||
|
||||
// Upgrader contains options for upgrading connection to websocket.
|
||||
|
@ -271,6 +295,9 @@ type Upgrader struct {
|
|||
// from the list requested by the client. If this field is set, then all matched
|
||||
// extensions are sent to a client as negotiated.
|
||||
//
|
||||
// Note that Extension may be called multiple times and implementations
|
||||
// must track uniqueness of accepted extensions manually.
|
||||
//
|
||||
// The argument is only valid until the callback returns.
|
||||
//
|
||||
// According to the RFC6455 order of extensions passed by a client is
|
||||
|
@ -283,13 +310,38 @@ type Upgrader struct {
|
|||
// fields listed by the client in its request represent a preference of the
|
||||
// header fields it wishes to use, with the first options listed being most
|
||||
// preferable."
|
||||
//
|
||||
// Deprecated: use Negotiate instead.
|
||||
Extension func(httphead.Option) bool
|
||||
|
||||
// ExtensionCustom allow user to parse Sec-WebSocket-Extensions header manually.
|
||||
// ExtensionCustom allows the user to parse the Sec-WebSocket-Extensions header
|
||||
// manually.
|
||||
//
|
||||
// If ExtensionCustom() decides to accept received extension, it must
|
||||
// append appropriate option to the given slice of httphead.Option.
|
||||
// It returns results of append() to the given slice and a flag that
|
||||
// reports whether the given header value is well-formed or not.
|
||||
//
|
||||
// Note that ExtensionCustom may be called multiple times and
|
||||
// implementations must track uniqueness of accepted extensions manually.
|
||||
//
|
||||
// Note that returned options should be valid until Upgrade returns.
|
||||
// If ExtensionCustom is set, it is used instead of the Extension function.
|
||||
ExtensionCustom func([]byte, []httphead.Option) ([]httphead.Option, bool)
|
||||
|
||||
// Negotiate is the callback that is used to negotiate extensions from
|
||||
// the client's offer. If this field is set, then the returned non-zero
|
||||
// extensions are sent to the client as accepted extensions in the
|
||||
// response.
|
||||
//
|
||||
// The argument is only valid until the Negotiate callback returns.
|
||||
//
|
||||
// If returned error is non-nil then connection is rejected and response is
|
||||
// sent with appropriate HTTP error code and body set to error message.
|
||||
//
|
||||
// RejectConnectionError could be used to get more control on response.
|
||||
Negotiate func(httphead.Option) (httphead.Option, error)
|
||||
|
||||
// Header is an optional HandshakeHeader instance that could be used to
|
||||
// write additional headers to the handshake response.
|
||||
//
|
||||
|
@ -399,12 +451,12 @@ func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) {
|
|||
// Read HTTP request line like "GET /ws HTTP/1.1".
|
||||
rl, err := readLine(br)
|
||||
if err != nil {
|
||||
return
|
||||
return hs, err
|
||||
}
|
||||
// Parse request line data like HTTP version, uri and method.
|
||||
req, err := httpParseRequestLine(rl)
|
||||
if err != nil {
|
||||
return
|
||||
return hs, err
|
||||
}
|
||||
|
||||
// Prepare stack-based handshake header list.
|
||||
|
@ -497,7 +549,7 @@ func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) {
|
|||
if len(v) != nonceSize {
|
||||
err = ErrHandshakeBadSecKey
|
||||
} else {
|
||||
copy(nonce[:], v)
|
||||
copy(nonce, v)
|
||||
}
|
||||
|
||||
case headerSecProtocolCanonical:
|
||||
|
@ -514,7 +566,11 @@ func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) {
|
|||
}
|
||||
|
||||
case headerSecExtensionsCanonical:
|
||||
if custom, check := u.ExtensionCustom, u.Extension; custom != nil || check != nil {
|
||||
if f := u.Negotiate; err == nil && f != nil {
|
||||
hs.Extensions, err = negotiateExtensions(v, hs.Extensions, f)
|
||||
}
|
||||
// DEPRECATED path.
|
||||
if custom, check := u.ExtensionCustom, u.Extension; u.Negotiate == nil && (custom != nil || check != nil) {
|
||||
var ok bool
|
||||
if custom != nil {
|
||||
hs.Extensions, ok = custom(v, hs.Extensions)
|
||||
|
@ -574,7 +630,7 @@ func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) {
|
|||
}
|
||||
if err != nil {
|
||||
var code int
|
||||
if rej, ok := err.(*rejectConnectionError); ok {
|
||||
if rej, ok := err.(*ConnectionRejectedError); ok {
|
||||
code = rej.code
|
||||
header[1] = rej.header
|
||||
}
|
||||
|
@ -583,14 +639,14 @@ func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) {
|
|||
}
|
||||
httpWriteResponseError(bw, err, code, header.WriteTo)
|
||||
// Do not store Flush() error to not override already existing one.
|
||||
bw.Flush()
|
||||
return
|
||||
_ = bw.Flush()
|
||||
return hs, err
|
||||
}
|
||||
|
||||
httpWriteResponseUpgrade(bw, nonce, hs, header.WriteTo)
|
||||
err = bw.Flush()
|
||||
|
||||
return
|
||||
return hs, err
|
||||
}
|
||||
|
||||
type handshakeHeader [2]HandshakeHeader
|
||||
|
|
|
@ -4,8 +4,6 @@ import (
|
|||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
|
||||
"github.com/gobwas/httphead"
|
||||
)
|
||||
|
@ -41,19 +39,6 @@ func SelectEqual(v string) func(string) bool {
|
|||
}
|
||||
}
|
||||
|
||||
func strToBytes(str string) (bts []byte) {
|
||||
s := (*reflect.StringHeader)(unsafe.Pointer(&str))
|
||||
b := (*reflect.SliceHeader)(unsafe.Pointer(&bts))
|
||||
b.Data = s.Data
|
||||
b.Len = s.Len
|
||||
b.Cap = s.Len
|
||||
return
|
||||
}
|
||||
|
||||
func btsToString(bts []byte) (str string) {
|
||||
return *(*string)(unsafe.Pointer(&bts))
|
||||
}
|
||||
|
||||
// asciiToInt converts bytes to int.
|
||||
func asciiToInt(bts []byte) (ret int, err error) {
|
||||
// ASCII numbers all start with the high-order bits 0011.
|
||||
|
@ -73,7 +58,7 @@ func asciiToInt(bts []byte) (ret int, err error) {
|
|||
}
|
||||
|
||||
// pow for integers implementation.
|
||||
// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3
|
||||
// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3.
|
||||
func pow(a, b int) int {
|
||||
p := 1
|
||||
for b > 0 {
|
||||
|
@ -116,7 +101,7 @@ func btsHasToken(header, token []byte) (has bool) {
|
|||
has = bytes.EqualFold(v, token)
|
||||
return !has
|
||||
})
|
||||
return
|
||||
return has
|
||||
}
|
||||
|
||||
const (
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
//go:build purego
|
||||
// +build purego
|
||||
|
||||
package ws
|
||||
|
||||
func strToBytes(str string) (bts []byte) {
|
||||
return []byte(str)
|
||||
}
|
||||
|
||||
func btsToString(bts []byte) (str string) {
|
||||
return string(bts)
|
||||
}
|
|
@ -0,0 +1,22 @@
|
|||
//go:build !purego
|
||||
// +build !purego
|
||||
|
||||
package ws
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func strToBytes(str string) (bts []byte) {
|
||||
s := (*reflect.StringHeader)(unsafe.Pointer(&str))
|
||||
b := (*reflect.SliceHeader)(unsafe.Pointer(&bts))
|
||||
b.Data = s.Data
|
||||
b.Len = s.Len
|
||||
b.Cap = s.Len
|
||||
return bts
|
||||
}
|
||||
|
||||
func btsToString(bts []byte) (str string) {
|
||||
return *(*string)(unsafe.Pointer(&bts))
|
||||
}
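On Go 1.20 and newer the same zero-copy conversions can be expressed without reflect headers. A sketch under that version assumption (the *Unsafe names are hypothetical, not part of this change):

func strToBytesUnsafe(str string) []byte {
	// The result aliases the string's backing array and must not be mutated.
	return unsafe.Slice(unsafe.StringData(str), len(str))
}

func btsToStringUnsafe(bts []byte) string {
	return unsafe.String(unsafe.SliceData(bts), len(bts))
}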
@ -34,7 +34,7 @@ func (c *CipherReader) Read(p []byte) (n int, err error) {
|
|||
n, err = c.r.Read(p)
|
||||
ws.Cipher(p[:n], c.mask, c.pos)
|
||||
c.pos += n
|
||||
return
|
||||
return n, err
|
||||
}
|
||||
|
||||
// CipherWriter implements io.Writer that applies xor-cipher to the bytes
|
||||
|
@ -68,5 +68,5 @@ func (c *CipherWriter) Write(p []byte) (n int, err error) {
|
|||
n, err = c.w.Write(cp)
|
||||
c.pos += n
|
||||
|
||||
return
|
||||
return n, err
|
||||
}
|
||||
|
|
|
@ -113,6 +113,7 @@ type rwConn struct {
|
|||
func (rwc rwConn) Read(p []byte) (int, error) {
|
||||
return rwc.r.Read(p)
|
||||
}
|
||||
|
||||
func (rwc rwConn) Write(p []byte) (int, error) {
|
||||
return rwc.w.Write(p)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,31 @@
|
|||
package wsutil
|
||||
|
||||
import "github.com/gobwas/ws"
|
||||
|
||||
// RecvExtension is an interface for clearing fragment header RSV bits.
|
||||
type RecvExtension interface {
|
||||
UnsetBits(ws.Header) (ws.Header, error)
|
||||
}
|
||||
|
||||
// RecvExtensionFunc is an adapter to allow the use of ordinary functions as
|
||||
// RecvExtension.
|
||||
type RecvExtensionFunc func(ws.Header) (ws.Header, error)
|
||||
|
||||
// UnsetBits implements RecvExtension.
|
||||
func (fn RecvExtensionFunc) UnsetBits(h ws.Header) (ws.Header, error) {
|
||||
return fn(h)
|
||||
}
|
||||
|
||||
// SendExtension is an interface for setting fragment header RSV bits.
|
||||
type SendExtension interface {
|
||||
SetBits(ws.Header) (ws.Header, error)
|
||||
}
|
||||
|
||||
// SendExtensionFunc is an adapter to allow the use of ordinary functions as
|
||||
// SendExtension.
|
||||
type SendExtensionFunc func(ws.Header) (ws.Header, error)
|
||||
|
||||
// SetBits implements SendExtension.
|
||||
func (fn SendExtensionFunc) SetBits(h ws.Header) (ws.Header, error) {
|
||||
return fn(h)
|
||||
}
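A hedged sketch of using these adapters for an RSV1-style extension (it assumes the parent package's ws.Rsv helper and the Header.Rsv field; the bit semantics are purely illustrative):

setRSV1 := SendExtensionFunc(func(h ws.Header) (ws.Header, error) {
	h.Rsv = ws.Rsv(true, false, false) // mark outgoing frames as extension-encoded
	return h, nil
})
clearRSV1 := RecvExtensionFunc(func(h ws.Header) (ws.Header, error) {
	h.Rsv = 0 // clear RSV bits so standard header validation passes
	return h, nil
})
// A Reader carries RecvExtensions in its Extensions field; a Writer receives
// SendExtensions through SetExtensions().
_, _ = setRSV1, clearRSV1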
@ -199,7 +199,7 @@ func (c ControlHandler) HandleClose(h ws.Header) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = w.Flush(); err != nil {
|
||||
if err := w.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return ClosedError{
|
||||
|
|
|
@ -64,14 +64,14 @@ func ReadMessage(r io.Reader, s ws.State, m []Message) ([]Message, error) {
|
|||
|
||||
// ReadClientMessage reads next message from r, considering that caller
|
||||
// represents server side.
|
||||
// It is a shortcut for ReadMessage(r, ws.StateServerSide, m)
|
||||
// It is a shortcut for ReadMessage(r, ws.StateServerSide, m).
|
||||
func ReadClientMessage(r io.Reader, m []Message) ([]Message, error) {
|
||||
return ReadMessage(r, ws.StateServerSide, m)
|
||||
}
|
||||
|
||||
// ReadServerMessage reads next message from r, considering that caller
|
||||
// represents client side.
|
||||
// It is a shortcut for ReadMessage(r, ws.StateClientSide, m)
|
||||
// It is a shortcut for ReadMessage(r, ws.StateClientSide, m).
|
||||
func ReadServerMessage(r io.Reader, m []Message) ([]Message, error) {
|
||||
return ReadMessage(r, ws.StateClientSide, m)
|
||||
}
|
||||
|
@ -113,7 +113,7 @@ func ReadClientText(rw io.ReadWriter) ([]byte, error) {
|
|||
// It discards received text messages.
|
||||
//
|
||||
// Note this may handle and write control frames into the writer part of a given
|
||||
// io.ReadWriter.
|
||||
// io.ReadWriter.
|
||||
func ReadClientBinary(rw io.ReadWriter) ([]byte, error) {
|
||||
p, _, err := readData(rw, ws.StateServerSide, ws.OpBinary)
|
||||
return p, err
|
||||
|
@ -133,7 +133,7 @@ func ReadServerData(rw io.ReadWriter) ([]byte, ws.OpCode, error) {
|
|||
// It discards received binary messages.
|
||||
//
|
||||
// Note this may handle and write control frames into the writer part of a given
|
||||
// io.ReadWriter.
|
||||
// io.ReadWriter.
|
||||
func ReadServerText(rw io.ReadWriter) ([]byte, error) {
|
||||
p, _, err := readData(rw, ws.StateClientSide, ws.OpText)
|
||||
return p, err
|
||||
|
|
|
@ -12,6 +12,10 @@ import (
|
|||
// preceding NextFrame() call.
|
||||
var ErrNoFrameAdvance = errors.New("no frame advance")
|
||||
|
||||
// ErrFrameTooLarge indicates that a frame with length greater than
// MaxFrameSize was read.
|
||||
var ErrFrameTooLarge = errors.New("frame too large")
|
||||
|
||||
// FrameHandlerFunc handles parsed frame header and its body represented by
|
||||
// io.Reader.
|
||||
//
|
||||
|
@ -37,7 +41,17 @@ type Reader struct {
|
|||
// bytes are not valid UTF-8 sequence, ErrInvalidUTF8 returned.
|
||||
CheckUTF8 bool
|
||||
|
||||
// TODO(gobwas): add max frame size limit here.
|
||||
// Extensions is a list of negotiated extensions for reader Source.
|
||||
// It is used to meet the specs and clear appropriate bits in fragment
|
||||
// header RSV segment.
|
||||
Extensions []RecvExtension
|
||||
|
||||
// MaxFrameSize controls the maximum frame size in bytes
|
||||
// that can be read. A message exceeding that size will return
|
||||
// an ErrFrameTooLarge to the application.
|
||||
//
|
||||
// Not setting this field means there is no limit.
|
||||
MaxFrameSize int64
|
||||
|
||||
OnContinuation FrameHandlerFunc
|
||||
OnIntermediate FrameHandlerFunc
|
||||
|
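A usage sketch tying the two new fields together (conn and the close handling are assumptions; only Extensions and MaxFrameSize come from this change):

r := &wsutil.Reader{
	Source:       conn, // an established net.Conn (assumed)
	State:        ws.StateServerSide,
	CheckUTF8:    true,
	MaxFrameSize: 1 << 20, // refuse frames larger than 1 MiB
	Extensions:   []wsutil.RecvExtension{ /* e.g. an RSV-clearing adapter */ },
}
hdr, err := r.NextFrame()
if err == wsutil.ErrFrameTooLarge {
	// e.g. reply with a close frame carrying code 1009 (message too big).
}
_, _ = hdr, err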
@ -97,12 +111,13 @@ func (r *Reader) Read(p []byte) (n int, err error) {
|
|||
|
||||
n, err = r.frame.Read(p)
|
||||
if err != nil && err != io.EOF {
|
||||
return
|
||||
return n, err
|
||||
}
|
||||
if err == nil && r.raw.N != 0 {
|
||||
return
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// EOF condition (either err is io.EOF or r.raw.N is zero).
|
||||
switch {
|
||||
case r.raw.N != 0:
|
||||
err = io.ErrUnexpectedEOF
|
||||
|
@ -112,6 +127,8 @@ func (r *Reader) Read(p []byte) (n int, err error) {
|
|||
r.resetFragment()
|
||||
|
||||
case r.CheckUTF8 && !r.utf8.Valid():
|
||||
// NOTE: check utf8 only when full message received, since partial
|
||||
// reads may be invalid.
|
||||
n = r.utf8.Accepted()
|
||||
err = ErrInvalidUTF8
|
||||
|
||||
|
@ -120,7 +137,7 @@ func (r *Reader) Read(p []byte) (n int, err error) {
|
|||
err = io.EOF
|
||||
}
|
||||
|
||||
return
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Discard discards current message unread bytes.
|
||||
|
@ -166,14 +183,29 @@ func (r *Reader) NextFrame() (hdr ws.Header, err error) {
|
|||
return hdr, err
|
||||
}
|
||||
|
||||
if n := r.MaxFrameSize; n > 0 && hdr.Length > n {
|
||||
return hdr, ErrFrameTooLarge
|
||||
}
|
||||
|
||||
// Save raw reader to use it on discarding frame without ciphering and
|
||||
// other streaming checks.
|
||||
r.raw = io.LimitedReader{r.Source, hdr.Length}
|
||||
r.raw = io.LimitedReader{
|
||||
R: r.Source,
|
||||
N: hdr.Length,
|
||||
}
|
||||
|
||||
frame := io.Reader(&r.raw)
|
||||
if hdr.Masked {
|
||||
frame = NewCipherReader(frame, hdr.Mask)
|
||||
}
|
||||
|
||||
for _, x := range r.Extensions {
|
||||
hdr, err = x.UnsetBits(hdr)
|
||||
if err != nil {
|
||||
return hdr, err
|
||||
}
|
||||
}
|
||||
|
||||
if r.fragmented() {
|
||||
if hdr.OpCode.IsControl() {
|
||||
if cb := r.OnIntermediate; cb != nil {
|
||||
|
@ -183,7 +215,7 @@ func (r *Reader) NextFrame() (hdr ws.Header, err error) {
|
|||
// Ensure that src is empty.
|
||||
_, err = io.Copy(ioutil.Discard, &r.raw)
|
||||
}
|
||||
return
|
||||
return hdr, err
|
||||
}
|
||||
} else {
|
||||
r.opCode = hdr.OpCode
|
||||
|
@ -208,7 +240,7 @@ func (r *Reader) NextFrame() (hdr ws.Header, err error) {
|
|||
r.State = r.State.Set(ws.StateFragmented)
|
||||
}
|
||||
|
||||
return
|
||||
return hdr, err
|
||||
}
|
||||
|
||||
func (r *Reader) fragmented() bool {
|
||||
|
|
|
@ -65,7 +65,7 @@ func (u *UTF8Reader) Read(p []byte) (n int, err error) {
|
|||
u.state, u.codep = s, c
|
||||
u.accepted = accepted
|
||||
|
||||
return
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Valid checks current reader state. It returns true if all read bytes are
|
||||
|
|
|
@ -84,38 +84,6 @@ func (c *ControlWriter) Flush() error {
|
|||
return c.w.Flush()
|
||||
}
|
||||
|
||||
// Writer contains logic of buffering output data into a WebSocket fragments.
|
||||
// It is much the same as bufio.Writer, except the thing that it works with
|
||||
// WebSocket frames, not the raw data.
|
||||
//
|
||||
// Writer writes frames with specified OpCode.
|
||||
// It uses ws.State to decide whether the output frames must be masked.
|
||||
//
|
||||
// Note that it does not check control frame size or other RFC rules.
|
||||
// That is, it must be used with special care to write control frames without
|
||||
// violation of RFC. You could use ControlWriter that wraps Writer and contains
|
||||
// some guards for writing control frames.
|
||||
//
|
||||
// If an error occurs writing to a Writer, no more data will be accepted and
|
||||
// all subsequent writes will return the error.
|
||||
// After all data has been written, the client should call the Flush() method
|
||||
// to guarantee all data has been forwarded to the underlying io.Writer.
|
||||
type Writer struct {
|
||||
dest io.Writer
|
||||
|
||||
n int // Buffered bytes counter.
|
||||
raw []byte // Raw representation of buffer, including reserved header bytes.
|
||||
buf []byte // Writeable part of buffer, without reserved header bytes.
|
||||
|
||||
op ws.OpCode
|
||||
state ws.State
|
||||
|
||||
dirty bool
|
||||
fragmented bool
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
var writers = pool.New(128, 65536)
|
||||
|
||||
// GetWriter tries to reuse Writer getting it from the pool.
|
||||
|
@ -145,6 +113,58 @@ func PutWriter(w *Writer) {
|
|||
writers.Put(w, w.Size())
|
||||
}
|
||||
|
||||
// Writer contains logic of buffering output data into WebSocket fragments.
// It is much the same as bufio.Writer, except that it works with
// WebSocket frames, not the raw data.
|
||||
//
|
||||
// Writer writes frames with specified OpCode.
|
||||
// It uses ws.State to decide whether the output frames must be masked.
|
||||
//
|
||||
// Note that it does not check control frame size or other RFC rules.
|
||||
// That is, it must be used with special care to write control frames without
|
||||
// violation of RFC. You could use ControlWriter that wraps Writer and contains
|
||||
// some guards for writing control frames.
|
||||
//
|
||||
// If an error occurs writing to a Writer, no more data will be accepted and
|
||||
// all subsequent writes will return the error.
|
||||
//
|
||||
// After all data has been written, the client should call the Flush() method
|
||||
// to guarantee all data has been forwarded to the underlying io.Writer.
|
||||
type Writer struct {
|
||||
// dest specifies a destination of buffer flushes.
|
||||
dest io.Writer
|
||||
|
||||
// op specifies the WebSocket operation code used in flushed frames.
|
||||
op ws.OpCode
|
||||
|
||||
// state specifies the state of the Writer.
|
||||
state ws.State
|
||||
|
||||
// extensions is a list of negotiated extensions for writer Dest.
|
||||
// It is used to meet the specs and set appropriate bits in fragment
|
||||
// header RSV segment.
|
||||
extensions []SendExtension
|
||||
|
||||
// noFlush reports whether buffer must grow instead of being flushed.
|
||||
noFlush bool
|
||||
|
||||
// Raw representation of the buffer, including reserved header bytes.
|
||||
raw []byte
|
||||
|
||||
// Writeable part of buffer, without reserved header bytes.
|
||||
// Resetting this to nil will not result in reallocation if raw is not nil.
|
||||
// And vice versa: if buf is not nil, then Writer is assumed as ready and
|
||||
// initialized.
|
||||
buf []byte
|
||||
|
||||
// Buffered bytes counter.
|
||||
n int
|
||||
|
||||
dirty bool
|
||||
fseq int
|
||||
err error
|
||||
}
|
||||
|
||||
// NewWriter returns a new Writer whose buffer has the DefaultWriteBuffer size.
|
||||
func NewWriter(dest io.Writer, state ws.State, op ws.OpCode) *Writer {
|
||||
return NewWriterBufferSize(dest, state, op, 0)
|
||||
|
@ -186,57 +206,63 @@ func NewWriterBufferSize(dest io.Writer, state ws.State, op ws.OpCode, n int) *W
|
|||
//
|
||||
// It panics if len(buf) is too small to fit header and payload data.
|
||||
func NewWriterBuffer(dest io.Writer, state ws.State, op ws.OpCode, buf []byte) *Writer {
|
||||
offset := reserve(state, len(buf))
|
||||
if len(buf) <= offset {
|
||||
panic("buffer too small")
|
||||
}
|
||||
|
||||
return &Writer{
|
||||
w := &Writer{
|
||||
dest: dest,
|
||||
raw: buf,
|
||||
buf: buf[offset:],
|
||||
state: state,
|
||||
op: op,
|
||||
raw: buf,
|
||||
}
|
||||
w.initBuf()
|
||||
return w
|
||||
}
|
||||
|
||||
func reserve(state ws.State, n int) (offset int) {
|
||||
var mask int
|
||||
if state.ClientSide() {
|
||||
mask = 4
|
||||
}
|
||||
|
||||
switch {
|
||||
case n <= int(len7)+mask+2:
|
||||
return mask + 2
|
||||
case n <= int(len16)+mask+4:
|
||||
return mask + 4
|
||||
default:
|
||||
return mask + 10
|
||||
func (w *Writer) initBuf() {
|
||||
offset := reserve(w.state, len(w.raw))
|
||||
if len(w.raw) <= offset {
|
||||
panic("wsutil: writer buffer is too small")
|
||||
}
|
||||
w.buf = w.raw[offset:]
|
||||
}
|
||||
|
||||
// headerSize returns number of bytes needed to encode header of a frame with
|
||||
// given state and length.
|
||||
func headerSize(s ws.State, n int) int {
|
||||
return ws.HeaderSize(ws.Header{
|
||||
Length: int64(n),
|
||||
Masked: s.ClientSide(),
|
||||
})
|
||||
}
|
||||
|
||||
// Reset discards any buffered data, clears error, and resets w to have given
|
||||
// state and write frames with given OpCode to dest.
|
||||
// Reset resets Writer as it was created by New() methods.
|
||||
// Note that Reset does reset extensions and other options that were set after
|
||||
// Writer initialization.
|
||||
func (w *Writer) Reset(dest io.Writer, state ws.State, op ws.OpCode) {
|
||||
w.n = 0
|
||||
w.dirty = false
|
||||
w.fragmented = false
|
||||
w.dest = dest
|
||||
w.state = state
|
||||
w.op = op
|
||||
|
||||
w.initBuf()
|
||||
|
||||
w.n = 0
|
||||
w.dirty = false
|
||||
w.fseq = 0
|
||||
w.extensions = w.extensions[:0]
|
||||
w.noFlush = false
|
||||
}
|
||||
|
||||
// Size returns the size of the underlying buffer in bytes.
|
||||
// ResetOp is a quick version of Reset().
|
||||
// ResetOp does reset unwritten fragments and does not reset results of
|
||||
// SetExtensions() or DisableFlush() methods.
|
||||
func (w *Writer) ResetOp(op ws.OpCode) {
|
||||
w.op = op
|
||||
w.n = 0
|
||||
w.dirty = false
|
||||
w.fseq = 0
|
||||
}
|
||||
|
||||
// SetExtensions adds xs as extensions to be used during writes.
|
||||
func (w *Writer) SetExtensions(xs ...SendExtension) {
|
||||
w.extensions = xs
|
||||
}
|
||||
|
||||
// DisableFlush prevents the Writer from writing fragments.
|
||||
func (w *Writer) DisableFlush() {
|
||||
w.noFlush = true
|
||||
}
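A usage sketch combining the two methods above (conn and payload are assumed): with flushing disabled the internal buffer grows instead of emitting fragments, so Flush sends a single final frame.

w := wsutil.NewWriter(conn, ws.StateServerSide, ws.OpText)
w.DisableFlush() // grow the buffer instead of writing intermediate fragments
if _, err := w.Write(payload); err != nil {
	// handle write error
}
if err := w.Flush(); err != nil { // one frame with FIN set
	// handle flush error
}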
// Size returns the size of the underlying buffer in bytes (not including
|
||||
// WebSocket header bytes).
|
||||
func (w *Writer) Size() int {
|
||||
return len(w.buf)
|
||||
}
|
||||
|
@ -263,6 +289,10 @@ func (w *Writer) Write(p []byte) (n int, err error) {
|
|||
|
||||
var nn int
|
||||
for len(p) > w.Available() && w.err == nil {
|
||||
if w.noFlush {
|
||||
w.Grow(len(p))
|
||||
continue
|
||||
}
|
||||
if w.Buffered() == 0 {
|
||||
// Large write, empty buffer. Write directly from p to avoid copy.
|
||||
// Trade off here is that we make additional Write() to underlying
|
||||
|
@ -295,6 +325,55 @@ func (w *Writer) Write(p []byte) (n int, err error) {
|
|||
return n, w.err
|
||||
}
|
||||
|
||||
func ceilPowerOfTwo(n int) int {
|
||||
n |= n >> 1
|
||||
n |= n >> 2
|
||||
n |= n >> 4
|
||||
n |= n >> 8
|
||||
n |= n >> 16
|
||||
n |= n >> 32
|
||||
n++
|
||||
return n
|
||||
}
|
||||
|
||||
// Grow grows Writer's internal buffer capacity to guarantee space for another
|
||||
// n bytes of _payload_ -- that is, frame header is not included in n.
|
||||
func (w *Writer) Grow(n int) {
|
||||
// NOTE: we must respect the possibility of header reserved bytes grow.
|
||||
var (
|
||||
size = len(w.raw)
|
||||
prevOffset = len(w.raw) - len(w.buf)
|
||||
nextOffset = len(w.raw) - len(w.buf)
|
||||
buffered = w.Buffered()
|
||||
)
|
||||
for cap := size - nextOffset - buffered; cap < n; {
|
||||
// This loop runs twice only at split cases, when reservation of raw
|
||||
// buffer space for the header shrinks capacity of new buffer such that
|
||||
// it still less than n.
|
||||
//
|
||||
// Loop is safe here because:
|
||||
// - (offset + buffered + n) is greater than size, otherwise (cap < n)
|
||||
// would be false:
|
||||
// size = offset + buffered + freeSpace (cap)
|
||||
// size' = offset + buffered + wantSpace (n)
|
||||
// Since (cap < n) is true in the loop condition, size' is guaranteed
|
||||
// to be greater => no infinite loop.
|
||||
size = ceilPowerOfTwo(nextOffset + buffered + n)
|
||||
nextOffset = reserve(w.state, size)
|
||||
cap = size - nextOffset - buffered
|
||||
}
|
||||
if size < len(w.raw) {
|
||||
panic("wsutil: buffer grow leads to its reduce")
|
||||
}
|
||||
if size == len(w.raw) {
|
||||
return
|
||||
}
|
||||
p := make([]byte, size)
|
||||
copy(p[nextOffset-prevOffset:], w.raw[:prevOffset+buffered])
|
||||
w.raw = p
|
||||
w.buf = w.raw[nextOffset:]
|
||||
}
|
||||
|
||||
// WriteThrough writes data bypassing the buffer.
|
||||
// Note that Writer's buffer must be empty before calling WriteThrough().
|
||||
func (w *Writer) WriteThrough(p []byte) (n int, err error) {
|
||||
|
@ -305,13 +384,37 @@ func (w *Writer) WriteThrough(p []byte) (n int, err error) {
|
|||
return 0, ErrNotEmpty
|
||||
}
|
||||
|
||||
w.err = writeFrame(w.dest, w.state, w.opCode(), false, p)
|
||||
var frame ws.Frame
|
||||
frame.Header = ws.Header{
|
||||
OpCode: w.opCode(),
|
||||
Fin: false,
|
||||
Length: int64(len(p)),
|
||||
}
|
||||
for _, x := range w.extensions {
|
||||
frame.Header, err = x.SetBits(frame.Header)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if w.state.ClientSide() {
|
||||
// Should copy bytes to prevent corruption of caller data.
|
||||
payload := pbytes.GetLen(len(p))
|
||||
defer pbytes.Put(payload)
|
||||
copy(payload, p)
|
||||
|
||||
frame.Payload = payload
|
||||
frame = ws.MaskFrameInPlace(frame)
|
||||
} else {
|
||||
frame.Payload = p
|
||||
}
|
||||
|
||||
w.err = ws.WriteFrame(w.dest, frame)
|
||||
if w.err == nil {
|
||||
n = len(p)
|
||||
}
|
||||
|
||||
w.dirty = true
|
||||
w.fragmented = true
|
||||
w.fseq++
|
||||
|
||||
return n, w.err
|
||||
}
|
||||
|
@ -321,7 +424,11 @@ func (w *Writer) ReadFrom(src io.Reader) (n int64, err error) {
|
|||
var nn int
|
||||
for err == nil {
|
||||
if w.Available() == 0 {
|
||||
err = w.FlushFragment()
|
||||
if w.noFlush {
|
||||
w.Grow(w.Buffered()) // Twice bigger.
|
||||
} else {
|
||||
err = w.FlushFragment()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -367,7 +474,7 @@ func (w *Writer) Flush() error {
|
|||
w.err = w.flushFragment(true)
|
||||
w.n = 0
|
||||
w.dirty = false
|
||||
w.fragmented = false
|
||||
w.fseq = 0
|
||||
|
||||
return w.err
|
||||
}
|
||||
|
@ -381,35 +488,49 @@ func (w *Writer) FlushFragment() error {
|
|||
|
||||
w.err = w.flushFragment(false)
|
||||
w.n = 0
|
||||
w.fragmented = true
|
||||
w.fseq++
|
||||
|
||||
return w.err
|
||||
}
|
||||
|
||||
func (w *Writer) flushFragment(fin bool) error {
|
||||
frame := ws.NewFrame(w.opCode(), fin, w.buf[:w.n])
|
||||
func (w *Writer) flushFragment(fin bool) (err error) {
|
||||
var (
|
||||
payload = w.buf[:w.n]
|
||||
header = ws.Header{
|
||||
OpCode: w.opCode(),
|
||||
Fin: fin,
|
||||
Length: int64(len(payload)),
|
||||
}
|
||||
)
|
||||
for _, ext := range w.extensions {
|
||||
header, err = ext.SetBits(header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if w.state.ClientSide() {
|
||||
frame = ws.MaskFrameInPlace(frame)
|
||||
header.Masked = true
|
||||
header.Mask = ws.NewMask()
|
||||
ws.Cipher(payload, header.Mask, 0)
|
||||
}
|
||||
|
||||
// Write header to the header segment of the raw buffer.
|
||||
head := len(w.raw) - len(w.buf)
|
||||
offset := head - ws.HeaderSize(frame.Header)
|
||||
var (
|
||||
offset = len(w.raw) - len(w.buf)
|
||||
skip = offset - ws.HeaderSize(header)
|
||||
)
|
||||
buf := bytesWriter{
|
||||
buf: w.raw[offset:head],
|
||||
buf: w.raw[skip:offset],
|
||||
}
|
||||
if err := ws.WriteHeader(&buf, frame.Header); err != nil {
|
||||
if err := ws.WriteHeader(&buf, header); err != nil {
|
||||
// Must never be reached.
|
||||
panic("dump header error: " + err.Error())
|
||||
}
|
||||
|
||||
_, err := w.dest.Write(w.raw[offset : head+w.n])
|
||||
|
||||
_, err = w.dest.Write(w.raw[skip : offset+w.n])
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *Writer) opCode() ws.OpCode {
|
||||
if w.fragmented {
|
||||
if w.fseq > 0 {
|
||||
return ws.OpContinuation
|
||||
}
|
||||
return w.op
|
||||
|
@ -448,3 +569,31 @@ func writeFrame(w io.Writer, s ws.State, op ws.OpCode, fin bool, p []byte) error
|
|||
|
||||
return ws.WriteFrame(w, frame)
|
||||
}
|
||||
|
||||
// reserve calculates the number of bytes that need to be reserved for the frame header.
|
||||
//
|
||||
// Note that instead of ws.HeaderSize() it does calculation based on the buffer
|
||||
// size, not the payload size.
|
||||
func reserve(state ws.State, n int) (offset int) {
|
||||
var mask int
|
||||
if state.ClientSide() {
|
||||
mask = 4
|
||||
}
|
||||
switch {
|
||||
case n <= int(len7)+mask+2:
|
||||
return mask + 2
|
||||
case n <= int(len16)+mask+4:
|
||||
return mask + 4
|
||||
default:
|
||||
return mask + 10
|
||||
}
|
||||
}
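A few worked values may help (assuming len7 == 125 and len16 == 65535, the RFC 6455 length thresholds):

// reserve(ws.StateServerSide, 64)     == 2   // 2-byte base header, no mask
// reserve(ws.StateClientSide, 64)     == 6   // base header + 4-byte mask
// reserve(ws.StateServerSide, 4096)   == 4   // base header + 16-bit length
// reserve(ws.StateClientSide, 100000) == 14  // base header + 64-bit length + mask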
// headerSize returns number of bytes needed to encode header of a frame with
|
||||
// given state and length.
|
||||
func headerSize(s ws.State, n int) int {
|
||||
return ws.HeaderSize(ws.Header{
|
||||
Length: int64(n),
|
||||
Masked: s.ClientSide(),
|
||||
})
|
||||
}
|
||||
|
|
|
@ -3,54 +3,54 @@ Package wsutil provides utilities for working with WebSocket protocol.
|
|||
|
||||
Overview:
|
||||
|
||||
// Read masked text message from peer and check utf8 encoding.
|
||||
header, err := ws.ReadHeader(conn)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
// Read masked text message from peer and check utf8 encoding.
|
||||
header, err := ws.ReadHeader(conn)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
|
||||
// Prepare to read payload.
|
||||
r := io.LimitReader(conn, header.Length)
|
||||
r = wsutil.NewCipherReader(r, header.Mask)
|
||||
r = wsutil.NewUTF8Reader(r)
|
||||
// Prepare to read payload.
|
||||
r := io.LimitReader(conn, header.Length)
|
||||
r = wsutil.NewCipherReader(r, header.Mask)
|
||||
r = wsutil.NewUTF8Reader(r)
|
||||
|
||||
payload, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
payload, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
|
||||
You could get the same behavior using just `wsutil.Reader`:
|
||||
|
||||
r := wsutil.Reader{
|
||||
Source: conn,
|
||||
CheckUTF8: true,
|
||||
}
|
||||
r := wsutil.Reader{
|
||||
Source: conn,
|
||||
CheckUTF8: true,
|
||||
}
|
||||
|
||||
payload, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
payload, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
|
||||
Or, simplest of all:
|
||||
|
||||
payload, err := wsutil.ReadClientText(conn)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
payload, err := wsutil.ReadClientText(conn)
|
||||
if err != nil {
|
||||
// handle err
|
||||
}
|
||||
|
||||
The package also exports tools for buffered writing:
|
||||
|
||||
// Create buffered writer, that will buffer output bytes and send them as
|
||||
// 128-length fragments (with exception on large writes, see the doc).
|
||||
writer := wsutil.NewWriterSize(conn, ws.StateServerSide, ws.OpText, 128)
|
||||
// Create buffered writer, that will buffer output bytes and send them as
|
||||
// 128-length fragments (with exception on large writes, see the doc).
|
||||
writer := wsutil.NewWriterSize(conn, ws.StateServerSide, ws.OpText, 128)
|
||||
|
||||
_, err := io.CopyN(writer, rand.Reader, 100)
|
||||
if err == nil {
|
||||
err = writer.Flush()
|
||||
}
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
_, err := io.CopyN(writer, rand.Reader, 100)
|
||||
if err == nil {
|
||||
err = writer.Flush()
|
||||
}
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
|
||||
For more utils and helpers see the documentation.
|
||||
*/
|
||||
|
|
|
@ -1,20 +0,0 @@
|
|||
Copyright (c) 2012 Caleb Doxsey
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -1,55 +0,0 @@
|
|||
package queue
|
||||
|
||||
type (
|
||||
Queue struct {
|
||||
start, end *node
|
||||
length int
|
||||
}
|
||||
node struct {
|
||||
value interface{}
|
||||
next *node
|
||||
}
|
||||
)
|
||||
|
||||
// Create a new queue
|
||||
func New() *Queue {
|
||||
return &Queue{nil,nil,0}
|
||||
}
|
||||
// Take the next item off the front of the queue
|
||||
func (this *Queue) Dequeue() interface{} {
|
||||
if this.length == 0 {
|
||||
return nil
|
||||
}
|
||||
n := this.start
|
||||
if this.length == 1 {
|
||||
this.start = nil
|
||||
this.end = nil
|
||||
} else {
|
||||
this.start = this.start.next
|
||||
}
|
||||
this.length--
|
||||
return n.value
|
||||
}
|
||||
// Put an item on the end of a queue
|
||||
func (this *Queue) Enqueue(value interface{}) {
|
||||
n := &node{value,nil}
|
||||
if this.length == 0 {
|
||||
this.start = n
|
||||
this.end = n
|
||||
} else {
|
||||
this.end.next = n
|
||||
this.end = n
|
||||
}
|
||||
this.length++
|
||||
}
|
||||
// Return the number of items in the queue
|
||||
func (this *Queue) Len() int {
|
||||
return this.length
|
||||
}
|
||||
// Return the first item in the queue without removing it
|
||||
func (this *Queue) Peek() interface{} {
|
||||
if this.length == 0 {
|
||||
return nil
|
||||
}
|
||||
return this.start.value
|
||||
}
|
|
@ -17,6 +17,7 @@ package profile
|
|||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (p *Profile) decoder() []decoder {
|
||||
|
@ -183,12 +184,13 @@ var profileDecoder = []decoder{
|
|||
// repeated Location location = 4
|
||||
func(b *buffer, m message) error {
|
||||
x := new(Location)
|
||||
x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
|
||||
x.Line = b.tmpLines[:0] // Use shared space temporarily
|
||||
pp := m.(*Profile)
|
||||
pp.Location = append(pp.Location, x)
|
||||
err := decodeMessage(b, x)
|
||||
var tmp []Line
|
||||
x.Line = append(tmp, x.Line...) // Shrink to allocated size
|
||||
b.tmpLines = x.Line[:0]
|
||||
// Copy to shrink size and detach from shared space.
|
||||
x.Line = append([]Line(nil), x.Line...)
|
||||
return err
|
||||
},
|
||||
// repeated Function function = 5
|
||||
|
@ -252,6 +254,14 @@ func (p *Profile) postDecode() error {
|
|||
} else {
|
||||
mappings[m.ID] = m
|
||||
}
|
||||
|
||||
// If this is a main Linux kernel mapping with a relocation symbol suffix
|
||||
// ("[kernel.kallsyms]_text"), extract said suffix.
|
||||
// It is fairly hacky to handle at this level, but the alternatives appear even worse.
|
||||
const prefix = "[kernel.kallsyms]"
|
||||
if strings.HasPrefix(m.File, prefix) {
|
||||
m.KernelRelocationSymbol = m.File[len(prefix):]
|
||||
}
|
||||
}
|
||||
|
||||
functions := make(map[uint64]*Function, len(p.Function))
|
||||
|
@ -298,41 +308,52 @@ func (p *Profile) postDecode() error {
|
|||
st.Unit, err = getString(p.stringTable, &st.unitX, err)
|
||||
}
|
||||
|
||||
// Pre-allocate space for all locations.
|
||||
numLocations := 0
|
||||
for _, s := range p.Sample {
|
||||
labels := make(map[string][]string, len(s.labelX))
|
||||
numLabels := make(map[string][]int64, len(s.labelX))
|
||||
numUnits := make(map[string][]string, len(s.labelX))
|
||||
for _, l := range s.labelX {
|
||||
var key, value string
|
||||
key, err = getString(p.stringTable, &l.keyX, err)
|
||||
if l.strX != 0 {
|
||||
value, err = getString(p.stringTable, &l.strX, err)
|
||||
labels[key] = append(labels[key], value)
|
||||
} else if l.numX != 0 || l.unitX != 0 {
|
||||
numValues := numLabels[key]
|
||||
units := numUnits[key]
|
||||
if l.unitX != 0 {
|
||||
var unit string
|
||||
unit, err = getString(p.stringTable, &l.unitX, err)
|
||||
units = padStringArray(units, len(numValues))
|
||||
numUnits[key] = append(units, unit)
|
||||
}
|
||||
numLabels[key] = append(numLabels[key], l.numX)
|
||||
}
|
||||
}
|
||||
if len(labels) > 0 {
|
||||
s.Label = labels
|
||||
}
|
||||
if len(numLabels) > 0 {
|
||||
s.NumLabel = numLabels
|
||||
for key, units := range numUnits {
|
||||
if len(units) > 0 {
|
||||
numUnits[key] = padStringArray(units, len(numLabels[key]))
|
||||
numLocations += len(s.locationIDX)
|
||||
}
|
||||
locBuffer := make([]*Location, numLocations)
|
||||
|
||||
for _, s := range p.Sample {
|
||||
if len(s.labelX) > 0 {
|
||||
labels := make(map[string][]string, len(s.labelX))
|
||||
numLabels := make(map[string][]int64, len(s.labelX))
|
||||
numUnits := make(map[string][]string, len(s.labelX))
|
||||
for _, l := range s.labelX {
|
||||
var key, value string
|
||||
key, err = getString(p.stringTable, &l.keyX, err)
|
||||
if l.strX != 0 {
|
||||
value, err = getString(p.stringTable, &l.strX, err)
|
||||
labels[key] = append(labels[key], value)
|
||||
} else if l.numX != 0 || l.unitX != 0 {
|
||||
numValues := numLabels[key]
|
||||
units := numUnits[key]
|
||||
if l.unitX != 0 {
|
||||
var unit string
|
||||
unit, err = getString(p.stringTable, &l.unitX, err)
|
||||
units = padStringArray(units, len(numValues))
|
||||
numUnits[key] = append(units, unit)
|
||||
}
|
||||
numLabels[key] = append(numLabels[key], l.numX)
|
||||
}
|
||||
}
|
||||
s.NumUnit = numUnits
|
||||
if len(labels) > 0 {
|
||||
s.Label = labels
|
||||
}
|
||||
if len(numLabels) > 0 {
|
||||
s.NumLabel = numLabels
|
||||
for key, units := range numUnits {
|
||||
if len(units) > 0 {
|
||||
numUnits[key] = padStringArray(units, len(numLabels[key]))
|
||||
}
|
||||
}
|
||||
s.NumUnit = numUnits
|
||||
}
|
||||
}
|
||||
s.Location = make([]*Location, len(s.locationIDX))
|
||||
|
||||
s.Location = locBuffer[:len(s.locationIDX)]
|
||||
locBuffer = locBuffer[len(s.locationIDX):]
|
||||
for i, lid := range s.locationIDX {
|
||||
if lid < uint64(len(locationIds)) {
|
||||
s.Location[i] = locationIds[lid]
|
||||
|
|
|
@ -22,6 +22,10 @@ import "regexp"
|
|||
// samples where at least one frame matches focus but none match ignore.
|
||||
// Returns true if the corresponding regexp matched at least one sample.
|
||||
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
|
||||
if focus == nil && ignore == nil && hide == nil && show == nil {
|
||||
fm = true // Missing focus implies a match
|
||||
return
|
||||
}
|
||||
focusOrIgnore := make(map[uint64]bool)
|
||||
hidden := make(map[uint64]bool)
|
||||
for _, l := range p.Location {
|
||||
|
|
|
@ -295,11 +295,12 @@ func get64b(b []byte) (uint64, []byte) {
|
|||
//
|
||||
// The general format for profilez samples is a sequence of words in
|
||||
// binary format. The first words are a header with the following data:
|
||||
// 1st word -- 0
|
||||
// 2nd word -- 3
|
||||
// 3rd word -- 0 if a c++ application, 1 if a java application.
|
||||
// 4th word -- Sampling period (in microseconds).
|
||||
// 5th word -- Padding.
|
||||
//
|
||||
// 1st word -- 0
|
||||
// 2nd word -- 3
|
||||
// 3rd word -- 0 if a c++ application, 1 if a java application.
|
||||
// 4th word -- Sampling period (in microseconds).
|
||||
// 5th word -- Padding.
|
||||
func parseCPU(b []byte) (*Profile, error) {
|
||||
var parse func([]byte) (uint64, []byte)
|
||||
var n1, n2, n3, n4, n5 uint64
|
||||
|
@ -403,15 +404,18 @@ func cleanupDuplicateLocations(p *Profile) {
|
|||
//
|
||||
// profilez samples are a repeated sequence of stack frames of the
|
||||
// form:
|
||||
// 1st word -- The number of times this stack was encountered.
|
||||
// 2nd word -- The size of the stack (StackSize).
|
||||
// 3rd word -- The first address on the stack.
|
||||
// ...
|
||||
// StackSize + 2 -- The last address on the stack
|
||||
//
|
||||
// 1st word -- The number of times this stack was encountered.
|
||||
// 2nd word -- The size of the stack (StackSize).
|
||||
// 3rd word -- The first address on the stack.
|
||||
// ...
|
||||
// StackSize + 2 -- The last address on the stack
|
||||
//
|
||||
// The last stack trace is of the form:
|
||||
// 1st word -- 0
|
||||
// 2nd word -- 1
|
||||
// 3rd word -- 0
|
||||
//
|
||||
// 1st word -- 0
|
||||
// 2nd word -- 1
|
||||
// 3rd word -- 0
|
||||
//
|
||||
// Addresses from stack traces may point to the next instruction after
|
||||
// each call. Optionally adjust by -1 to land somewhere on the actual
|
||||
|
@ -861,7 +865,6 @@ func parseThread(b []byte) (*Profile, error) {
|
|||
// Recognize each thread and populate profile samples.
|
||||
for !isMemoryMapSentinel(line) {
|
||||
if strings.HasPrefix(line, "---- no stack trace for") {
|
||||
line = ""
|
||||
break
|
||||
}
|
||||
if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
package profile
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
@ -58,7 +59,7 @@ func Merge(srcs []*Profile) (*Profile, error) {
|
|||
|
||||
for _, src := range srcs {
|
||||
// Clear the profile-specific hash tables
|
||||
pm.locationsByID = make(map[uint64]*Location, len(src.Location))
|
||||
pm.locationsByID = makeLocationIDMap(len(src.Location))
|
||||
pm.functionsByID = make(map[uint64]*Function, len(src.Function))
|
||||
pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
|
||||
|
||||
|
@ -136,7 +137,7 @@ type profileMerger struct {
|
|||
p *Profile
|
||||
|
||||
// Memoization tables within a profile.
|
||||
locationsByID map[uint64]*Location
|
||||
locationsByID locationIDMap
|
||||
functionsByID map[uint64]*Function
|
||||
mappingsByID map[uint64]mapInfo
|
||||
|
||||
|
@ -153,6 +154,16 @@ type mapInfo struct {
|
|||
}
|
||||
|
||||
func (pm *profileMerger) mapSample(src *Sample) *Sample {
|
||||
// Check memoization table
|
||||
k := pm.sampleKey(src)
|
||||
if ss, ok := pm.samples[k]; ok {
|
||||
for i, v := range src.Value {
|
||||
ss.Value[i] += v
|
||||
}
|
||||
return ss
|
||||
}
|
||||
|
||||
// Make new sample.
|
||||
s := &Sample{
|
||||
Location: make([]*Location, len(src.Location)),
|
||||
Value: make([]int64, len(src.Value)),
|
||||
|
@ -177,52 +188,98 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample {
|
|||
s.NumLabel[k] = vv
|
||||
s.NumUnit[k] = uu
|
||||
}
|
||||
// Check memoization table. Must be done on the remapped location to
|
||||
// account for the remapped mapping. Add current values to the
|
||||
// existing sample.
|
||||
k := s.key()
|
||||
if ss, ok := pm.samples[k]; ok {
|
||||
for i, v := range src.Value {
|
||||
ss.Value[i] += v
|
||||
}
|
||||
return ss
|
||||
}
|
||||
copy(s.Value, src.Value)
|
||||
pm.samples[k] = s
|
||||
pm.p.Sample = append(pm.p.Sample, s)
|
||||
return s
|
||||
}
|
||||
|
||||
// key generates sampleKey to be used as a key for maps.
|
||||
func (sample *Sample) key() sampleKey {
|
||||
ids := make([]string, len(sample.Location))
|
||||
for i, l := range sample.Location {
|
||||
ids[i] = strconv.FormatUint(l.ID, 16)
|
||||
func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
|
||||
// Accumulate contents into a string.
|
||||
var buf strings.Builder
|
||||
buf.Grow(64) // Heuristic to avoid extra allocs
|
||||
|
||||
// encode a number
|
||||
putNumber := func(v uint64) {
|
||||
var num [binary.MaxVarintLen64]byte
|
||||
n := binary.PutUvarint(num[:], v)
|
||||
buf.Write(num[:n])
|
||||
}
|
||||
|
||||
labels := make([]string, 0, len(sample.Label))
|
||||
for k, v := range sample.Label {
|
||||
labels = append(labels, fmt.Sprintf("%q%q", k, v))
|
||||
// encode a string prefixed with its length.
|
||||
putDelimitedString := func(s string) {
|
||||
putNumber(uint64(len(s)))
|
||||
buf.WriteString(s)
|
||||
}
|
||||
sort.Strings(labels)
|
||||
|
||||
numlabels := make([]string, 0, len(sample.NumLabel))
|
||||
for k, v := range sample.NumLabel {
|
||||
numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
|
||||
for _, l := range sample.Location {
|
||||
// Get the location in the merged profile, which may have a different ID.
|
||||
if loc := pm.mapLocation(l); loc != nil {
|
||||
putNumber(loc.ID)
|
||||
}
|
||||
}
|
||||
sort.Strings(numlabels)
|
||||
putNumber(0) // Delimiter
|
||||
|
||||
return sampleKey{
|
||||
strings.Join(ids, "|"),
|
||||
strings.Join(labels, ""),
|
||||
strings.Join(numlabels, ""),
|
||||
for _, l := range sortedKeys1(sample.Label) {
|
||||
putDelimitedString(l)
|
||||
values := sample.Label[l]
|
||||
putNumber(uint64(len(values)))
|
||||
for _, v := range values {
|
||||
putDelimitedString(v)
|
||||
}
|
||||
}
|
||||
|
||||
for _, l := range sortedKeys2(sample.NumLabel) {
|
||||
putDelimitedString(l)
|
||||
values := sample.NumLabel[l]
|
||||
putNumber(uint64(len(values)))
|
||||
for _, v := range values {
|
||||
putNumber(uint64(v))
|
||||
}
|
||||
units := sample.NumUnit[l]
|
||||
putNumber(uint64(len(units)))
|
||||
for _, v := range units {
|
||||
putDelimitedString(v)
|
||||
}
|
||||
}
|
||||
|
||||
return sampleKey(buf.String())
|
||||
}
|
||||
|
||||
type sampleKey struct {
|
||||
locations string
|
||||
labels string
|
||||
numlabels string
|
||||
type sampleKey string
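The key built above relies on length-prefixed (uvarint-delimited) encoding so that different label sets can never collide. A standalone sketch of the same idea (the strings and encoding/binary imports are assumed):

var buf strings.Builder
putNumber := func(v uint64) {
	var tmp [binary.MaxVarintLen64]byte
	buf.Write(tmp[:binary.PutUvarint(tmp[:], v)])
}
putDelimitedString := func(s string) {
	putNumber(uint64(len(s)))
	buf.WriteString(s)
}
putDelimitedString("ab")
putDelimitedString("c")
// Encodes as 0x02 'a' 'b' 0x01 'c', which cannot collide with the encoding
// of "a" followed by "bc" (0x01 'a' 0x02 'b' 'c').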
// sortedKeys1 returns the sorted keys found in a string->[]string map.
|
||||
//
|
||||
// Note: this is currently non-generic since github pprof runs golint,
|
||||
// which does not support generics. When that issue is fixed, it can
|
||||
// be merged with sortedKeys2 and made into a generic function.
|
||||
func sortedKeys1(m map[string][]string) []string {
|
||||
if len(m) == 0 {
|
||||
return nil
|
||||
}
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
|
||||
//
|
||||
// Note: this is currently non-generic since github pprof runs golint,
|
||||
// which does not support generics. When that issue is fixed, it can
|
||||
// be merged with sortedKeys1 and made into a generic function.
|
||||
func sortedKeys2(m map[string][]int64) []string {
|
||||
if len(m) == 0 {
|
||||
return nil
|
||||
}
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
func (pm *profileMerger) mapLocation(src *Location) *Location {
|
||||
|
@ -230,7 +287,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
|
|||
return nil
|
||||
}
|
||||
|
||||
if l, ok := pm.locationsByID[src.ID]; ok {
|
||||
if l := pm.locationsByID.get(src.ID); l != nil {
|
||||
return l
|
||||
}
|
||||
|
||||
|
@ -249,10 +306,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
|
|||
// account for the remapped mapping ID.
|
||||
k := l.key()
|
||||
if ll, ok := pm.locations[k]; ok {
|
||||
pm.locationsByID[src.ID] = ll
|
||||
pm.locationsByID.set(src.ID, ll)
|
||||
return ll
|
||||
}
|
||||
pm.locationsByID[src.ID] = l
|
||||
pm.locationsByID.set(src.ID, l)
|
||||
pm.locations[k] = l
|
||||
pm.p.Location = append(pm.p.Location, l)
|
||||
return l
|
||||
|
@ -303,16 +360,17 @@ func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
|
|||
return mi
|
||||
}
|
||||
m := &Mapping{
|
||||
ID: uint64(len(pm.p.Mapping) + 1),
|
||||
Start: src.Start,
|
||||
Limit: src.Limit,
|
||||
Offset: src.Offset,
|
||||
File: src.File,
|
||||
BuildID: src.BuildID,
|
||||
HasFunctions: src.HasFunctions,
|
||||
HasFilenames: src.HasFilenames,
|
||||
HasLineNumbers: src.HasLineNumbers,
|
||||
HasInlineFrames: src.HasInlineFrames,
|
||||
ID: uint64(len(pm.p.Mapping) + 1),
|
||||
Start: src.Start,
|
||||
Limit: src.Limit,
|
||||
Offset: src.Offset,
|
||||
File: src.File,
|
||||
KernelRelocationSymbol: src.KernelRelocationSymbol,
|
||||
BuildID: src.BuildID,
|
||||
HasFunctions: src.HasFunctions,
|
||||
HasFilenames: src.HasFilenames,
|
||||
HasLineNumbers: src.HasLineNumbers,
|
||||
HasInlineFrames: src.HasInlineFrames,
|
||||
}
|
||||
pm.p.Mapping = append(pm.p.Mapping, m)
|
||||
|
||||
|
@ -479,3 +537,131 @@ func (p *Profile) compatible(pb *Profile) error {
|
|||
func equalValueType(st1, st2 *ValueType) bool {
|
||||
return st1.Type == st2.Type && st1.Unit == st2.Unit
|
||||
}
|
||||
|
||||
// locationIDMap is like a map[uint64]*Location, but provides efficiency for
|
||||
// ids that are densely numbered, which is often the case.
|
||||
type locationIDMap struct {
|
||||
dense []*Location // indexed by id for id < len(dense)
|
||||
sparse map[uint64]*Location // indexed by id for id >= len(dense)
|
||||
}
|
||||
|
||||
func makeLocationIDMap(n int) locationIDMap {
|
||||
return locationIDMap{
|
||||
dense: make([]*Location, n),
|
||||
sparse: map[uint64]*Location{},
|
||||
}
|
||||
}
|
||||
|
||||
func (lm locationIDMap) get(id uint64) *Location {
|
||||
if id < uint64(len(lm.dense)) {
|
||||
return lm.dense[int(id)]
|
||||
}
|
||||
return lm.sparse[id]
|
||||
}
|
||||
|
||||
func (lm locationIDMap) set(id uint64, loc *Location) {
|
||||
if id < uint64(len(lm.dense)) {
|
||||
lm.dense[id] = loc
|
||||
return
|
||||
}
|
||||
lm.sparse[id] = loc
|
||||
}
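A quick illustration of the dense/sparse split (values are hypothetical, not part of the vendored change): IDs below the preallocated length are served from the slice, anything larger falls back to the map.

	// Hypothetical usage of locationIDMap.
	lm := makeLocationIDMap(4)        // dense slots for ids 0..3
	lm.set(2, &Location{ID: 2})       // stored in lm.dense[2]
	lm.set(1000, &Location{ID: 1000}) // too large for dense, goes to lm.sparse
	_ = lm.get(2)                     // slice lookup
	_ = lm.get(1000)                  // map lookup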
|
||||
|
||||
// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It
|
||||
// keeps only the sample types that appear in all profiles and drops/reorders
// the sample types as necessary.
|
||||
//
|
||||
// If the sample type order is not the same across the given profiles, the
// order is derived from the first profile.
|
||||
//
|
||||
// Profiles are modified in-place.
|
||||
//
|
||||
// It returns an error if the sample type's intersection is empty.
|
||||
func CompatibilizeSampleTypes(ps []*Profile) error {
|
||||
sTypes := commonSampleTypes(ps)
|
||||
if len(sTypes) == 0 {
|
||||
return fmt.Errorf("profiles have empty common sample type list")
|
||||
}
|
||||
for _, p := range ps {
|
||||
if err := compatibilizeSampleTypes(p, sTypes); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
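As a usage sketch (assuming the package's existing Merge entry point; the helper below is hypothetical and not part of this diff), the intended call order is to align sample types first and then merge:

	// mergeCompatible is a hypothetical helper, not part of the package.
	func mergeCompatible(ps []*Profile) (*Profile, error) {
		if err := CompatibilizeSampleTypes(ps); err != nil {
			return nil, err
		}
		return Merge(ps)
	}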
|
||||
|
||||
// commonSampleTypes returns the sample types that appear in all profiles, in
// the order in which they appear in the first profile.
|
||||
func commonSampleTypes(ps []*Profile) []string {
|
||||
if len(ps) == 0 {
|
||||
return nil
|
||||
}
|
||||
sTypes := map[string]int{}
|
||||
for _, p := range ps {
|
||||
for _, st := range p.SampleType {
|
||||
sTypes[st.Type]++
|
||||
}
|
||||
}
|
||||
var res []string
|
||||
for _, st := range ps[0].SampleType {
|
||||
if sTypes[st.Type] == len(ps) {
|
||||
res = append(res, st.Type)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// compatibilizeSampleTypes drops sample types that are not present in the
// sTypes list and reorders the remaining ones if needed.
|
||||
//
|
||||
// It sets DefaultSampleType to sType[0] if it is not in sType list.
|
||||
//
|
||||
// It assumes that all sample types from the sTypes list are present in the
// given profile; otherwise it returns an error.
|
||||
func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
|
||||
if len(sTypes) == 0 {
|
||||
return fmt.Errorf("sample type list is empty")
|
||||
}
|
||||
defaultSampleType := sTypes[0]
|
||||
reMap, needToModify := make([]int, len(sTypes)), false
|
||||
for i, st := range sTypes {
|
||||
if st == p.DefaultSampleType {
|
||||
defaultSampleType = p.DefaultSampleType
|
||||
}
|
||||
idx := searchValueType(p.SampleType, st)
|
||||
if idx < 0 {
|
||||
return fmt.Errorf("%q sample type is not found in profile", st)
|
||||
}
|
||||
reMap[i] = idx
|
||||
if idx != i {
|
||||
needToModify = true
|
||||
}
|
||||
}
|
||||
if !needToModify && len(sTypes) == len(p.SampleType) {
|
||||
return nil
|
||||
}
|
||||
p.DefaultSampleType = defaultSampleType
|
||||
oldSampleTypes := p.SampleType
|
||||
p.SampleType = make([]*ValueType, len(sTypes))
|
||||
for i, idx := range reMap {
|
||||
p.SampleType[i] = oldSampleTypes[idx]
|
||||
}
|
||||
values := make([]int64, len(sTypes))
|
||||
for _, s := range p.Sample {
|
||||
for i, idx := range reMap {
|
||||
values[i] = s.Value[idx]
|
||||
}
|
||||
s.Value = s.Value[:len(values)]
|
||||
copy(s.Value, values)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func searchValueType(vts []*ValueType, s string) int {
|
||||
for i, vt := range vts {
|
||||
if vt.Type == s {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
@ -21,7 +21,6 @@ import (
|
|||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
@ -73,9 +72,23 @@ type ValueType struct {
|
|||
type Sample struct {
|
||||
Location []*Location
|
||||
Value []int64
|
||||
Label map[string][]string
|
||||
// Label is a per-label-key map to values for string labels.
|
||||
//
|
||||
// In general, having multiple values for the given label key is strongly
|
||||
// discouraged - see docs for the sample label field in profile.proto. The
|
||||
// main reason this unlikely state is tracked here is to make the
|
||||
// decoding->encoding roundtrip not lossy. But we expect that the value
|
||||
// slices present in this map are always of length 1.
|
||||
Label map[string][]string
|
||||
// NumLabel is a per-label-key map to values for numeric labels. See a note
|
||||
// above on handling multiple values for a label.
|
||||
NumLabel map[string][]int64
|
||||
NumUnit map[string][]string
|
||||
// NumUnit is a per-label-key map to the unit names of corresponding numeric
|
||||
// label values. The unit info may be missing even if the label is in
|
||||
// NumLabel, see the docs in profile.proto for details. When the value
// slice is present and not nil, its length must be equal to the length of
|
||||
// the corresponding value slice in NumLabel.
|
||||
NumUnit map[string][]string
|
||||
|
||||
locationIDX []uint64
|
||||
labelX []label
|
||||
|
@ -106,6 +119,15 @@ type Mapping struct {
|
|||
|
||||
fileX int64
|
||||
buildIDX int64
|
||||
|
||||
// Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
|
||||
// For linux kernel mappings generated by some tools, correct symbolization depends
|
||||
// on knowing which of the two possible relocation symbols was used for `Start`.
|
||||
// This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
|
||||
//
|
||||
// Note, this public field is not persisted in the proto. For the purposes of
|
||||
// copying / merging / hashing profiles, it is considered subsumed by `File`.
|
||||
KernelRelocationSymbol string
|
||||
}
|
||||
|
||||
// Location corresponds to Profile.Location
|
||||
|
@ -144,7 +166,7 @@ type Function struct {
|
|||
// may be a gzip-compressed encoded protobuf or one of many legacy
|
||||
// profile formats which may be unsupported in the future.
|
||||
func Parse(r io.Reader) (*Profile, error) {
|
||||
data, err := ioutil.ReadAll(r)
|
||||
data, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -159,7 +181,7 @@ func ParseData(data []byte) (*Profile, error) {
|
|||
if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err == nil {
|
||||
data, err = ioutil.ReadAll(gz)
|
||||
data, err = io.ReadAll(gz)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decompressing profile: %v", err)
|
||||
|
@ -707,6 +729,35 @@ func (s *Sample) HasLabel(key, value string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// SetNumLabel sets the specified key to the specified value for all samples in the
|
||||
// profile. "unit" is a slice that describes the units that each corresponding member
|
||||
// of "values" is measured in (e.g. bytes or seconds). If there is no relevant
|
||||
// unit for a given value, that member of "unit" should be the empty string.
|
||||
// "unit" must either have the same length as "value", or be nil.
|
||||
func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
|
||||
for _, sample := range p.Sample {
|
||||
if sample.NumLabel == nil {
|
||||
sample.NumLabel = map[string][]int64{key: value}
|
||||
} else {
|
||||
sample.NumLabel[key] = value
|
||||
}
|
||||
if sample.NumUnit == nil {
|
||||
sample.NumUnit = map[string][]string{key: unit}
|
||||
} else {
|
||||
sample.NumUnit[key] = unit
|
||||
}
|
||||
}
|
||||
}
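For example (hypothetical label name and values), tagging every sample with an allocation size measured in bytes; the value and unit slices are index-aligned:

	// Hypothetical call: one numeric value with a matching unit entry.
	p.SetNumLabel("allocation_size", []int64{4096}, []string{"bytes"})
	// The label can later be stripped again:
	p.RemoveNumLabel("allocation_size")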
|
||||
|
||||
// RemoveNumLabel removes all numerical labels associated with the specified key for all
|
||||
// samples in the profile.
|
||||
func (p *Profile) RemoveNumLabel(key string) {
|
||||
for _, sample := range p.Sample {
|
||||
delete(sample.NumLabel, key)
|
||||
delete(sample.NumUnit, key)
|
||||
}
|
||||
}
|
||||
|
||||
// DiffBaseSample returns true if a sample belongs to the diff base and false
|
||||
// otherwise.
|
||||
func (s *Sample) DiffBaseSample() bool {
@ -39,11 +39,12 @@ import (
|
|||
)
|
||||
|
||||
type buffer struct {
|
||||
field int // field tag
|
||||
typ int // proto wire type code for field
|
||||
u64 uint64
|
||||
data []byte
|
||||
tmp [16]byte
|
||||
field int // field tag
|
||||
typ int // proto wire type code for field
|
||||
u64 uint64
|
||||
data []byte
|
||||
tmp [16]byte
|
||||
tmpLines []Line // temporary storage used while decoding "repeated Line".
|
||||
}
|
||||
|
||||
type decoder func(*buffer, message) error
|
||||
|
@ -286,7 +287,6 @@ func decodeInt64s(b *buffer, x *[]int64) error {
|
|||
if b.typ == 2 {
|
||||
// Packed encoding
|
||||
data := b.data
|
||||
tmp := make([]int64, 0, len(data)) // Maximally sized
|
||||
for len(data) > 0 {
|
||||
var u uint64
|
||||
var err error
|
||||
|
@ -294,9 +294,8 @@ func decodeInt64s(b *buffer, x *[]int64) error {
|
|||
if u, data, err = decodeVarint(data); err != nil {
|
||||
return err
|
||||
}
|
||||
tmp = append(tmp, int64(u))
|
||||
*x = append(*x, int64(u))
|
||||
}
|
||||
*x = append(*x, tmp...)
|
||||
return nil
|
||||
}
|
||||
var i int64
|
||||
|
@ -319,7 +318,6 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
|
|||
if b.typ == 2 {
|
||||
data := b.data
|
||||
// Packed encoding
|
||||
tmp := make([]uint64, 0, len(data)) // Maximally sized
|
||||
for len(data) > 0 {
|
||||
var u uint64
|
||||
var err error
|
||||
|
@ -327,9 +325,8 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
|
|||
if u, data, err = decodeVarint(data); err != nil {
|
||||
return err
|
||||
}
|
||||
tmp = append(tmp, u)
|
||||
*x = append(*x, u)
|
||||
}
|
||||
*x = append(*x, tmp...)
|
||||
return nil
|
||||
}
|
||||
var u uint64
@ -62,15 +62,31 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
|
|||
prune := make(map[uint64]bool)
|
||||
pruneBeneath := make(map[uint64]bool)
|
||||
|
||||
// simplifyFunc can be expensive, so cache results.
|
||||
// Note that the same function name can be encountered many times due to
// different lines and addresses in the same function.
|
||||
pruneCache := map[string]bool{} // Map from function to whether or not to prune
|
||||
pruneFromHere := func(s string) bool {
|
||||
if r, ok := pruneCache[s]; ok {
|
||||
return r
|
||||
}
|
||||
funcName := simplifyFunc(s)
|
||||
if dropRx.MatchString(funcName) {
|
||||
if keepRx == nil || !keepRx.MatchString(funcName) {
|
||||
pruneCache[s] = true
|
||||
return true
|
||||
}
|
||||
}
|
||||
pruneCache[s] = false
|
||||
return false
|
||||
}
|
||||
|
||||
for _, loc := range p.Location {
|
||||
var i int
|
||||
for i = len(loc.Line) - 1; i >= 0; i-- {
|
||||
if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
|
||||
funcName := simplifyFunc(fn.Name)
|
||||
if dropRx.MatchString(funcName) {
|
||||
if keepRx == nil || !keepRx.MatchString(funcName) {
|
||||
break
|
||||
}
|
||||
if pruneFromHere(fn.Name) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
@ -1,30 +1,29 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
BSD 3-Clause License
|
||||
|
||||
Copyright (c) 2009, The Go Authors. Extensions copyright (c) 2011, Miek Gieben.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
As this is fork of the official Go code the same license applies.
|
||||
Extensions of the original work are copyright (c) 2011 Miek Gieben
@ -77,6 +77,12 @@ A not-so-up-to-date-list-that-may-be-actually-current:
|
|||
* https://ping.sx/dig
|
||||
* https://fleetdeck.io/
|
||||
* https://github.com/markdingo/autoreverse
|
||||
* https://github.com/slackhq/nebula
|
||||
* https://addr.tools/
|
||||
* https://dnscheck.tools/
|
||||
* https://github.com/egbakou/domainverifier
|
||||
* https://github.com/semihalev/sdns
|
||||
* https://github.com/wintbiit/NineDNS
|
||||
|
||||
|
||||
Send pull request if you want to be listed here.
|
||||
|
@ -120,6 +126,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
|
|||
*all of them*
|
||||
|
||||
* 103{4,5} - DNS standard
|
||||
* 1183 - ISDN, X25 and other deprecated records
|
||||
* 1348 - NSAP record (removed the record)
|
||||
* 1982 - Serial Arithmetic
|
||||
* 1876 - LOC record
|
||||
|
@ -140,6 +147,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
|
|||
* 340{1,2,3} - NAPTR record
|
||||
* 3445 - Limiting the scope of (DNS)KEY
|
||||
* 3597 - Unknown RRs
|
||||
* 4025 - A Method for Storing IPsec Keying Material in DNS
|
||||
* 403{3,4,5} - DNSSEC + validation functions
|
||||
* 4255 - SSHFP record
|
||||
* 4343 - Case insensitivity
|
||||
|
@ -175,6 +183,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
|
|||
* 8080 - EdDSA for DNSSEC
|
||||
* 8499 - DNS Terminology
|
||||
* 8659 - DNS Certification Authority Authorization (CAA) Resource Record
|
||||
* 8777 - DNS Reverse IP Automatic Multicast Tunneling (AMT) Discovery
|
||||
* 8914 - Extended DNS Errors
|
||||
* 8976 - Message Digest for DNS Zones (ZONEMD RR)
@ -10,8 +10,6 @@ type MsgAcceptFunc func(dh Header) MsgAcceptAction
|
|||
//
|
||||
// * opcode isn't OpcodeQuery or OpcodeNotify
|
||||
//
|
||||
// * Zero bit isn't zero
|
||||
//
|
||||
// * does not have exactly 1 question in the question section
|
||||
//
|
||||
// * has more than 1 RR in the Answer section
|
||||
|
@ -19,7 +17,6 @@ type MsgAcceptFunc func(dh Header) MsgAcceptAction
|
|||
// * has more than 0 RRs in the Authority section
|
||||
//
|
||||
// * has more than 2 RRs in the Additional section
|
||||
//
|
||||
var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
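Servers that need different acceptance rules can install their own function; a rough sketch (field and constant names as used elsewhere in this package, shown here only for illustration):

	// Sketch: accept QUERY and NOTIFY, silently drop everything else.
	srv := &dns.Server{Addr: ":8053", Net: "udp"}
	srv.MsgAcceptFunc = func(dh dns.Header) dns.MsgAcceptAction {
		opcode := int(dh.Bits>>11) & 0xF
		if opcode != dns.OpcodeQuery && opcode != dns.OpcodeNotify {
			return dns.MsgIgnore
		}
		return dns.MsgAccept
	}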
|
||||
|
||||
// MsgAcceptAction represents the action to be taken.
@ -6,7 +6,6 @@ import (
|
|||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
|
@ -56,14 +55,20 @@ type Client struct {
|
|||
// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
|
||||
// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
|
||||
// Client.Dialer) or context.Context.Deadline (see ExchangeContext)
|
||||
Timeout time.Duration
|
||||
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
|
||||
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
|
||||
TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
|
||||
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
|
||||
group singleflight
|
||||
Timeout time.Duration
|
||||
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
|
||||
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
|
||||
TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
|
||||
|
||||
// SingleInflight previously serialised multiple concurrent queries for the
|
||||
// same Qname, Qtype and Qclass to ensure only one would be in flight at a
|
||||
// time.
|
||||
//
|
||||
// Deprecated: This is a no-op. Callers should implement their own in flight
|
||||
// query caching if needed. See github.com/miekg/dns/issues/1449.
|
||||
SingleInflight bool
|
||||
}
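Since SingleInflight is now a no-op, callers that still want query de-duplication have to do it themselves. A minimal caller-side sketch using golang.org/x/sync/singleflight (an external package, not pulled in by this diff) could look like:

	// Sketch only: collapse identical in-flight queries outside the Client.
	var group singleflight.Group

	func exchangeDeduped(c *dns.Client, m *dns.Msg, addr string) (*dns.Msg, error) {
		q := m.Question[0]
		key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
		v, err, shared := group.Do(key, func() (interface{}, error) {
			r, _, err := c.Exchange(m, addr)
			return r, err
		})
		if err != nil {
			return nil, err
		}
		r := v.(*dns.Msg)
		if shared {
			r = r.Copy() // never hand the same *Msg to multiple callers
		}
		return r, nil
	}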
|
||||
|
||||
// Exchange performs a synchronous UDP query. It sends the message m to the address
|
||||
|
@ -106,7 +111,6 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
|
|||
}
|
||||
|
||||
// DialContext connects to the address on the named network, with a context.Context.
|
||||
// For TLS over TCP (DoT) the context isn't used yet. This will be enabled when Go 1.18 is released.
|
||||
func (c *Client) DialContext(ctx context.Context, address string) (conn *Conn, err error) {
|
||||
// create a new dialer with the appropriate timeout
|
||||
var d net.Dialer
|
||||
|
@ -127,15 +131,11 @@ func (c *Client) DialContext(ctx context.Context, address string) (conn *Conn, e
|
|||
if useTLS {
|
||||
network = strings.TrimSuffix(network, "-tls")
|
||||
|
||||
// TODO(miekg): Enable after Go 1.18 is released, to be able to support two prev. releases.
|
||||
/*
|
||||
tlsDialer := tls.Dialer{
|
||||
NetDialer: &d,
|
||||
Config: c.TLSConfig,
|
||||
}
|
||||
conn.Conn, err = tlsDialer.DialContext(ctx, network, address)
|
||||
*/
|
||||
conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
|
||||
tlsDialer := tls.Dialer{
|
||||
NetDialer: &d,
|
||||
Config: c.TLSConfig,
|
||||
}
|
||||
conn.Conn, err = tlsDialer.DialContext(ctx, network, address)
|
||||
} else {
|
||||
conn.Conn, err = d.DialContext(ctx, network, address)
|
||||
}
|
||||
|
@ -183,33 +183,13 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
|
|||
// This allows users of the library to implement their own connection management,
|
||||
// as opposed to Exchange, which will always use new connections and incur the added overhead
|
||||
// that entails when using "tcp" and especially "tcp-tls" clients.
|
||||
//
|
||||
// When the singleflight is set for this client the context is _not_ forwarded to the (shared) exchange, to
|
||||
// prevent one cancelation from canceling all outstanding requests.
|
||||
func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
|
||||
return c.exchangeWithConnContext(context.Background(), m, conn)
|
||||
return c.ExchangeWithConnContext(context.Background(), m, conn)
|
||||
}
|
||||
|
||||
func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
|
||||
if !c.SingleInflight {
|
||||
return c.exchangeContext(ctx, m, conn)
|
||||
}
|
||||
|
||||
q := m.Question[0]
|
||||
key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
|
||||
r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
|
||||
// When we're doing singleflight we don't want one context cancelation, cancel _all_ outstanding queries.
|
||||
// Hence we ignore the context and use Background().
|
||||
return c.exchangeContext(context.Background(), m, conn)
|
||||
})
|
||||
if r != nil && shared {
|
||||
r = r.Copy()
|
||||
}
|
||||
|
||||
return r, rtt, err
|
||||
}
|
||||
|
||||
func (c *Client) exchangeContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
|
||||
// ExchangeWithConnContext has the same behaviour as ExchangeWithConn and
|
||||
// additionally obeys deadlines from the passed Context.
|
||||
func (c *Client) ExchangeWithConnContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
|
||||
opt := m.IsEdns0()
|
||||
// If EDNS0 is used use that for size.
|
||||
if opt != nil && opt.UDPSize() >= MinMsgSize {
|
||||
|
@ -431,7 +411,6 @@ func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error)
|
|||
// co.WriteMsg(m)
|
||||
// in, _ := co.ReadMsg()
|
||||
// co.Close()
|
||||
//
|
||||
func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
|
||||
println("dns: ExchangeConn: this function is deprecated")
|
||||
co := new(Conn)
|
||||
|
@ -480,5 +459,5 @@ func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg,
|
|||
}
|
||||
defer conn.Close()
|
||||
|
||||
return c.exchangeWithConnContext(ctx, m, conn)
|
||||
return c.ExchangeWithConnContext(ctx, m, conn)
|
||||
}
|
||||
|
|
|
@ -68,7 +68,7 @@ func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
|
|||
}
|
||||
|
||||
case "search": // set search path to given servers
|
||||
c.Search = append([]string(nil), f[1:]...)
|
||||
c.Search = cloneSlice(f[1:])
|
||||
|
||||
case "options": // magic options
|
||||
for _, s := range f[1:] {
@ -22,8 +22,7 @@ func (dns *Msg) SetReply(request *Msg) *Msg {
|
|||
}
|
||||
dns.Rcode = RcodeSuccess
|
||||
if len(request.Question) > 0 {
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = request.Question[0]
|
||||
dns.Question = []Question{request.Question[0]}
|
||||
}
|
||||
return dns
|
||||
}
|
||||
|
@ -208,7 +207,7 @@ func IsDomainName(s string) (labels int, ok bool) {
|
|||
}
|
||||
|
||||
// check for \DDD
|
||||
if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
|
||||
if isDDD(s[i+1:]) {
|
||||
i += 3
|
||||
begin += 3
|
||||
} else {
|
||||
|
@ -272,40 +271,39 @@ func IsMsg(buf []byte) error {
|
|||
|
||||
// IsFqdn checks if a domain name is fully qualified.
|
||||
func IsFqdn(s string) bool {
|
||||
s2 := strings.TrimSuffix(s, ".")
|
||||
if s == s2 {
|
||||
// Check for (and remove) a trailing dot, returning if there isn't one.
|
||||
if s == "" || s[len(s)-1] != '.' {
|
||||
return false
|
||||
}
|
||||
s = s[:len(s)-1]
|
||||
|
||||
i := strings.LastIndexFunc(s2, func(r rune) bool {
|
||||
// If we don't have an escape sequence before the final dot, we know it's
|
||||
// fully qualified and can return here.
|
||||
if s == "" || s[len(s)-1] != '\\' {
|
||||
return true
|
||||
}
|
||||
|
||||
// Otherwise we have to check if the dot is escaped or not by checking if
|
||||
// there are an odd or even number of escape sequences before the dot.
|
||||
i := strings.LastIndexFunc(s, func(r rune) bool {
|
||||
return r != '\\'
|
||||
})
|
||||
|
||||
// Test whether we have an even number of escape sequences before
|
||||
// the dot or none.
|
||||
return (len(s2)-i)%2 != 0
|
||||
return (len(s)-i)%2 != 0
|
||||
}
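Illustrative calls (not part of the diff) showing how escaping affects the result:

	fmt.Println(IsFqdn("miek.nl."))   // true: unescaped trailing dot
	fmt.Println(IsFqdn("miek.nl"))    // false: no trailing dot
	fmt.Println(IsFqdn(`miek.nl\.`))  // false: the final dot is escaped
	fmt.Println(IsFqdn(`miek.nl\\.`)) // true: the backslash is itself escaped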
|
||||
|
||||
// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
|
||||
// This means the RRs need to have the same type, name, and class. Returns true
|
||||
// if the RR set is valid, otherwise false.
|
||||
// IsRRset reports whether a set of RRs is a valid RRset as defined by RFC 2181.
|
||||
// This means the RRs need to have the same type, name, and class.
|
||||
func IsRRset(rrset []RR) bool {
|
||||
if len(rrset) == 0 {
|
||||
return false
|
||||
}
|
||||
if len(rrset) == 1 {
|
||||
return true
|
||||
}
|
||||
rrHeader := rrset[0].Header()
|
||||
rrType := rrHeader.Rrtype
|
||||
rrClass := rrHeader.Class
|
||||
rrName := rrHeader.Name
|
||||
|
||||
baseH := rrset[0].Header()
|
||||
for _, rr := range rrset[1:] {
|
||||
curRRHeader := rr.Header()
|
||||
if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
|
||||
curH := rr.Header()
|
||||
if curH.Rrtype != baseH.Rrtype || curH.Class != baseH.Class || curH.Name != baseH.Name {
|
||||
// Mismatch between the records, so this is not a valid rrset for
|
||||
//signing/verifying
|
||||
// signing/verifying
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@ -323,9 +321,15 @@ func Fqdn(s string) string {
|
|||
}
|
||||
|
||||
// CanonicalName returns the domain name in canonical form. A name in canonical
|
||||
// form is lowercase and fully qualified. See Section 6.2 in RFC 4034.
|
||||
// form is lowercase and fully qualified. Only US-ASCII letters are affected. See
|
||||
// Section 6.2 in RFC 4034.
|
||||
func CanonicalName(s string) string {
|
||||
return strings.ToLower(Fqdn(s))
|
||||
return strings.Map(func(r rune) rune {
|
||||
if r >= 'A' && r <= 'Z' {
|
||||
r += 'a' - 'A'
|
||||
}
|
||||
return r
|
||||
}, Fqdn(s))
|
||||
}
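For example (illustrative input), only US-ASCII letters are lowered; everything else passes through and the name gains a trailing dot:

	fmt.Println(CanonicalName("MIEK.NL"))  // "miek.nl."
	fmt.Println(CanonicalName("Ärger.de")) // "Ärger.de." (the non-ASCII 'Ä' is untouched)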
|
||||
|
||||
// Copied from the official Go code.
@ -128,10 +128,6 @@ type dnskeyWireFmt struct {
|
|||
/* Nothing is left out */
|
||||
}
|
||||
|
||||
func divRoundUp(a, b int) int {
|
||||
return (a + b - 1) / b
|
||||
}
|
||||
|
||||
// KeyTag calculates the keytag (or key-id) of the DNSKEY.
|
||||
func (k *DNSKEY) KeyTag() uint16 {
|
||||
if k == nil {
|
||||
|
@ -417,11 +413,11 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
|
|||
return err
|
||||
}
|
||||
|
||||
sigbuf := rr.sigBuf() // Get the binary signature data
|
||||
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
|
||||
// TODO(miek)
|
||||
// remove the domain name and assume its ours?
|
||||
}
|
||||
sigbuf := rr.sigBuf() // Get the binary signature data
|
||||
// TODO(miek)
|
||||
// remove the domain name and assume its ours?
|
||||
// if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
|
||||
// }
|
||||
|
||||
h, cryptohash, err := hashFromAlgorithm(rr.Algorithm)
|
||||
if err != nil {
@ -37,7 +37,8 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
|
|||
return nil, ErrPrivKey
|
||||
}
|
||||
// TODO(mg): check if the pubkey matches the private key
|
||||
algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8)
|
||||
algoStr, _, _ := strings.Cut(m["algorithm"], " ")
|
||||
algo, err := strconv.ParseUint(algoStr, 10, 8)
|
||||
if err != nil {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
|
@ -159,7 +160,7 @@ func parseKey(r io.Reader, file string) (map[string]string, error) {
|
|||
k = l.token
|
||||
case zValue:
|
||||
if k == "" {
|
||||
return nil, &ParseError{file, "no private key seen", l}
|
||||
return nil, &ParseError{file: file, err: "no private key seen", lex: l}
|
||||
}
|
||||
|
||||
m[strings.ToLower(k)] = l.token
@ -13,28 +13,28 @@ names in a message will result in a packing failure.
|
|||
Resource records are native types. They are not stored in wire format. Basic
|
||||
usage pattern for creating a new resource record:
|
||||
|
||||
r := new(dns.MX)
|
||||
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
|
||||
r.Preference = 10
|
||||
r.Mx = "mx.miek.nl."
|
||||
r := new(dns.MX)
|
||||
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
|
||||
r.Preference = 10
|
||||
r.Mx = "mx.miek.nl."
|
||||
|
||||
Or directly from a string:
|
||||
|
||||
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
|
||||
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
|
||||
|
||||
Or when the default origin (.) and TTL (3600) and class (IN) suit you:
|
||||
|
||||
mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
|
||||
mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
|
||||
|
||||
Or even:
|
||||
|
||||
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
|
||||
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
|
||||
|
||||
In the DNS, messages are exchanged; these messages contain resource records
(sets). Basic use pattern for creating a message:
|
||||
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion("miek.nl.", dns.TypeMX)
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion("miek.nl.", dns.TypeMX)
|
||||
|
||||
Or when not certain if the domain name is fully qualified:
|
||||
|
||||
|
@ -45,17 +45,17 @@ records for the miek.nl. zone.
|
|||
|
||||
The following is slightly more verbose, but more flexible:
|
||||
|
||||
m1 := new(dns.Msg)
|
||||
m1.Id = dns.Id()
|
||||
m1.RecursionDesired = true
|
||||
m1.Question = make([]dns.Question, 1)
|
||||
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
|
||||
m1 := new(dns.Msg)
|
||||
m1.Id = dns.Id()
|
||||
m1.RecursionDesired = true
|
||||
m1.Question = make([]dns.Question, 1)
|
||||
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
|
||||
|
||||
After creating a message it can be sent. Basic use pattern for synchronous
|
||||
querying the DNS at a server configured on 127.0.0.1 and port 53:
|
||||
|
||||
c := new(dns.Client)
|
||||
in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
|
||||
c := new(dns.Client)
|
||||
in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
|
||||
|
||||
Suppressing multiple outstanding queries (with the same question, type and
|
||||
class) is as easy as setting:
|
||||
|
@ -72,7 +72,7 @@ and port to use for the connection:
|
|||
Port: 12345,
|
||||
Zone: "",
|
||||
}
|
||||
c.Dialer := &net.Dialer{
|
||||
c.Dialer = &net.Dialer{
|
||||
Timeout: 200 * time.Millisecond,
|
||||
LocalAddr: &laddr,
|
||||
}
|
||||
|
@ -96,7 +96,7 @@ the Answer section:
|
|||
// do something with t.Txt
|
||||
}
|
||||
|
||||
Domain Name and TXT Character String Representations
|
||||
# Domain Name and TXT Character String Representations
|
||||
|
||||
Both domain names and TXT character strings are converted to presentation form
|
||||
both when unpacked and when converted to strings.
|
||||
|
@ -108,7 +108,7 @@ be escaped. Bytes below 32 and above 127 will be converted to \DDD form.
|
|||
For domain names, in addition to the above rules brackets, periods, spaces,
|
||||
semicolons and the at symbol are escaped.
|
||||
|
||||
DNSSEC
|
||||
# DNSSEC
|
||||
|
||||
DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses
|
||||
public key cryptography to sign resource records. The public keys are stored in
|
||||
|
@ -117,12 +117,12 @@ DNSKEY records and the signatures in RRSIG records.
|
|||
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK)
|
||||
bit to a request.
|
||||
|
||||
m := new(dns.Msg)
|
||||
m.SetEdns0(4096, true)
|
||||
m := new(dns.Msg)
|
||||
m.SetEdns0(4096, true)
|
||||
|
||||
Signature generation, signature verification and key generation are all supported.
|
||||
|
||||
DYNAMIC UPDATES
|
||||
# DYNAMIC UPDATES
|
||||
|
||||
Dynamic updates reuses the DNS message format, but renames three of the
|
||||
sections. Question is Zone, Answer is Prerequisite, Authority is Update, only
|
||||
|
@ -133,30 +133,30 @@ certain resource records or names in a zone to specify if resource records
|
|||
should be added or removed. The table from RFC 2136 supplemented with the Go
|
||||
DNS function shows which functions exist to specify the prerequisites.
|
||||
|
||||
3.2.4 - Table Of Metavalues Used In Prerequisite Section
|
||||
3.2.4 - Table Of Metavalues Used In Prerequisite Section
|
||||
|
||||
CLASS TYPE RDATA Meaning Function
|
||||
--------------------------------------------------------------
|
||||
ANY ANY empty Name is in use dns.NameUsed
|
||||
ANY rrset empty RRset exists (value indep) dns.RRsetUsed
|
||||
NONE ANY empty Name is not in use dns.NameNotUsed
|
||||
NONE rrset empty RRset does not exist dns.RRsetNotUsed
|
||||
zone rrset rr RRset exists (value dep) dns.Used
|
||||
CLASS TYPE RDATA Meaning Function
|
||||
--------------------------------------------------------------
|
||||
ANY ANY empty Name is in use dns.NameUsed
|
||||
ANY rrset empty RRset exists (value indep) dns.RRsetUsed
|
||||
NONE ANY empty Name is not in use dns.NameNotUsed
|
||||
NONE rrset empty RRset does not exist dns.RRsetNotUsed
|
||||
zone rrset rr RRset exists (value dep) dns.Used
|
||||
|
||||
The prerequisite section can also be left empty. If you have decided on the
|
||||
prerequisites you can tell what RRs should be added or deleted. The next table
|
||||
shows the options you have and what functions to call.
|
||||
|
||||
3.4.2.6 - Table Of Metavalues Used In Update Section
|
||||
3.4.2.6 - Table Of Metavalues Used In Update Section
|
||||
|
||||
CLASS TYPE RDATA Meaning Function
|
||||
---------------------------------------------------------------
|
||||
ANY ANY empty Delete all RRsets from name dns.RemoveName
|
||||
ANY rrset empty Delete an RRset dns.RemoveRRset
|
||||
NONE rrset rr Delete an RR from RRset dns.Remove
|
||||
zone rrset rr Add to an RRset dns.Insert
|
||||
CLASS TYPE RDATA Meaning Function
|
||||
---------------------------------------------------------------
|
||||
ANY ANY empty Delete all RRsets from name dns.RemoveName
|
||||
ANY rrset empty Delete an RRset dns.RemoveRRset
|
||||
NONE rrset rr Delete an RR from RRset dns.Remove
|
||||
zone rrset rr Add to an RRset dns.Insert
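As a concrete, illustrative example (host and zone names invented), adding an A record via a dynamic update uses the helpers from the table above:

	rr, _ := dns.NewRR("host.example.org. 300 IN A 192.0.2.10")
	m := new(dns.Msg)
	m.SetUpdate("example.org.") // zone goes into the Zone section
	m.Insert([]dns.RR{rr})      // "Add to an RRset" from the table above
	c := new(dns.Client)
	in, _, err := c.Exchange(m, "ns1.example.org:53")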
|
||||
|
||||
TRANSACTION SIGNATURE
|
||||
# TRANSACTION SIGNATURE
|
||||
|
||||
A TSIG or transaction signature adds an HMAC TSIG record to each message sent.
|
||||
The supported algorithms include: HmacSHA1, HmacSHA256 and HmacSHA512.
|
||||
|
@ -239,7 +239,7 @@ Basic use pattern validating and replying to a message that has TSIG set.
|
|||
w.WriteMsg(m)
|
||||
}
|
||||
|
||||
PRIVATE RRS
|
||||
# PRIVATE RRS
|
||||
|
||||
RFC 6895 sets aside a range of type codes for private use. This range is 65,280
|
||||
- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
|
||||
|
@ -248,7 +248,7 @@ can be used, before requesting an official type code from IANA.
|
|||
See https://miek.nl/2014/september/21/idn-and-private-rr-in-go-dns/ for more
|
||||
information.
|
||||
|
||||
EDNS0
|
||||
# EDNS0
|
||||
|
||||
EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by
|
||||
RFC 6891. It defines a new RR type, the OPT RR, which is then completely
|
||||
|
@ -279,9 +279,9 @@ SIG(0)
|
|||
|
||||
From RFC 2931:
|
||||
|
||||
SIG(0) provides protection for DNS transactions and requests ....
|
||||
... protection for glue records, DNS requests, protection for message headers
|
||||
on requests and responses, and protection of the overall integrity of a response.
|
||||
SIG(0) provides protection for DNS transactions and requests ....
|
||||
... protection for glue records, DNS requests, protection for message headers
|
||||
on requests and responses, and protection of the overall integrity of a response.
|
||||
|
||||
It works like TSIG, except that SIG(0) uses public key cryptography, instead of
|
||||
the shared secret approach in TSIG. Supported algorithms: ECDSAP256SHA256,
@ -78,7 +78,10 @@ func (rr *OPT) String() string {
|
|||
if rr.Do() {
|
||||
s += "flags: do; "
|
||||
} else {
|
||||
s += "flags: ; "
|
||||
s += "flags:; "
|
||||
}
|
||||
if rr.Hdr.Ttl&0x7FFF != 0 {
|
||||
s += fmt.Sprintf("MBZ: 0x%04x, ", rr.Hdr.Ttl&0x7FFF)
|
||||
}
|
||||
s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
|
||||
|
||||
|
@ -98,6 +101,8 @@ func (rr *OPT) String() string {
|
|||
s += "\n; SUBNET: " + o.String()
|
||||
case *EDNS0_COOKIE:
|
||||
s += "\n; COOKIE: " + o.String()
|
||||
case *EDNS0_EXPIRE:
|
||||
s += "\n; EXPIRE: " + o.String()
|
||||
case *EDNS0_TCP_KEEPALIVE:
|
||||
s += "\n; KEEPALIVE: " + o.String()
|
||||
case *EDNS0_UL:
|
||||
|
@ -180,7 +185,7 @@ func (rr *OPT) Do() bool {
|
|||
|
||||
// SetDo sets the DO (DNSSEC OK) bit.
|
||||
// If we pass an argument, set the DO bit to that value.
|
||||
// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored.
|
||||
// It is possible to pass 2 or more arguments, but they will be ignored.
|
||||
func (rr *OPT) SetDo(do ...bool) {
|
||||
if len(do) == 1 {
|
||||
if do[0] {
|
||||
|
@ -258,7 +263,7 @@ func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid}
|
|||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_SUBNET)
|
||||
// e.Code = dns.EDNS0SUBNET
|
||||
// e.Code = dns.EDNS0SUBNET // by default this is filled in through unpacking OPT packets (unpackDataOpt)
|
||||
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
|
||||
// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6
|
||||
// e.SourceScope = 0
|
||||
|
@ -503,6 +508,7 @@ func (e *EDNS0_LLQ) String() string {
|
|||
" " + strconv.FormatUint(uint64(e.LeaseLife), 10)
|
||||
return s
|
||||
}
|
||||
|
||||
func (e *EDNS0_LLQ) copy() EDNS0 {
|
||||
return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife}
|
||||
}
|
||||
|
@ -515,8 +521,8 @@ type EDNS0_DAU struct {
|
|||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
|
||||
func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
func (e *EDNS0_DAU) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil }
|
||||
func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil }
|
||||
|
||||
func (e *EDNS0_DAU) String() string {
|
||||
s := ""
|
||||
|
@ -539,8 +545,8 @@ type EDNS0_DHU struct {
|
|||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
|
||||
func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
func (e *EDNS0_DHU) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil }
|
||||
func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil }
|
||||
|
||||
func (e *EDNS0_DHU) String() string {
|
||||
s := ""
|
||||
|
@ -563,8 +569,8 @@ type EDNS0_N3U struct {
|
|||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
|
||||
func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
func (e *EDNS0_N3U) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil }
|
||||
func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil }
|
||||
|
||||
func (e *EDNS0_N3U) String() string {
|
||||
// Re-use the hash map
|
||||
|
@ -641,30 +647,21 @@ type EDNS0_LOCAL struct {
|
|||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
|
||||
|
||||
func (e *EDNS0_LOCAL) String() string {
|
||||
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
|
||||
}
|
||||
|
||||
func (e *EDNS0_LOCAL) copy() EDNS0 {
|
||||
b := make([]byte, len(e.Data))
|
||||
copy(b, e.Data)
|
||||
return &EDNS0_LOCAL{e.Code, b}
|
||||
return &EDNS0_LOCAL{e.Code, cloneSlice(e.Data)}
|
||||
}
|
||||
|
||||
func (e *EDNS0_LOCAL) pack() ([]byte, error) {
|
||||
b := make([]byte, len(e.Data))
|
||||
copied := copy(b, e.Data)
|
||||
if copied != len(e.Data) {
|
||||
return nil, ErrBuf
|
||||
}
|
||||
return b, nil
|
||||
return cloneSlice(e.Data), nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_LOCAL) unpack(b []byte) error {
|
||||
e.Data = make([]byte, len(b))
|
||||
copied := copy(e.Data, b)
|
||||
if copied != len(b) {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Data = cloneSlice(b)
|
||||
return nil
|
||||
}
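The cloneSlice helper these methods now call is not shown in this hunk; presumably it is a small nil-preserving generic copy along these lines (an assumption, included only for context):

	// Assumed shape of cloneSlice; the real definition lives elsewhere in the package.
	func cloneSlice[E any, S ~[]E](s S) S {
		if s == nil {
			return nil
		}
		return append(S(nil), s...)
	}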
|
||||
|
||||
|
@ -727,14 +724,10 @@ type EDNS0_PADDING struct {
|
|||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }
|
||||
func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
|
||||
func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
|
||||
func (e *EDNS0_PADDING) pack() ([]byte, error) { return cloneSlice(e.Padding), nil }
|
||||
func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = cloneSlice(b); return nil }
|
||||
func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) }
|
||||
func (e *EDNS0_PADDING) copy() EDNS0 {
|
||||
b := make([]byte, len(e.Padding))
|
||||
copy(b, e.Padding)
|
||||
return &EDNS0_PADDING{b}
|
||||
}
|
||||
func (e *EDNS0_PADDING) copy() EDNS0 { return &EDNS0_PADDING{cloneSlice(e.Padding)} }
|
||||
|
||||
// Extended DNS Error Codes (RFC 8914).
|
||||
const (
|
||||
|
@ -821,7 +814,7 @@ func (e *EDNS0_EDE) String() string {
|
|||
func (e *EDNS0_EDE) pack() ([]byte, error) {
|
||||
b := make([]byte, 2+len(e.ExtraText))
|
||||
binary.BigEndian.PutUint16(b[0:], e.InfoCode)
|
||||
copy(b[2:], []byte(e.ExtraText))
|
||||
copy(b[2:], e.ExtraText)
|
||||
return b, nil
|
||||
}
|
||||
@ -1,3 +1,4 @@
|
|||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
package dns
@ -35,17 +35,17 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) {
|
|||
token = token[:i]
|
||||
}
|
||||
|
||||
sx := strings.SplitN(token, "-", 2)
|
||||
if len(sx) != 2 {
|
||||
startStr, endStr, ok := strings.Cut(token, "-")
|
||||
if !ok {
|
||||
return zp.setParseError("bad start-stop in $GENERATE range", l)
|
||||
}
|
||||
|
||||
start, err := strconv.ParseInt(sx[0], 10, 64)
|
||||
start, err := strconv.ParseInt(startStr, 10, 64)
|
||||
if err != nil {
|
||||
return zp.setParseError("bad start in $GENERATE range", l)
|
||||
}
|
||||
|
||||
end, err := strconv.ParseInt(sx[1], 10, 64)
|
||||
end, err := strconv.ParseInt(endStr, 10, 64)
|
||||
if err != nil {
|
||||
return zp.setParseError("bad stop in $GENERATE range", l)
|
||||
}
|
||||
|
@ -54,7 +54,7 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) {
|
|||
}
|
||||
|
||||
// _BLANK
|
||||
l, ok := zp.c.Next()
|
||||
l, ok = zp.c.Next()
|
||||
if !ok || l.value != zBlank {
|
||||
return zp.setParseError("garbage after $GENERATE range", l)
|
||||
}
|
||||
|
@ -116,7 +116,7 @@ func (r *generateReader) parseError(msg string, end int) *ParseError {
|
|||
l.token = r.s[r.si-1 : end]
|
||||
l.column += r.si // l.column starts one zBLANK before r.s
|
||||
|
||||
return &ParseError{r.file, msg, l}
|
||||
return &ParseError{file: r.file, err: msg, lex: l}
|
||||
}
|
||||
|
||||
func (r *generateReader) Read(p []byte) (int, error) {
|
||||
|
@ -211,15 +211,16 @@ func (r *generateReader) ReadByte() (byte, error) {
|
|||
func modToPrintf(s string) (string, int64, string) {
|
||||
// Modifier is { offset [ ,width [ ,base ] ] } - provide default
|
||||
// values for optional width and type, if necessary.
|
||||
var offStr, widthStr, base string
|
||||
switch xs := strings.Split(s, ","); len(xs) {
|
||||
case 1:
|
||||
offStr, widthStr, base = xs[0], "0", "d"
|
||||
case 2:
|
||||
offStr, widthStr, base = xs[0], xs[1], "d"
|
||||
case 3:
|
||||
offStr, widthStr, base = xs[0], xs[1], xs[2]
|
||||
default:
|
||||
offStr, s, ok0 := strings.Cut(s, ",")
|
||||
widthStr, s, ok1 := strings.Cut(s, ",")
|
||||
base, _, ok2 := strings.Cut(s, ",")
|
||||
if !ok0 {
|
||||
widthStr = "0"
|
||||
}
|
||||
if !ok1 {
|
||||
base = "d"
|
||||
}
|
||||
if ok2 {
|
||||
return "", 0, "bad modifier in $GENERATE"
|
||||
}
|
||||
|
||||
|
@ -234,8 +235,8 @@ func modToPrintf(s string) (string, int64, string) {
|
|||
return "", 0, "bad offset in $GENERATE"
|
||||
}
|
||||
|
||||
width, err := strconv.ParseInt(widthStr, 10, 64)
|
||||
if err != nil || width < 0 || width > 255 {
|
||||
width, err := strconv.ParseUint(widthStr, 10, 8)
|
||||
if err != nil {
|
||||
return "", 0, "bad width in $GENERATE"
|
||||
}
|
||||
|
||||
|
|
|
@ -122,7 +122,7 @@ func Split(s string) []int {
|
|||
}
|
||||
|
||||
// NextLabel returns the index of the start of the next label in the
|
||||
// string s starting at offset.
|
||||
// string s starting at offset. A negative offset will cause a panic.
|
||||
// The bool end is true when the end of the string has been reached.
|
||||
// Also see PrevLabel.
|
||||
func NextLabel(s string, offset int) (i int, end bool) {
@ -1,4 +1,5 @@
|
|||
// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
|
||||
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd
|
||||
// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
|
||||
|
||||
package dns
|
||||
|
||||
|
@ -6,16 +7,18 @@ import "net"
|
|||
|
||||
const supportsReusePort = false
|
||||
|
||||
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
|
||||
if reuseport {
|
||||
func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) {
|
||||
if reuseport || reuseaddr {
|
||||
// TODO(tmthrgd): return an error?
|
||||
}
|
||||
|
||||
return net.Listen(network, addr)
|
||||
}
|
||||
|
||||
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
|
||||
if reuseport {
|
||||
const supportsReuseAddr = false
|
||||
|
||||
func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, error) {
|
||||
if reuseport || reuseaddr {
|
||||
// TODO(tmthrgd): return an error?
|
||||
}
@ -1,4 +1,4 @@
|
|||
// +build go1.11
|
||||
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd
|
||||
// +build aix darwin dragonfly freebsd linux netbsd openbsd
|
||||
|
||||
package dns
|
||||
|
@ -25,19 +25,41 @@ func reuseportControl(network, address string, c syscall.RawConn) error {
|
|||
return opErr
|
||||
}
|
||||
|
||||
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
|
||||
const supportsReuseAddr = true
|
||||
|
||||
func reuseaddrControl(network, address string, c syscall.RawConn) error {
|
||||
var opErr error
|
||||
err := c.Control(func(fd uintptr) {
|
||||
opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return opErr
|
||||
}
|
||||
|
||||
func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) {
|
||||
var lc net.ListenConfig
|
||||
if reuseport {
|
||||
switch {
|
||||
case reuseaddr && reuseport:
|
||||
case reuseport:
|
||||
lc.Control = reuseportControl
|
||||
case reuseaddr:
|
||||
lc.Control = reuseaddrControl
|
||||
}
|
||||
|
||||
return lc.Listen(context.Background(), network, addr)
|
||||
}
|
||||
|
||||
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
|
||||
func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, error) {
|
||||
var lc net.ListenConfig
|
||||
if reuseport {
|
||||
switch {
|
||||
case reuseaddr && reuseport:
|
||||
case reuseport:
|
||||
lc.Control = reuseportControl
|
||||
case reuseaddr:
|
||||
lc.Control = reuseaddrControl
|
||||
}
|
||||
|
||||
return lc.ListenPacket(context.Background(), network, addr)
@ -252,7 +252,7 @@ loop:
|
|||
}
|
||||
|
||||
// check for \DDD
|
||||
if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) {
|
||||
if isDDD(bs[i+1:]) {
|
||||
bs[i] = dddToByte(bs[i+1:])
|
||||
copy(bs[i+1:ls-3], bs[i+4:])
|
||||
ls -= 3
|
||||
|
@ -448,7 +448,7 @@ Loop:
|
|||
return string(s), off1, nil
|
||||
}
|
||||
|
||||
func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
|
||||
func packTxt(txt []string, msg []byte, offset int) (int, error) {
|
||||
if len(txt) == 0 {
|
||||
if offset >= len(msg) {
|
||||
return offset, ErrBuf
|
||||
|
@ -458,10 +458,7 @@ func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
|
|||
}
|
||||
var err error
|
||||
for _, s := range txt {
|
||||
if len(s) > len(tmp) {
|
||||
return offset, ErrBuf
|
||||
}
|
||||
offset, err = packTxtString(s, msg, offset, tmp)
|
||||
offset, err = packTxtString(s, msg, offset)
|
||||
if err != nil {
|
||||
return offset, err
|
||||
}
|
||||
|
@ -469,32 +466,30 @@ func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
|
|||
return offset, nil
|
||||
}
|
||||
|
||||
func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
|
||||
func packTxtString(s string, msg []byte, offset int) (int, error) {
|
||||
lenByteOffset := offset
|
||||
if offset >= len(msg) || len(s) > len(tmp) {
|
||||
if offset >= len(msg) || len(s) > 256*4+1 /* If all \DDD */ {
|
||||
return offset, ErrBuf
|
||||
}
|
||||
offset++
|
||||
bs := tmp[:len(s)]
|
||||
copy(bs, s)
|
||||
for i := 0; i < len(bs); i++ {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if len(msg) <= offset {
|
||||
return offset, ErrBuf
|
||||
}
|
||||
if bs[i] == '\\' {
|
||||
if s[i] == '\\' {
|
||||
i++
|
||||
if i == len(bs) {
|
||||
if i == len(s) {
|
||||
break
|
||||
}
|
||||
// check for \DDD
|
||||
if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
|
||||
msg[offset] = dddToByte(bs[i:])
|
||||
if isDDD(s[i:]) {
|
||||
msg[offset] = dddToByte(s[i:])
|
||||
i += 2
|
||||
} else {
|
||||
msg[offset] = bs[i]
|
||||
msg[offset] = s[i]
|
||||
}
|
||||
} else {
|
||||
msg[offset] = bs[i]
|
||||
msg[offset] = s[i]
|
||||
}
|
||||
offset++
|
||||
}
|
||||
|
@ -506,30 +501,28 @@ func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
|
|||
return offset, nil
|
||||
}
|
||||
|
||||
func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) {
|
||||
if offset >= len(msg) || len(s) > len(tmp) {
|
||||
func packOctetString(s string, msg []byte, offset int) (int, error) {
|
||||
if offset >= len(msg) || len(s) > 256*4+1 {
|
||||
return offset, ErrBuf
|
||||
}
|
||||
bs := tmp[:len(s)]
|
||||
copy(bs, s)
|
||||
for i := 0; i < len(bs); i++ {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if len(msg) <= offset {
|
||||
return offset, ErrBuf
|
||||
}
|
||||
if bs[i] == '\\' {
|
||||
if s[i] == '\\' {
|
||||
i++
|
||||
if i == len(bs) {
|
||||
if i == len(s) {
|
||||
break
|
||||
}
|
||||
// check for \DDD
|
||||
if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
|
||||
msg[offset] = dddToByte(bs[i:])
|
||||
if isDDD(s[i:]) {
|
||||
msg[offset] = dddToByte(s[i:])
|
||||
i += 2
|
||||
} else {
|
||||
msg[offset] = bs[i]
|
||||
msg[offset] = s[i]
|
||||
}
|
||||
} else {
|
||||
msg[offset] = bs[i]
|
||||
msg[offset] = s[i]
|
||||
}
|
||||
offset++
|
||||
}
|
||||
|
@ -551,12 +544,11 @@ func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
|
|||
// Helpers for dealing with escaped bytes
|
||||
func isDigit(b byte) bool { return b >= '0' && b <= '9' }
|
||||
|
||||
func dddToByte(s []byte) byte {
|
||||
_ = s[2] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
|
||||
func isDDD[T ~[]byte | ~string](s T) bool {
|
||||
return len(s) >= 3 && isDigit(s[0]) && isDigit(s[1]) && isDigit(s[2])
|
||||
}
|
||||
|
||||
func dddStringToByte(s string) byte {
|
||||
func dddToByte[T ~[]byte | ~string](s T) byte {
|
||||
_ = s[2] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
|
||||
}
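Illustrative values (not from the diff) showing the generic helpers accepting both strings and byte slices:

	fmt.Println(isDDD("065zzz"))     // true: three leading digits
	fmt.Println(isDDD([]byte("06"))) // false: fewer than three digits
	fmt.Println(dddToByte("065"))    // 65, i.e. 'A' (0*100 + 6*10 + 5)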
|
||||
|
@ -680,9 +672,9 @@ func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error)
|
|||
|
||||
// Convert a MsgHdr to a string, with dig-like headers:
|
||||
//
|
||||
//;; opcode: QUERY, status: NOERROR, id: 48404
|
||||
// ;; opcode: QUERY, status: NOERROR, id: 48404
|
||||
//
|
||||
//;; flags: qr aa rd ra;
|
||||
// ;; flags: qr aa rd ra;
|
||||
func (h *MsgHdr) String() string {
|
||||
if h == nil {
|
||||
return "<nil> MsgHdr"
|
||||
|
@ -866,7 +858,7 @@ func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) {
|
|||
// The header counts might have been wrong so we need to update it
|
||||
dh.Nscount = uint16(len(dns.Ns))
|
||||
if err == nil {
|
||||
dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off)
|
||||
dns.Extra, _, err = unpackRRslice(int(dh.Arcount), msg, off)
|
||||
}
|
||||
// The header counts might have been wrong so we need to update it
|
||||
dh.Arcount = uint16(len(dns.Extra))
|
||||
|
@ -876,11 +868,11 @@ func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) {
|
|||
dns.Rcode |= opt.ExtendedRcode()
|
||||
}
|
||||
|
||||
if off != len(msg) {
|
||||
// TODO(miek) make this an error?
|
||||
// use PackOpt to let people tell how detailed the error reporting should be?
|
||||
// println("dns: extra bytes in dns packet", off, "<", len(msg))
|
||||
}
|
||||
// TODO(miek) make this an error?
|
||||
// use PackOpt to let people tell how detailed the error reporting should be?
|
||||
// if off != len(msg) {
|
||||
// // println("dns: extra bytes in dns packet", off, "<", len(msg))
|
||||
// }
|
||||
return err
|
||||
|
||||
}
|
||||
|
@ -902,23 +894,38 @@ func (dns *Msg) String() string {
|
|||
return "<nil> MsgHdr"
|
||||
}
|
||||
s := dns.MsgHdr.String() + " "
|
||||
s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
|
||||
s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
|
||||
s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
|
||||
s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
|
||||
if dns.MsgHdr.Opcode == OpcodeUpdate {
|
||||
s += "ZONE: " + strconv.Itoa(len(dns.Question)) + ", "
|
||||
s += "PREREQ: " + strconv.Itoa(len(dns.Answer)) + ", "
|
||||
s += "UPDATE: " + strconv.Itoa(len(dns.Ns)) + ", "
|
||||
s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
|
||||
} else {
|
||||
s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
|
||||
s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
|
||||
s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
|
||||
s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
|
||||
}
|
||||
opt := dns.IsEdns0()
|
||||
if opt != nil {
|
||||
// OPT PSEUDOSECTION
|
||||
s += opt.String() + "\n"
|
||||
}
|
||||
if len(dns.Question) > 0 {
|
||||
s += "\n;; QUESTION SECTION:\n"
|
||||
if dns.MsgHdr.Opcode == OpcodeUpdate {
|
||||
s += "\n;; ZONE SECTION:\n"
|
||||
} else {
|
||||
s += "\n;; QUESTION SECTION:\n"
|
||||
}
|
||||
for _, r := range dns.Question {
|
||||
s += r.String() + "\n"
|
||||
}
|
||||
}
|
||||
if len(dns.Answer) > 0 {
|
||||
s += "\n;; ANSWER SECTION:\n"
|
||||
if dns.MsgHdr.Opcode == OpcodeUpdate {
|
||||
s += "\n;; PREREQUISITE SECTION:\n"
|
||||
} else {
|
||||
s += "\n;; ANSWER SECTION:\n"
|
||||
}
|
||||
for _, r := range dns.Answer {
|
||||
if r != nil {
|
||||
s += r.String() + "\n"
|
||||
|
@ -926,7 +933,11 @@ func (dns *Msg) String() string {
|
|||
}
|
||||
}
|
||||
if len(dns.Ns) > 0 {
|
||||
s += "\n;; AUTHORITY SECTION:\n"
|
||||
if dns.MsgHdr.Opcode == OpcodeUpdate {
|
||||
s += "\n;; UPDATE SECTION:\n"
|
||||
} else {
|
||||
s += "\n;; AUTHORITY SECTION:\n"
|
||||
}
|
||||
for _, r := range dns.Ns {
|
||||
if r != nil {
|
||||
s += r.String() + "\n"
|
||||
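A sketch of the behaviour the hunks above add: for dynamic-update messages, Msg.String now labels the counts and sections ZONE/PREREQ/UPDATE instead of reusing the query names. The record data is made up and the commented output is abridged.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetUpdate("example.org.")
	rr, _ := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	m.Insert([]dns.RR{rr})
	fmt.Println(m.String())
	// ;; opcode: UPDATE, status: NOERROR, id: ...
	// ZONE: 1, PREREQ: 0, UPDATE: 1, ADDITIONAL: 0
	// ;; ZONE SECTION:
	// ;example.org.	IN	 SOA
	// ;; UPDATE SECTION:
	// www.example.org.	3600	IN	A	192.0.2.1
}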
|
@ -1024,7 +1035,7 @@ func escapedNameLen(s string) int {
|
|||
continue
|
||||
}
|
||||
|
||||
if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
|
||||
if isDDD(s[i+1:]) {
|
||||
nameLen -= 3
|
||||
i += 3
|
||||
} else {
|
||||
|
@ -1065,8 +1076,8 @@ func (dns *Msg) CopyTo(r1 *Msg) *Msg {
|
|||
r1.Compress = dns.Compress
|
||||
|
||||
if len(dns.Question) > 0 {
|
||||
r1.Question = make([]Question, len(dns.Question))
|
||||
copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy
|
||||
// TODO(miek): Question is an immutable value, ok to do a shallow-copy
|
||||
r1.Question = cloneSlice(dns.Question)
|
||||
}
|
||||
|
||||
rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra))
|
||||
|
|
|
@ -20,9 +20,7 @@ func unpackDataA(msg []byte, off int) (net.IP, int, error) {
|
|||
if off+net.IPv4len > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking a"}
|
||||
}
|
||||
a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
|
||||
off += net.IPv4len
|
||||
return a, off, nil
|
||||
return cloneSlice(msg[off : off+net.IPv4len]), off + net.IPv4len, nil
|
||||
}
|
||||
|
||||
func packDataA(a net.IP, msg []byte, off int) (int, error) {
|
||||
|
@ -47,9 +45,7 @@ func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
|
|||
if off+net.IPv6len > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
|
||||
}
|
||||
aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
|
||||
off += net.IPv6len
|
||||
return aaaa, off, nil
|
||||
return cloneSlice(msg[off : off+net.IPv6len]), off + net.IPv6len, nil
|
||||
}
|
||||
|
||||
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
|
||||
|
@ -299,8 +295,7 @@ func unpackString(msg []byte, off int) (string, int, error) {
|
|||
}
|
||||
|
||||
func packString(s string, msg []byte, off int) (int, error) {
|
||||
txtTmp := make([]byte, 256*4+1)
|
||||
off, err := packTxtString(s, msg, off, txtTmp)
|
||||
off, err := packTxtString(s, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
|
@ -402,8 +397,7 @@ func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
|
|||
}
|
||||
|
||||
func packStringTxt(s []string, msg []byte, off int) (int, error) {
|
||||
txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many.
|
||||
off, err := packTxt(s, msg, off, txtTmp)
|
||||
off, err := packTxt(s, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
|
@ -412,29 +406,24 @@ func packStringTxt(s []string, msg []byte, off int) (int, error) {
|
|||
|
||||
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
|
||||
var edns []EDNS0
|
||||
Option:
|
||||
var code uint16
|
||||
if off+4 > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking opt"}
|
||||
for off < len(msg) {
|
||||
if off+4 > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking opt"}
|
||||
}
|
||||
code := binary.BigEndian.Uint16(msg[off:])
|
||||
off += 2
|
||||
optlen := binary.BigEndian.Uint16(msg[off:])
|
||||
off += 2
|
||||
if off+int(optlen) > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking opt"}
|
||||
}
|
||||
opt := makeDataOpt(code)
|
||||
if err := opt.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, opt)
|
||||
off += int(optlen)
|
||||
}
|
||||
code = binary.BigEndian.Uint16(msg[off:])
|
||||
off += 2
|
||||
optlen := binary.BigEndian.Uint16(msg[off:])
|
||||
off += 2
|
||||
if off+int(optlen) > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking opt"}
|
||||
}
|
||||
e := makeDataOpt(code)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
|
||||
if off < len(msg) {
|
||||
goto Option
|
||||
}
|
||||
|
||||
return edns, off, nil
|
||||
}
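The same change in shape, as a standalone sketch: the goto-based option walk becomes a plain for loop over (code, length, value) triples. The names below are illustrative and not part of the library.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// walkTLV visits each option in a buffer of 2-byte code, 2-byte length,
// value triples, mirroring the loop structure of the rewritten unpackDataOpt.
func walkTLV(b []byte) error {
	for off := 0; off < len(b); {
		if off+4 > len(b) {
			return errors.New("overflow unpacking option header")
		}
		code := binary.BigEndian.Uint16(b[off:])
		length := int(binary.BigEndian.Uint16(b[off+2:]))
		off += 4
		if off+length > len(b) {
			return errors.New("overflow unpacking option value")
		}
		fmt.Printf("option %d: % x\n", code, b[off:off+length])
		off += length
	}
	return nil
}

func main() {
	// One option with code 8 and a 4-byte value.
	_ = walkTLV([]byte{0x00, 0x08, 0x00, 0x04, 0x00, 0x01, 0x18, 0x00})
}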
|
||||
|
||||
|
@ -463,8 +452,7 @@ func unpackStringOctet(msg []byte, off int) (string, int, error) {
|
|||
}
|
||||
|
||||
func packStringOctet(s string, msg []byte, off int) (int, error) {
|
||||
txtTmp := make([]byte, 256*4+1)
|
||||
off, err := packOctetString(s, msg, off, txtTmp)
|
||||
off, err := packOctetString(s, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
|
@ -625,7 +613,7 @@ func unpackDataSVCB(msg []byte, off int) ([]SVCBKeyValue, int, error) {
|
|||
}
|
||||
|
||||
func packDataSVCB(pairs []SVCBKeyValue, msg []byte, off int) (int, error) {
|
||||
pairs = append([]SVCBKeyValue(nil), pairs...)
|
||||
pairs = cloneSlice(pairs)
|
||||
sort.Slice(pairs, func(i, j int) bool {
|
||||
return pairs[i].Key() < pairs[j].Key()
|
||||
})
|
||||
|
@ -810,3 +798,37 @@ func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) {
|
|||
Network: ipnet,
|
||||
}, off, nil
|
||||
}
|
||||
|
||||
func unpackIPSECGateway(msg []byte, off int, gatewayType uint8) (net.IP, string, int, error) {
|
||||
var retAddr net.IP
|
||||
var retString string
|
||||
var err error
|
||||
|
||||
switch gatewayType {
|
||||
case IPSECGatewayNone: // do nothing
|
||||
case IPSECGatewayIPv4:
|
||||
retAddr, off, err = unpackDataA(msg, off)
|
||||
case IPSECGatewayIPv6:
|
||||
retAddr, off, err = unpackDataAAAA(msg, off)
|
||||
case IPSECGatewayHost:
|
||||
retString, off, err = UnpackDomainName(msg, off)
|
||||
}
|
||||
|
||||
return retAddr, retString, off, err
|
||||
}
|
||||
|
||||
func packIPSECGateway(gatewayAddr net.IP, gatewayString string, msg []byte, off int, gatewayType uint8, compression compressionMap, compress bool) (int, error) {
|
||||
var err error
|
||||
|
||||
switch gatewayType {
|
||||
case IPSECGatewayNone: // do nothing
|
||||
case IPSECGatewayIPv4:
|
||||
off, err = packDataA(gatewayAddr, msg, off)
|
||||
case IPSECGatewayIPv6:
|
||||
off, err = packDataAAAA(gatewayAddr, msg, off)
|
||||
case IPSECGatewayHost:
|
||||
off, err = packDomainName(gatewayString, msg, off, compression, compress)
|
||||
}
|
||||
|
||||
return off, err
|
||||
}
|
||||
|
|
|
@ -84,7 +84,7 @@ Fetch:
|
|||
|
||||
err := r.Data.Parse(text)
|
||||
if err != nil {
|
||||
return &ParseError{"", err.Error(), l}
|
||||
return &ParseError{wrappedErr: err, lex: l}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
@ -4,19 +4,21 @@ import (
|
|||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const maxTok = 2048 // Largest token we can return.
|
||||
const maxTok = 512 // Token buffer start size, and growth size amount.
|
||||
|
||||
// The maximum depth of $INCLUDE directives supported by the
|
||||
// ZoneParser API.
|
||||
const maxIncludeDepth = 7
|
||||
|
||||
// Tokinize a RFC 1035 zone file. The tokenizer will normalize it:
|
||||
// Tokenize a RFC 1035 zone file. The tokenizer will normalize it:
|
||||
// * Add ownernames if they are left blank;
|
||||
// * Suppress sequences of spaces;
|
||||
// * Make each RR fit on one line (_NEWLINE is send as last)
|
||||
|
@ -64,20 +66,26 @@ const (
|
|||
// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
|
||||
// where the error occurred.
|
||||
type ParseError struct {
|
||||
file string
|
||||
err string
|
||||
lex lex
|
||||
file string
|
||||
err string
|
||||
wrappedErr error
|
||||
lex lex
|
||||
}
|
||||
|
||||
func (e *ParseError) Error() (s string) {
|
||||
if e.file != "" {
|
||||
s = e.file + ": "
|
||||
}
|
||||
if e.err == "" && e.wrappedErr != nil {
|
||||
e.err = e.wrappedErr.Error()
|
||||
}
|
||||
s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
|
||||
strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
|
||||
return
|
||||
}
|
||||
|
||||
func (e *ParseError) Unwrap() error { return e.wrappedErr }
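Because ParseError now carries a wrapped error and implements Unwrap, callers can reach the underlying cause with the errors package. A hedged usage sketch; the zone text and file names are made up.

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	zp := dns.NewZoneParser(strings.NewReader("$INCLUDE missing.db\n"), "example.org.", "db.example")
	zp.SetIncludeAllowed(true)
	for _, ok := zp.Next(); ok; _, ok = zp.Next() {
	}
	// The failed $INCLUDE is reported as a *ParseError wrapping the open error.
	var pathErr *fs.PathError
	if errors.As(zp.Err(), &pathErr) {
		fmt.Println("include failed for:", pathErr.Path)
	}
}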
|
||||
|
||||
type lex struct {
|
||||
token string // text of the token
|
||||
err bool // when true, token text has lexer error
|
||||
|
@ -168,8 +176,9 @@ type ZoneParser struct {
|
|||
// sub is used to parse $INCLUDE files and $GENERATE directives.
|
||||
// Next, by calling subNext, forwards the resulting RRs from this
|
||||
// sub parser to the calling code.
|
||||
sub *ZoneParser
|
||||
osFile *os.File
|
||||
sub *ZoneParser
|
||||
r io.Reader
|
||||
fsys fs.FS
|
||||
|
||||
includeDepth uint8
|
||||
|
||||
|
@ -188,7 +197,7 @@ func NewZoneParser(r io.Reader, origin, file string) *ZoneParser {
|
|||
if origin != "" {
|
||||
origin = Fqdn(origin)
|
||||
if _, ok := IsDomainName(origin); !ok {
|
||||
pe = &ParseError{file, "bad initial origin name", lex{}}
|
||||
pe = &ParseError{file: file, err: "bad initial origin name"}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -220,6 +229,24 @@ func (zp *ZoneParser) SetIncludeAllowed(v bool) {
|
|||
zp.includeAllowed = v
|
||||
}
|
||||
|
||||
// SetIncludeFS provides an [fs.FS] to use when looking for the target of
|
||||
// $INCLUDE directives. ($INCLUDE must still be enabled separately by calling
|
||||
// [ZoneParser.SetIncludeAllowed].) If fsys is nil, [os.Open] will be used.
|
||||
//
|
||||
// When fsys is an on-disk FS, the ability of $INCLUDE to reach files from
|
||||
// outside its root directory depends upon the FS implementation. For
|
||||
// instance, [os.DirFS] will refuse to open paths like "../../etc/passwd",
|
||||
// however it will still follow links which may point anywhere on the system.
|
||||
//
|
||||
// FS paths are slash-separated on all systems, even Windows. $INCLUDE paths
|
||||
// containing other characters such as backslash and colon may be accepted as
|
||||
// valid, but those characters will never be interpreted by an FS
|
||||
// implementation as path element separators. See [fs.ValidPath] for more
|
||||
// details.
|
||||
func (zp *ZoneParser) SetIncludeFS(fsys fs.FS) {
|
||||
zp.fsys = fsys
|
||||
}
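A usage sketch of the new hook, with an in-memory fstest.MapFS standing in for a real directory (os.DirFS would be wired up the same way); file names and record data are illustrative.

package main

import (
	"fmt"
	"strings"
	"testing/fstest"

	"github.com/miekg/dns"
)

func main() {
	fsys := fstest.MapFS{
		"glue.db": {Data: []byte("ns1 3600 IN A 192.0.2.1\n")},
	}

	zone := "$ORIGIN example.org.\n$INCLUDE glue.db\n"
	zp := dns.NewZoneParser(strings.NewReader(zone), "", "zone.db")
	zp.SetIncludeAllowed(true)
	zp.SetIncludeFS(fsys) // nil would fall back to os.Open
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		fmt.Println(rr)
	}
	if err := zp.Err(); err != nil {
		fmt.Println("parse error:", err)
	}
}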
|
||||
|
||||
// Err returns the first non-EOF error that was encountered by the
|
||||
// ZoneParser.
|
||||
func (zp *ZoneParser) Err() error {
|
||||
|
@ -237,7 +264,7 @@ func (zp *ZoneParser) Err() error {
|
|||
}
|
||||
|
||||
func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) {
|
||||
zp.parseErr = &ParseError{zp.file, err, l}
|
||||
zp.parseErr = &ParseError{file: zp.file, err: err, lex: l}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
|
@ -260,9 +287,11 @@ func (zp *ZoneParser) subNext() (RR, bool) {
|
|||
return rr, true
|
||||
}
|
||||
|
||||
if zp.sub.osFile != nil {
|
||||
zp.sub.osFile.Close()
|
||||
zp.sub.osFile = nil
|
||||
if zp.sub.r != nil {
|
||||
if c, ok := zp.sub.r.(io.Closer); ok {
|
||||
c.Close()
|
||||
}
|
||||
zp.sub.r = nil
|
||||
}
|
||||
|
||||
if zp.sub.Err() != nil {
|
||||
|
@ -402,24 +431,44 @@ func (zp *ZoneParser) Next() (RR, bool) {
|
|||
|
||||
// Start with the new file
|
||||
includePath := l.token
|
||||
if !filepath.IsAbs(includePath) {
|
||||
includePath = filepath.Join(filepath.Dir(zp.file), includePath)
|
||||
}
|
||||
|
||||
r1, e1 := os.Open(includePath)
|
||||
if e1 != nil {
|
||||
var as string
|
||||
if !filepath.IsAbs(l.token) {
|
||||
as = fmt.Sprintf(" as `%s'", includePath)
|
||||
var r1 io.Reader
|
||||
var e1 error
|
||||
if zp.fsys != nil {
|
||||
// fs.FS always uses / as separator, even on Windows, so use
|
||||
// path instead of filepath here:
|
||||
if !path.IsAbs(includePath) {
|
||||
includePath = path.Join(path.Dir(zp.file), includePath)
|
||||
}
|
||||
|
||||
msg := fmt.Sprintf("failed to open `%s'%s: %v", l.token, as, e1)
|
||||
return zp.setParseError(msg, l)
|
||||
// os.DirFS, and probably others, expect all paths to be
|
||||
// relative, so clean the path and remove leading / if
|
||||
// present:
|
||||
includePath = strings.TrimLeft(path.Clean(includePath), "/")
|
||||
|
||||
r1, e1 = zp.fsys.Open(includePath)
|
||||
} else {
|
||||
if !filepath.IsAbs(includePath) {
|
||||
includePath = filepath.Join(filepath.Dir(zp.file), includePath)
|
||||
}
|
||||
r1, e1 = os.Open(includePath)
|
||||
}
|
||||
if e1 != nil {
|
||||
var as string
|
||||
if includePath != l.token {
|
||||
as = fmt.Sprintf(" as `%s'", includePath)
|
||||
}
|
||||
zp.parseErr = &ParseError{
|
||||
file: zp.file,
|
||||
wrappedErr: fmt.Errorf("failed to open `%s'%s: %w", l.token, as, e1),
|
||||
lex: l,
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
zp.sub = NewZoneParser(r1, neworigin, includePath)
|
||||
zp.sub.defttl, zp.sub.includeDepth, zp.sub.osFile = zp.defttl, zp.includeDepth+1, r1
|
||||
zp.sub.defttl, zp.sub.includeDepth, zp.sub.r = zp.defttl, zp.includeDepth+1, r1
|
||||
zp.sub.SetIncludeAllowed(true)
|
||||
zp.sub.SetIncludeFS(zp.fsys)
|
||||
return zp.subNext()
|
||||
case zExpectDirTTLBl:
|
||||
if l.value != zBlank {
|
||||
|
@ -605,8 +654,6 @@ func (zp *ZoneParser) Next() (RR, bool) {
|
|||
if !isPrivate && zp.c.Peek().token == "" {
|
||||
// This is a dynamic update rr.
|
||||
|
||||
// TODO(tmthrgd): Previously slurpRemainder was only called
|
||||
// for certain RR types, which may have been important.
|
||||
if err := slurpRemainder(zp.c); err != nil {
|
||||
return zp.setParseError(err.err, err.lex)
|
||||
}
|
||||
|
@ -765,8 +812,8 @@ func (zl *zlexer) Next() (lex, bool) {
|
|||
}
|
||||
|
||||
var (
|
||||
str [maxTok]byte // Hold string text
|
||||
com [maxTok]byte // Hold comment text
|
||||
str = make([]byte, maxTok) // Hold string text
|
||||
com = make([]byte, maxTok) // Hold comment text
|
||||
|
||||
stri int // Offset in str (0 means empty)
|
||||
comi int // Offset in com (0 means empty)
|
||||
|
@ -785,14 +832,12 @@ func (zl *zlexer) Next() (lex, bool) {
|
|||
l.line, l.column = zl.line, zl.column
|
||||
|
||||
if stri >= len(str) {
|
||||
l.token = "token length insufficient for parsing"
|
||||
l.err = true
|
||||
return *l, true
|
||||
// if buffer length is insufficient, increase it.
|
||||
str = append(str[:], make([]byte, maxTok)...)
|
||||
}
|
||||
if comi >= len(com) {
|
||||
l.token = "comment length insufficient for parsing"
|
||||
l.err = true
|
||||
return *l, true
|
||||
// if buffer length is insufficient, increase it.
|
||||
com = append(com[:], make([]byte, maxTok)...)
|
||||
}
|
||||
|
||||
switch x {
|
||||
|
@ -816,7 +861,7 @@ func (zl *zlexer) Next() (lex, bool) {
|
|||
if stri == 0 {
|
||||
// Space directly in the beginning, handled in the grammar
|
||||
} else if zl.owner {
|
||||
// If we have a string and its the first, make it an owner
|
||||
// If we have a string and it's the first, make it an owner
|
||||
l.value = zOwner
|
||||
l.token = string(str[:stri])
|
||||
|
||||
|
@ -1218,42 +1263,34 @@ func stringToCm(token string) (e, m uint8, ok bool) {
|
|||
if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' {
|
||||
token = token[0 : len(token)-1]
|
||||
}
|
||||
s := strings.SplitN(token, ".", 2)
|
||||
var meters, cmeters, val int
|
||||
var err error
|
||||
switch len(s) {
|
||||
case 2:
|
||||
if cmeters, err = strconv.Atoi(s[1]); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
meters, cmeters, val int
|
||||
err error
|
||||
)
|
||||
mStr, cmStr, hasCM := strings.Cut(token, ".")
|
||||
if hasCM {
|
||||
// There's no point in having more than 2 digits in this part, and would rather make the implementation complicated ('123' should be treated as '12').
|
||||
// So we simply reject it.
|
||||
// We also make sure the first character is a digit to reject '+-' signs.
|
||||
if len(s[1]) > 2 || s[1][0] < '0' || s[1][0] > '9' {
|
||||
cmeters, err = strconv.Atoi(cmStr)
|
||||
if err != nil || len(cmStr) > 2 || cmStr[0] < '0' || cmStr[0] > '9' {
|
||||
return
|
||||
}
|
||||
if len(s[1]) == 1 {
|
||||
if len(cmStr) == 1 {
|
||||
// 'nn.1' must be treated as 'nn-meters and 10cm, not 1cm.
// (i.e. 'nn meters and 10 cm', with the missing digit padded, not 1 cm)
|
||||
cmeters *= 10
|
||||
}
|
||||
if s[0] == "" {
|
||||
// This will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm).
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case 1:
|
||||
if meters, err = strconv.Atoi(s[0]); err != nil {
|
||||
return
|
||||
}
|
||||
// RFC1876 states the max value is 90000000.00. The latter two conditions enforce it.
|
||||
if s[0][0] < '0' || s[0][0] > '9' || meters > 90000000 || (meters == 90000000 && cmeters != 0) {
|
||||
return
|
||||
}
|
||||
case 0:
|
||||
// huh?
|
||||
return 0, 0, false
|
||||
}
|
||||
ok = true
|
||||
// This slightly ugly condition will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm).
|
||||
if !hasCM || mStr != "" {
|
||||
meters, err = strconv.Atoi(mStr)
|
||||
// RFC1876 states the max value is 90000000.00. The latter two conditions enforce it.
|
||||
if err != nil || mStr[0] < '0' || mStr[0] > '9' || meters > 90000000 || (meters == 90000000 && cmeters != 0) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if meters > 0 {
|
||||
e = 2
|
||||
val = meters
|
||||
|
@ -1265,8 +1302,7 @@ func stringToCm(token string) (e, m uint8, ok bool) {
|
|||
e++
|
||||
val /= 10
|
||||
}
|
||||
m = uint8(val)
|
||||
return
|
||||
return e, uint8(val), true
|
||||
}
|
||||
|
||||
func toAbsoluteName(name, origin string) (absolute string, ok bool) {
|
||||
|
@ -1339,12 +1375,12 @@ func slurpRemainder(c *zlexer) *ParseError {
|
|||
case zBlank:
|
||||
l, _ = c.Next()
|
||||
if l.value != zNewline && l.value != zEOF {
|
||||
return &ParseError{"", "garbage after rdata", l}
|
||||
return &ParseError{err: "garbage after rdata", lex: l}
|
||||
}
|
||||
case zNewline:
|
||||
case zEOF:
|
||||
default:
|
||||
return &ParseError{"", "garbage after rdata", l}
|
||||
return &ParseError{err: "garbage after rdata", lex: l}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1353,16 +1389,16 @@ func slurpRemainder(c *zlexer) *ParseError {
|
|||
// Used for NID and L64 record.
|
||||
func stringToNodeID(l lex) (uint64, *ParseError) {
|
||||
if len(l.token) < 19 {
|
||||
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
|
||||
return 0, &ParseError{file: l.token, err: "bad NID/L64 NodeID/Locator64", lex: l}
|
||||
}
|
||||
// There must be three colons at fixes positions, if not its a parse error
|
||||
if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' {
|
||||
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
|
||||
return 0, &ParseError{file: l.token, err: "bad NID/L64 NodeID/Locator64", lex: l}
|
||||
}
|
||||
s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
|
||||
u, err := strconv.ParseUint(s, 16, 64)
|
||||
if err != nil {
|
||||
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
|
||||
return 0, &ParseError{file: l.token, err: "bad NID/L64 NodeID/Locator64", lex: l}
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -18,7 +18,7 @@ import (
|
|||
const maxTCPQueries = 128
|
||||
|
||||
// aLongTimeAgo is a non-zero time, far in the past, used for
|
||||
// immediate cancelation of network operations.
|
||||
// immediate cancellation of network operations.
|
||||
var aLongTimeAgo = time.Unix(1, 0)
|
||||
|
||||
// Handler is implemented by any value that implements ServeDNS.
|
||||
|
@ -224,8 +224,12 @@ type Server struct {
|
|||
// Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1).
|
||||
MaxTCPQueries int
|
||||
// Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address.
|
||||
// It is only supported on go1.11+ and when using ListenAndServe.
|
||||
// It is only supported on certain GOOSes and when using ListenAndServe.
|
||||
ReusePort bool
|
||||
// Whether to set the SO_REUSEADDR socket option, allowing multiple listeners to be bound to a single address.
|
||||
// Crucially this allows binding when an existing server is listening on `0.0.0.0` or `::`.
|
||||
// It is only supported on certain GOOSes and when using ListenAndServe.
|
||||
ReuseAddr bool
|
||||
// AcceptMsgFunc will check the incoming message and will reject it early in the process.
|
||||
// By default DefaultMsgAcceptFunc will be used.
|
||||
MsgAcceptFunc MsgAcceptFunc
|
||||
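A configuration sketch for the new field; like ReusePort, ReuseAddr only takes effect on platforms where the library supports it and when using ListenAndServe. The address and handler below are placeholders.

package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		w.WriteMsg(m)
	})

	srv := &dns.Server{
		Addr:      "127.0.0.1:5353",
		Net:       "udp",
		ReusePort: true, // multiple listeners may bind the same address
		ReuseAddr: true, // also allows binding alongside a wildcard listener
	}
	if err := srv.ListenAndServe(); err != nil {
		log.Fatal(err)
	}
}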
|
@ -304,7 +308,7 @@ func (srv *Server) ListenAndServe() error {
|
|||
|
||||
switch srv.Net {
|
||||
case "tcp", "tcp4", "tcp6":
|
||||
l, err := listenTCP(srv.Net, addr, srv.ReusePort)
|
||||
l, err := listenTCP(srv.Net, addr, srv.ReusePort, srv.ReuseAddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -317,7 +321,7 @@ func (srv *Server) ListenAndServe() error {
|
|||
return errors.New("dns: neither Certificates nor GetCertificate set in Config")
|
||||
}
|
||||
network := strings.TrimSuffix(srv.Net, "-tls")
|
||||
l, err := listenTCP(network, addr, srv.ReusePort)
|
||||
l, err := listenTCP(network, addr, srv.ReusePort, srv.ReuseAddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -327,7 +331,7 @@ func (srv *Server) ListenAndServe() error {
|
|||
unlock()
|
||||
return srv.serveTCP(l)
|
||||
case "udp", "udp4", "udp6":
|
||||
l, err := listenUDP(srv.Net, addr, srv.ReusePort)
|
||||
l, err := listenUDP(srv.Net, addr, srv.ReusePort, srv.ReuseAddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -1,61 +0,0 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Adapted for dns package usage by Miek Gieben.
|
||||
|
||||
package dns
|
||||
|
||||
import "sync"
|
||||
import "time"
|
||||
|
||||
// call is an in-flight or completed singleflight.Do call
|
||||
type call struct {
|
||||
wg sync.WaitGroup
|
||||
val *Msg
|
||||
rtt time.Duration
|
||||
err error
|
||||
dups int
|
||||
}
|
||||
|
||||
// singleflight represents a class of work and forms a namespace in
|
||||
// which units of work can be executed with duplicate suppression.
|
||||
type singleflight struct {
|
||||
sync.Mutex // protects m
|
||||
m map[string]*call // lazily initialized
|
||||
|
||||
dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges
|
||||
}
|
||||
|
||||
// Do executes and returns the results of the given function, making
|
||||
// sure that only one execution is in-flight for a given key at a
|
||||
// time. If a duplicate comes in, the duplicate caller waits for the
|
||||
// original to complete and receives the same results.
|
||||
// The return value shared indicates whether v was given to multiple callers.
|
||||
func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
|
||||
g.Lock()
|
||||
if g.m == nil {
|
||||
g.m = make(map[string]*call)
|
||||
}
|
||||
if c, ok := g.m[key]; ok {
|
||||
c.dups++
|
||||
g.Unlock()
|
||||
c.wg.Wait()
|
||||
return c.val, c.rtt, c.err, true
|
||||
}
|
||||
c := new(call)
|
||||
c.wg.Add(1)
|
||||
g.m[key] = c
|
||||
g.Unlock()
|
||||
|
||||
c.val, c.rtt, c.err = fn()
|
||||
c.wg.Done()
|
||||
|
||||
if !g.dontDeleteForTesting {
|
||||
g.Lock()
|
||||
delete(g.m, key)
|
||||
g.Unlock()
|
||||
}
|
||||
|
||||
return c.val, c.rtt, c.err, c.dups > 0
|
||||
}
|
|
@ -85,7 +85,7 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError {
|
|||
l, _ := c.Next()
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return &ParseError{l.token, "bad SVCB priority", l}
|
||||
return &ParseError{file: l.token, err: "bad SVCB priority", lex: l}
|
||||
}
|
||||
rr.Priority = uint16(i)
|
||||
|
||||
|
@ -95,7 +95,7 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError {
|
|||
|
||||
name, nameOk := toAbsoluteName(l.token, o)
|
||||
if l.err || !nameOk {
|
||||
return &ParseError{l.token, "bad SVCB Target", l}
|
||||
return &ParseError{file: l.token, err: "bad SVCB Target", lex: l}
|
||||
}
|
||||
rr.Target = name
|
||||
|
||||
|
@ -111,7 +111,7 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError {
|
|||
if !canHaveNextKey {
|
||||
// The key we can now read was probably meant to be
|
||||
// a part of the last value.
|
||||
return &ParseError{l.token, "bad SVCB value quotation", l}
|
||||
return &ParseError{file: l.token, err: "bad SVCB value quotation", lex: l}
|
||||
}
|
||||
|
||||
// In key=value pairs, value does not have to be quoted unless value
|
||||
|
@ -124,7 +124,7 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError {
|
|||
// Key with no value and no equality sign
|
||||
key = l.token
|
||||
} else if idx == 0 {
|
||||
return &ParseError{l.token, "bad SVCB key", l}
|
||||
return &ParseError{file: l.token, err: "bad SVCB key", lex: l}
|
||||
} else {
|
||||
key, value = l.token[:idx], l.token[idx+1:]
|
||||
|
||||
|
@ -144,30 +144,30 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError {
|
|||
value = l.token
|
||||
l, _ = c.Next()
|
||||
if l.value != zQuote {
|
||||
return &ParseError{l.token, "SVCB unterminated value", l}
|
||||
return &ParseError{file: l.token, err: "SVCB unterminated value", lex: l}
|
||||
}
|
||||
case zQuote:
|
||||
// There's nothing in double quotes.
|
||||
default:
|
||||
return &ParseError{l.token, "bad SVCB value", l}
|
||||
return &ParseError{file: l.token, err: "bad SVCB value", lex: l}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
kv := makeSVCBKeyValue(svcbStringToKey(key))
|
||||
if kv == nil {
|
||||
return &ParseError{l.token, "bad SVCB key", l}
|
||||
return &ParseError{file: l.token, err: "bad SVCB key", lex: l}
|
||||
}
|
||||
if err := kv.parse(value); err != nil {
|
||||
return &ParseError{l.token, err.Error(), l}
|
||||
return &ParseError{file: l.token, wrappedErr: err, lex: l}
|
||||
}
|
||||
xs = append(xs, kv)
|
||||
case zQuote:
|
||||
return &ParseError{l.token, "SVCB key can't contain double quotes", l}
|
||||
return &ParseError{file: l.token, err: "SVCB key can't contain double quotes", lex: l}
|
||||
case zBlank:
|
||||
canHaveNextKey = true
|
||||
default:
|
||||
return &ParseError{l.token, "bad SVCB values", l}
|
||||
return &ParseError{file: l.token, err: "bad SVCB values", lex: l}
|
||||
}
|
||||
l, _ = c.Next()
|
||||
}
|
||||
|
@ -289,7 +289,7 @@ func (s *SVCBMandatory) String() string {
|
|||
}
|
||||
|
||||
func (s *SVCBMandatory) pack() ([]byte, error) {
|
||||
codes := append([]SVCBKey(nil), s.Code...)
|
||||
codes := cloneSlice(s.Code)
|
||||
sort.Slice(codes, func(i, j int) bool {
|
||||
return codes[i] < codes[j]
|
||||
})
|
||||
|
@ -314,10 +314,11 @@ func (s *SVCBMandatory) unpack(b []byte) error {
|
|||
}
|
||||
|
||||
func (s *SVCBMandatory) parse(b string) error {
|
||||
str := strings.Split(b, ",")
|
||||
codes := make([]SVCBKey, 0, len(str))
|
||||
for _, e := range str {
|
||||
codes = append(codes, svcbStringToKey(e))
|
||||
codes := make([]SVCBKey, 0, strings.Count(b, ",")+1)
|
||||
for len(b) > 0 {
|
||||
var key string
|
||||
key, b, _ = strings.Cut(b, ",")
|
||||
codes = append(codes, svcbStringToKey(key))
|
||||
}
|
||||
s.Code = codes
|
||||
return nil
|
||||
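The rewritten parsers above share one idiom: walk a comma-separated list with strings.Cut instead of allocating the intermediate slice strings.Split would build. A standalone sketch with illustrative names.

package main

import (
	"fmt"
	"strings"
)

// forEachComma visits the comma-separated items of list in order without
// building a []string first, mirroring the loops in SVCBMandatory.parse and
// the ipv4hint/ipv6hint parsers.
func forEachComma(list string, fn func(string)) {
	for len(list) > 0 {
		var item string
		item, list, _ = strings.Cut(list, ",")
		fn(item)
	}
}

func main() {
	forEachComma("alpn,ipv4hint,port", func(s string) { fmt.Println(s) })
}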
|
@ -328,9 +329,7 @@ func (s *SVCBMandatory) len() int {
|
|||
}
|
||||
|
||||
func (s *SVCBMandatory) copy() SVCBKeyValue {
|
||||
return &SVCBMandatory{
|
||||
append([]SVCBKey(nil), s.Code...),
|
||||
}
|
||||
return &SVCBMandatory{cloneSlice(s.Code)}
|
||||
}
|
||||
|
||||
// SVCBAlpn pair is used to list supported connection protocols.
|
||||
|
@ -353,7 +352,7 @@ func (*SVCBAlpn) Key() SVCBKey { return SVCB_ALPN }
|
|||
func (s *SVCBAlpn) String() string {
|
||||
// An ALPN value is a comma-separated list of values, each of which can be
|
||||
// an arbitrary binary value. In order to allow parsing, the comma and
|
||||
// backslash characters are themselves excaped.
|
||||
// backslash characters are themselves escaped.
|
||||
//
|
||||
// However, this escaping is done in addition to the normal escaping which
|
||||
// happens in zone files, meaning that these values must be
|
||||
|
@ -481,9 +480,7 @@ func (s *SVCBAlpn) len() int {
|
|||
}
|
||||
|
||||
func (s *SVCBAlpn) copy() SVCBKeyValue {
|
||||
return &SVCBAlpn{
|
||||
append([]string(nil), s.Alpn...),
|
||||
}
|
||||
return &SVCBAlpn{cloneSlice(s.Alpn)}
|
||||
}
|
||||
|
||||
// SVCBNoDefaultAlpn pair signifies no support for default connection protocols.
|
||||
|
@ -563,15 +560,15 @@ func (s *SVCBPort) parse(b string) error {
|
|||
// to the hinted IP address may be terminated and a new connection may be opened.
|
||||
// Basic use pattern for creating an ipv4hint option:
|
||||
//
|
||||
// h := new(dns.HTTPS)
|
||||
// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET}
|
||||
// e := new(dns.SVCBIPv4Hint)
|
||||
// e.Hint = []net.IP{net.IPv4(1,1,1,1).To4()}
|
||||
// h := new(dns.HTTPS)
|
||||
// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET}
|
||||
// e := new(dns.SVCBIPv4Hint)
|
||||
// e.Hint = []net.IP{net.IPv4(1,1,1,1).To4()}
|
||||
//
|
||||
// Or
|
||||
// Or
|
||||
//
|
||||
// e.Hint = []net.IP{net.ParseIP("1.1.1.1").To4()}
|
||||
// h.Value = append(h.Value, e)
|
||||
// e.Hint = []net.IP{net.ParseIP("1.1.1.1").To4()}
|
||||
// h.Value = append(h.Value, e)
|
||||
type SVCBIPv4Hint struct {
|
||||
Hint []net.IP
|
||||
}
|
||||
|
@ -595,6 +592,7 @@ func (s *SVCBIPv4Hint) unpack(b []byte) error {
|
|||
if len(b) == 0 || len(b)%4 != 0 {
|
||||
return errors.New("dns: svcbipv4hint: ipv4 address byte array length is not a multiple of 4")
|
||||
}
|
||||
b = cloneSlice(b)
|
||||
x := make([]net.IP, 0, len(b)/4)
|
||||
for i := 0; i < len(b); i += 4 {
|
||||
x = append(x, net.IP(b[i:i+4]))
|
||||
|
@ -616,31 +614,33 @@ func (s *SVCBIPv4Hint) String() string {
|
|||
}
|
||||
|
||||
func (s *SVCBIPv4Hint) parse(b string) error {
|
||||
if b == "" {
|
||||
return errors.New("dns: svcbipv4hint: empty hint")
|
||||
}
|
||||
if strings.Contains(b, ":") {
|
||||
return errors.New("dns: svcbipv4hint: expected ipv4, got ipv6")
|
||||
}
|
||||
str := strings.Split(b, ",")
|
||||
dst := make([]net.IP, len(str))
|
||||
for i, e := range str {
|
||||
|
||||
hint := make([]net.IP, 0, strings.Count(b, ",")+1)
|
||||
for len(b) > 0 {
|
||||
var e string
|
||||
e, b, _ = strings.Cut(b, ",")
|
||||
ip := net.ParseIP(e).To4()
|
||||
if ip == nil {
|
||||
return errors.New("dns: svcbipv4hint: bad ip")
|
||||
}
|
||||
dst[i] = ip
|
||||
hint = append(hint, ip)
|
||||
}
|
||||
s.Hint = dst
|
||||
s.Hint = hint
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBIPv4Hint) copy() SVCBKeyValue {
|
||||
hint := make([]net.IP, len(s.Hint))
|
||||
for i, ip := range s.Hint {
|
||||
hint[i] = copyIP(ip)
|
||||
}
|
||||
|
||||
return &SVCBIPv4Hint{
|
||||
Hint: hint,
|
||||
hint[i] = cloneSlice(ip)
|
||||
}
|
||||
return &SVCBIPv4Hint{Hint: hint}
|
||||
}
|
||||
|
||||
// SVCBECHConfig pair contains the ECHConfig structure defined in draft-ietf-tls-esni [RFC xxxx].
|
||||
|
@ -660,19 +660,18 @@ func (s *SVCBECHConfig) String() string { return toBase64(s.ECH) }
|
|||
func (s *SVCBECHConfig) len() int { return len(s.ECH) }
|
||||
|
||||
func (s *SVCBECHConfig) pack() ([]byte, error) {
|
||||
return append([]byte(nil), s.ECH...), nil
|
||||
return cloneSlice(s.ECH), nil
|
||||
}
|
||||
|
||||
func (s *SVCBECHConfig) copy() SVCBKeyValue {
|
||||
return &SVCBECHConfig{
|
||||
append([]byte(nil), s.ECH...),
|
||||
}
|
||||
return &SVCBECHConfig{cloneSlice(s.ECH)}
|
||||
}
|
||||
|
||||
func (s *SVCBECHConfig) unpack(b []byte) error {
|
||||
s.ECH = append([]byte(nil), b...)
|
||||
s.ECH = cloneSlice(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBECHConfig) parse(b string) error {
|
||||
x, err := fromBase64([]byte(b))
|
||||
if err != nil {
|
||||
|
@ -715,6 +714,7 @@ func (s *SVCBIPv6Hint) unpack(b []byte) error {
|
|||
if len(b) == 0 || len(b)%16 != 0 {
|
||||
return errors.New("dns: svcbipv6hint: ipv6 address byte array length not a multiple of 16")
|
||||
}
|
||||
b = cloneSlice(b)
|
||||
x := make([]net.IP, 0, len(b)/16)
|
||||
for i := 0; i < len(b); i += 16 {
|
||||
ip := net.IP(b[i : i+16])
|
||||
|
@ -739,9 +739,14 @@ func (s *SVCBIPv6Hint) String() string {
|
|||
}
|
||||
|
||||
func (s *SVCBIPv6Hint) parse(b string) error {
|
||||
str := strings.Split(b, ",")
|
||||
dst := make([]net.IP, len(str))
|
||||
for i, e := range str {
|
||||
if b == "" {
|
||||
return errors.New("dns: svcbipv6hint: empty hint")
|
||||
}
|
||||
|
||||
hint := make([]net.IP, 0, strings.Count(b, ",")+1)
|
||||
for len(b) > 0 {
|
||||
var e string
|
||||
e, b, _ = strings.Cut(b, ",")
|
||||
ip := net.ParseIP(e)
|
||||
if ip == nil {
|
||||
return errors.New("dns: svcbipv6hint: bad ip")
|
||||
|
@ -749,21 +754,18 @@ func (s *SVCBIPv6Hint) parse(b string) error {
|
|||
if ip.To4() != nil {
|
||||
return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4-mapped-ipv6")
|
||||
}
|
||||
dst[i] = ip
|
||||
hint = append(hint, ip)
|
||||
}
|
||||
s.Hint = dst
|
||||
s.Hint = hint
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBIPv6Hint) copy() SVCBKeyValue {
|
||||
hint := make([]net.IP, len(s.Hint))
|
||||
for i, ip := range s.Hint {
|
||||
hint[i] = copyIP(ip)
|
||||
}
|
||||
|
||||
return &SVCBIPv6Hint{
|
||||
Hint: hint,
|
||||
hint[i] = cloneSlice(ip)
|
||||
}
|
||||
return &SVCBIPv6Hint{Hint: hint}
|
||||
}
|
||||
|
||||
// SVCBDoHPath pair is used to indicate the URI template that the
|
||||
|
@ -831,11 +833,11 @@ type SVCBLocal struct {
|
|||
|
||||
func (s *SVCBLocal) Key() SVCBKey { return s.KeyCode }
|
||||
func (s *SVCBLocal) String() string { return svcbParamToStr(s.Data) }
|
||||
func (s *SVCBLocal) pack() ([]byte, error) { return append([]byte(nil), s.Data...), nil }
|
||||
func (s *SVCBLocal) pack() ([]byte, error) { return cloneSlice(s.Data), nil }
|
||||
func (s *SVCBLocal) len() int { return len(s.Data) }
|
||||
|
||||
func (s *SVCBLocal) unpack(b []byte) error {
|
||||
s.Data = append([]byte(nil), b...)
|
||||
s.Data = cloneSlice(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -849,9 +851,7 @@ func (s *SVCBLocal) parse(b string) error {
|
|||
}
|
||||
|
||||
func (s *SVCBLocal) copy() SVCBKeyValue {
|
||||
return &SVCBLocal{s.KeyCode,
|
||||
append([]byte(nil), s.Data...),
|
||||
}
|
||||
return &SVCBLocal{s.KeyCode, cloneSlice(s.Data)}
|
||||
}
|
||||
|
||||
func (rr *SVCB) String() string {
|
||||
|
@ -867,8 +867,8 @@ func (rr *SVCB) String() string {
|
|||
// areSVCBPairArraysEqual checks if SVCBKeyValue arrays are equal after sorting their
|
||||
// copies. arrA and arrB have equal lengths, otherwise zduplicate.go wouldn't call this function.
|
||||
func areSVCBPairArraysEqual(a []SVCBKeyValue, b []SVCBKeyValue) bool {
|
||||
a = append([]SVCBKeyValue(nil), a...)
|
||||
b = append([]SVCBKeyValue(nil), b...)
|
||||
a = cloneSlice(a)
|
||||
b = cloneSlice(b)
|
||||
sort.Slice(a, func(i, j int) bool { return a[i].Key() < a[j].Key() })
|
||||
sort.Slice(b, func(i, j int) bool { return b[i].Key() < b[j].Key() })
|
||||
for i, e := range a {
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
//go:build tools
|
||||
// +build tools
|
||||
|
||||
// We include our tool dependencies for `go generate` here to ensure they're
|
||||
|
|
|
@ -65,6 +65,7 @@ const (
|
|||
TypeAPL uint16 = 42
|
||||
TypeDS uint16 = 43
|
||||
TypeSSHFP uint16 = 44
|
||||
TypeIPSECKEY uint16 = 45
|
||||
TypeRRSIG uint16 = 46
|
||||
TypeNSEC uint16 = 47
|
||||
TypeDNSKEY uint16 = 48
|
||||
|
@ -98,6 +99,7 @@ const (
|
|||
TypeURI uint16 = 256
|
||||
TypeCAA uint16 = 257
|
||||
TypeAVC uint16 = 258
|
||||
TypeAMTRELAY uint16 = 260
|
||||
|
||||
TypeTKEY uint16 = 249
|
||||
TypeTSIG uint16 = 250
|
||||
|
@ -133,8 +135,8 @@ const (
|
|||
RcodeNXRrset = 8 // NXRRSet - RR Set that should exist does not [DNS Update]
|
||||
RcodeNotAuth = 9 // NotAuth - Server Not Authoritative for zone [DNS Update]
|
||||
RcodeNotZone = 10 // NotZone - Name not contained in zone [DNS Update/TSIG]
|
||||
RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG]
|
||||
RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0]
|
||||
RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG] https://www.rfc-editor.org/rfc/rfc6895.html#section-2.3
|
||||
RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0] https://www.rfc-editor.org/rfc/rfc6895.html#section-2.3
|
||||
RcodeBadKey = 17 // BADKEY - Key not recognized [TSIG]
|
||||
RcodeBadTime = 18 // BADTIME - Signature out of time window [TSIG]
|
||||
RcodeBadMode = 19 // BADMODE - Bad TKEY Mode [TKEY]
|
||||
|
@ -159,6 +161,22 @@ const (
|
|||
ZoneMDHashAlgSHA512 = 2
|
||||
)
|
||||
|
||||
// Used in IPSEC https://datatracker.ietf.org/doc/html/rfc4025#section-2.3
|
||||
const (
|
||||
IPSECGatewayNone uint8 = iota
|
||||
IPSECGatewayIPv4
|
||||
IPSECGatewayIPv6
|
||||
IPSECGatewayHost
|
||||
)
|
||||
|
||||
// Used in AMTRELAY https://datatracker.ietf.org/doc/html/rfc8777#section-4.2.3
|
||||
const (
|
||||
AMTRELAYNone = IPSECGatewayNone
|
||||
AMTRELAYIPv4 = IPSECGatewayIPv4
|
||||
AMTRELAYIPv6 = IPSECGatewayIPv6
|
||||
AMTRELAYHost = IPSECGatewayHost
|
||||
)
|
||||
|
||||
// Header is the wire format for the DNS packet header.
|
||||
type Header struct {
|
||||
Id uint16
|
||||
|
@ -180,7 +198,7 @@ const (
|
|||
_CD = 1 << 4 // checking disabled
|
||||
)
|
||||
|
||||
// Various constants used in the LOC RR. See RFC 1887.
|
||||
// Various constants used in the LOC RR. See RFC 1876.
|
||||
const (
|
||||
LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2.
|
||||
LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2.
|
||||
|
@ -218,6 +236,9 @@ var CertTypeToString = map[uint16]string{
|
|||
CertOID: "OID",
|
||||
}
|
||||
|
||||
// Prefix for IPv4 encoded as IPv6 address
|
||||
const ipv4InIPv6Prefix = "::ffff:"
|
||||
|
||||
//go:generate go run types_generate.go
|
||||
|
||||
// Question holds a DNS question. Usually there is just one. While the
|
||||
|
@ -381,6 +402,17 @@ func (rr *X25) String() string {
|
|||
return rr.Hdr.String() + rr.PSDNAddress
|
||||
}
|
||||
|
||||
// ISDN RR. See RFC 1183, Section 3.2.
|
||||
type ISDN struct {
|
||||
Hdr RR_Header
|
||||
Address string
|
||||
SubAddress string
|
||||
}
|
||||
|
||||
func (rr *ISDN) String() string {
|
||||
return rr.Hdr.String() + sprintTxt([]string{rr.Address, rr.SubAddress})
|
||||
}
|
||||
|
||||
// RT RR. See RFC 1183, Section 3.3.
|
||||
type RT struct {
|
||||
Hdr RR_Header
|
||||
|
@ -613,8 +645,8 @@ func nextByte(s string, offset int) (byte, int) {
|
|||
return 0, 0
|
||||
case 2, 3: // too short to be \ddd
|
||||
default: // maybe \ddd
|
||||
if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) {
|
||||
return dddStringToByte(s[offset+1:]), 4
|
||||
if isDDD(s[offset+1:]) {
|
||||
return dddToByte(s[offset+1:]), 4
|
||||
}
|
||||
}
|
||||
// not \ddd, just an RFC 1035 "quoted" character
|
||||
|
@ -733,6 +765,11 @@ func (rr *AAAA) String() string {
|
|||
if rr.AAAA == nil {
|
||||
return rr.Hdr.String()
|
||||
}
|
||||
|
||||
if rr.AAAA.To4() != nil {
|
||||
return rr.Hdr.String() + ipv4InIPv6Prefix + rr.AAAA.String()
|
||||
}
|
||||
|
||||
return rr.Hdr.String() + rr.AAAA.String()
|
||||
}
|
||||
|
||||
|
@ -760,7 +797,7 @@ func (rr *GPOS) String() string {
|
|||
return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude
|
||||
}
|
||||
|
||||
// LOC RR. See RFC RFC 1876.
|
||||
// LOC RR. See RFC 1876.
|
||||
type LOC struct {
|
||||
Hdr RR_Header
|
||||
Version uint8
|
||||
|
@ -774,7 +811,10 @@ type LOC struct {
|
|||
|
||||
// cmToM takes a cm value expressed in RFC 1876 SIZE mantissa/exponent
|
||||
// format and returns a string in m (two decimals for the cm).
|
||||
func cmToM(m, e uint8) string {
|
||||
func cmToM(x uint8) string {
|
||||
m := x & 0xf0 >> 4
|
||||
e := x & 0x0f
|
||||
|
||||
if e < 2 {
|
||||
if e == 1 {
|
||||
m *= 10
|
||||
|
@ -830,10 +870,9 @@ func (rr *LOC) String() string {
|
|||
s += fmt.Sprintf("%.0fm ", alt)
|
||||
}
|
||||
|
||||
s += cmToM(rr.Size&0xf0>>4, rr.Size&0x0f) + "m "
|
||||
s += cmToM(rr.HorizPre&0xf0>>4, rr.HorizPre&0x0f) + "m "
|
||||
s += cmToM(rr.VertPre&0xf0>>4, rr.VertPre&0x0f) + "m"
|
||||
|
||||
s += cmToM(rr.Size) + "m "
|
||||
s += cmToM(rr.HorizPre) + "m "
|
||||
s += cmToM(rr.VertPre) + "m"
|
||||
return s
|
||||
}
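For illustration, the reworked helper now takes the packed RFC 1876 octet (mantissa in the high nibble, base-10 exponent in the low nibble) directly, so LOC.String above no longer unpacks the nibbles itself. A sketch of the decoding under a hypothetical name, with a few decoded values.

package main

import "fmt"

// cmToMSketch decodes m*10^e centimetres into a metre string, with two
// decimals for the sub-metre range, the way the unexported cmToM does.
func cmToMSketch(x uint8) string {
	m := x & 0xf0 >> 4
	e := x & 0x0f
	if e < 2 {
		if e == 1 {
			m *= 10
		}
		return fmt.Sprintf("0.%02d", m)
	}
	s := fmt.Sprintf("%d", m)
	for e > 2 {
		s += "0"
		e--
	}
	return s
}

func main() {
	fmt.Println(cmToMSketch(0x12)) // 1e2 cm -> "1"
	fmt.Println(cmToMSketch(0x13)) // 1e3 cm -> "10"
	fmt.Println(cmToMSketch(0x51)) // 5e1 cm -> "0.50"
}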
|
||||
|
||||
|
@ -870,6 +909,11 @@ func (rr *RRSIG) String() string {
|
|||
return s
|
||||
}
|
||||
|
||||
// NXT RR. See RFC 2535.
|
||||
type NXT struct {
|
||||
NSEC
|
||||
}
|
||||
|
||||
// NSEC RR. See RFC 4034 and RFC 3755.
|
||||
type NSEC struct {
|
||||
Hdr RR_Header
|
||||
|
@ -954,7 +998,7 @@ func (rr *TALINK) String() string {
|
|||
sprintName(rr.PreviousName) + " " + sprintName(rr.NextName)
|
||||
}
|
||||
|
||||
// SSHFP RR. See RFC RFC 4255.
|
||||
// SSHFP RR. See RFC 4255.
|
||||
type SSHFP struct {
|
||||
Hdr RR_Header
|
||||
Algorithm uint8
|
||||
|
@ -968,7 +1012,7 @@ func (rr *SSHFP) String() string {
|
|||
" " + strings.ToUpper(rr.FingerPrint)
|
||||
}
|
||||
|
||||
// KEY RR. See RFC RFC 2535.
|
||||
// KEY RR. See RFC 2535.
|
||||
type KEY struct {
|
||||
DNSKEY
|
||||
}
|
||||
|
@ -994,6 +1038,69 @@ func (rr *DNSKEY) String() string {
|
|||
" " + rr.PublicKey
|
||||
}
|
||||
|
||||
// IPSECKEY RR. See RFC 4025.
|
||||
type IPSECKEY struct {
|
||||
Hdr RR_Header
|
||||
Precedence uint8
|
||||
GatewayType uint8
|
||||
Algorithm uint8
|
||||
GatewayAddr net.IP `dns:"-"` // packing/unpacking/parsing/etc handled together with GatewayHost
|
||||
GatewayHost string `dns:"ipsechost"`
|
||||
PublicKey string `dns:"base64"`
|
||||
}
|
||||
|
||||
func (rr *IPSECKEY) String() string {
|
||||
var gateway string
|
||||
switch rr.GatewayType {
|
||||
case IPSECGatewayIPv4, IPSECGatewayIPv6:
|
||||
gateway = rr.GatewayAddr.String()
|
||||
case IPSECGatewayHost:
|
||||
gateway = rr.GatewayHost
|
||||
case IPSECGatewayNone:
|
||||
fallthrough
|
||||
default:
|
||||
gateway = "."
|
||||
}
|
||||
|
||||
return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) +
|
||||
" " + strconv.Itoa(int(rr.GatewayType)) +
|
||||
" " + strconv.Itoa(int(rr.Algorithm)) +
|
||||
" " + gateway +
|
||||
" " + rr.PublicKey
|
||||
}
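The new record type round-trips through the normal RR text interface. A hedged sketch, with record data taken from the RFC 4025 examples (gateway type 3 means a domain-name gateway):

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("example.org. 3600 IN IPSECKEY 10 3 2 gateway.example.org. AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==")
	if err != nil {
		log.Fatal(err)
	}
	key := rr.(*dns.IPSECKEY)
	fmt.Println(key.Precedence, key.GatewayType, key.GatewayHost)
	fmt.Println(rr) // re-rendered through the String method above
}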
|
||||
|
||||
// AMTRELAY RR. See RFC 8777.
|
||||
type AMTRELAY struct {
|
||||
Hdr RR_Header
|
||||
Precedence uint8
|
||||
GatewayType uint8 // discovery is packed in here at bit 0x80
|
||||
GatewayAddr net.IP `dns:"-"` // packing/unpacking/parsing/etc handled together with GatewayHost
|
||||
GatewayHost string `dns:"amtrelayhost"`
|
||||
}
|
||||
|
||||
func (rr *AMTRELAY) String() string {
|
||||
var gateway string
|
||||
switch rr.GatewayType & 0x7f {
|
||||
case AMTRELAYIPv4, AMTRELAYIPv6:
|
||||
gateway = rr.GatewayAddr.String()
|
||||
case AMTRELAYHost:
|
||||
gateway = rr.GatewayHost
|
||||
case AMTRELAYNone:
|
||||
fallthrough
|
||||
default:
|
||||
gateway = "."
|
||||
}
|
||||
boolS := "0"
|
||||
if rr.GatewayType&0x80 == 0x80 {
|
||||
boolS = "1"
|
||||
}
|
||||
|
||||
return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) +
|
||||
" " + boolS +
|
||||
" " + strconv.Itoa(int(rr.GatewayType&0x7f)) +
|
||||
" " + gateway
|
||||
}
|
||||
|
||||
// RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template.
|
||||
type RKEY struct {
|
||||
Hdr RR_Header
|
||||
|
@ -1215,7 +1322,7 @@ type NINFO struct {
|
|||
|
||||
func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) }
|
||||
|
||||
// NID RR. See RFC RFC 6742.
|
||||
// NID RR. See RFC 6742.
|
||||
type NID struct {
|
||||
Hdr RR_Header
|
||||
Preference uint16
|
||||
|
@ -1434,7 +1541,7 @@ func (a *APLPrefix) str() string {
|
|||
case net.IPv6len:
|
||||
// add prefix for IPv4-mapped IPv6
|
||||
if v4 := a.Network.IP.To4(); v4 != nil {
|
||||
sb.WriteString("::ffff:")
|
||||
sb.WriteString(ipv4InIPv6Prefix)
|
||||
}
|
||||
sb.WriteString(a.Network.IP.String())
|
||||
}
|
||||
|
@ -1450,7 +1557,7 @@ func (a *APLPrefix) str() string {
|
|||
// equals reports whether two APL prefixes are identical.
|
||||
func (a *APLPrefix) equals(b *APLPrefix) bool {
|
||||
return a.Negation == b.Negation &&
|
||||
bytes.Equal(a.Network.IP, b.Network.IP) &&
|
||||
a.Network.IP.Equal(b.Network.IP) &&
|
||||
bytes.Equal(a.Network.Mask, b.Network.Mask)
|
||||
}
|
||||
|
||||
|
@ -1518,21 +1625,19 @@ func euiToString(eui uint64, bits int) (hex string) {
|
|||
return
|
||||
}
|
||||
|
||||
// copyIP returns a copy of ip.
|
||||
func copyIP(ip net.IP) net.IP {
|
||||
p := make(net.IP, len(ip))
|
||||
copy(p, ip)
|
||||
return p
|
||||
// cloneSlice returns a shallow copy of s.
|
||||
func cloneSlice[E any, S ~[]E](s S) S {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return append(S(nil), s...)
|
||||
}
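Behavioural sketch of the generic replacement for copyIP: it preserves nil and returns an independent backing array for anything slice-shaped. The helper body is copied from the hunk above; the main function is illustrative.

package main

import (
	"fmt"
	"net"
)

func cloneSlice[E any, S ~[]E](s S) S {
	if s == nil {
		return nil
	}
	return append(S(nil), s...)
}

func main() {
	ip := net.IP{192, 0, 2, 1}
	cp := cloneSlice(ip)
	cp[3] = 99 // does not touch the original
	fmt.Println(ip, cp)                         // 192.0.2.1 192.0.2.99
	fmt.Println(cloneSlice(net.IP(nil)) == nil) // nil in, nil out
}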
|
||||
|
||||
// copyNet returns a copy of a subnet.
|
||||
func copyNet(n net.IPNet) net.IPNet {
|
||||
m := make(net.IPMask, len(n.Mask))
|
||||
copy(m, n.Mask)
|
||||
|
||||
return net.IPNet{
|
||||
IP: copyIP(n.IP),
|
||||
Mask: m,
|
||||
IP: cloneSlice(n.IP),
|
||||
Mask: cloneSlice(n.Mask),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package dns
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
// TODO(tmthrgd): Remove this Windows-specific code if go.dev/issue/7175 and
|
||||
// go.dev/issue/7174 are ever fixed.
|
||||
|
||||
package dns
|
||||
|
||||
import "net"
|
||||
|
@ -14,7 +18,6 @@ func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
|
|||
|
||||
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
|
||||
// net.UDPAddr.
|
||||
// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP.
|
||||
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
|
||||
n, raddr, err := conn.ReadFrom(b)
|
||||
if err != nil {
|
||||
|
@ -24,12 +27,9 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
|
|||
}
|
||||
|
||||
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
|
||||
// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP.
|
||||
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
|
||||
return conn.WriteTo(b, session.raddr)
|
||||
}
|
||||
|
||||
// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods
|
||||
// use the standard method in udp.go for these.
|
||||
func setUDPSocketOptions(*net.UDPConn) error { return nil }
|
||||
func parseDstFromOOB([]byte, net.IP) net.IP { return nil }
|
||||
|
|
|
@ -3,7 +3,7 @@ package dns
|
|||
import "fmt"
|
||||
|
||||
// Version is current version of this library.
|
||||
var Version = v{1, 1, 50}
|
||||
var Version = v{1, 1, 58}
|
||||
|
||||
// v holds the version of this library.
|
||||
type v struct {
|
||||
|
|
|
@ -44,7 +44,6 @@ func (t *Transfer) tsigProvider() TsigProvider {
|
|||
// dnscon := &dns.Conn{Conn:con}
|
||||
// transfer = &dns.Transfer{Conn: dnscon}
|
||||
// channel, err := transfer.In(message, master)
|
||||
//
|
||||
func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
|
||||
switch q.Question[0].Qtype {
|
||||
case TypeAXFR, TypeIXFR:
|
||||
|
@ -81,8 +80,13 @@ func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
|
|||
|
||||
func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) {
|
||||
first := true
|
||||
defer t.Close()
|
||||
defer close(c)
|
||||
defer func() {
|
||||
// First close the connection, then the channel. This allows functions blocked on
|
||||
// the channel to assume that the connection is closed and no further operations are
|
||||
// pending when they resume.
|
||||
t.Close()
|
||||
close(c)
|
||||
}()
|
||||
timeout := dnsTimeout
|
||||
if t.ReadTimeout != 0 {
|
||||
timeout = t.ReadTimeout
|
||||
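A consumer-side sketch of why the new close ordering matters: once the envelope channel is closed, the connection behind the transfer has already been closed, so the caller can immediately reuse or re-dial the address. The server name and zone are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetAxfr("example.org.")

	tr := new(dns.Transfer)
	env, err := tr.In(m, "ns1.example.org:53")
	if err != nil {
		log.Fatal(err)
	}
	for e := range env {
		if e.Error != nil {
			log.Println("transfer aborted:", e.Error)
			break
		}
		for _, rr := range e.RR {
			fmt.Println(rr)
		}
	}
	// The range ends when the channel is closed, which now happens strictly
	// after the underlying connection has been closed.
}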
|
@ -132,8 +136,13 @@ func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) {
|
|||
axfr := true
|
||||
n := 0
|
||||
qser := q.Ns[0].(*SOA).Serial
|
||||
defer t.Close()
|
||||
defer close(c)
|
||||
defer func() {
|
||||
// First close the connection, then the channel. This allows functions blocked on
|
||||
// the channel to assume that the connection is closed and no further operations are
|
||||
// pending when they resume.
|
||||
t.Close()
|
||||
close(c)
|
||||
}()
|
||||
timeout := dnsTimeout
|
||||
if t.ReadTimeout != 0 {
|
||||
timeout = t.ReadTimeout
|
||||
|
|
|
@ -43,6 +43,32 @@ func (r1 *AFSDB) isDuplicate(_r2 RR) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (r1 *AMTRELAY) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*AMTRELAY)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if r1.Precedence != r2.Precedence {
|
||||
return false
|
||||
}
|
||||
if r1.GatewayType != r2.GatewayType {
|
||||
return false
|
||||
}
|
||||
switch r1.GatewayType {
|
||||
case IPSECGatewayIPv4, IPSECGatewayIPv6:
|
||||
if !r1.GatewayAddr.Equal(r2.GatewayAddr) {
|
||||
return false
|
||||
}
|
||||
case IPSECGatewayHost:
|
||||
if !isDuplicateName(r1.GatewayHost, r2.GatewayHost) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *ANY) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*ANY)
|
||||
if !ok {
|
||||
|
@ -423,6 +449,53 @@ func (r1 *HTTPS) isDuplicate(_r2 RR) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (r1 *IPSECKEY) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*IPSECKEY)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if r1.Precedence != r2.Precedence {
|
||||
return false
|
||||
}
|
||||
if r1.GatewayType != r2.GatewayType {
|
||||
return false
|
||||
}
|
||||
if r1.Algorithm != r2.Algorithm {
|
||||
return false
|
||||
}
|
||||
switch r1.GatewayType {
|
||||
case IPSECGatewayIPv4, IPSECGatewayIPv6:
|
||||
if !r1.GatewayAddr.Equal(r2.GatewayAddr) {
|
||||
return false
|
||||
}
|
||||
case IPSECGatewayHost:
|
||||
if !isDuplicateName(r1.GatewayHost, r2.GatewayHost) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if r1.PublicKey != r2.PublicKey {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *ISDN) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*ISDN)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if r1.Address != r2.Address {
|
||||
return false
|
||||
}
|
||||
if r1.SubAddress != r2.SubAddress {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *KEY) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*KEY)
|
||||
if !ok {
|
||||
|
@ -813,6 +886,26 @@ func (r1 *NULL) isDuplicate(_r2 RR) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (r1 *NXT) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*NXT)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if !isDuplicateName(r1.NextDomain, r2.NextDomain) {
|
||||
return false
|
||||
}
|
||||
if len(r1.TypeBitMap) != len(r2.TypeBitMap) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(r1.TypeBitMap); i++ {
|
||||
if r1.TypeBitMap[i] != r2.TypeBitMap[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *OPENPGPKEY) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*OPENPGPKEY)
|
||||
if !ok {
|
||||
|
|
Some files were not shown because too many files have changed in this diff.