TUN-5675: Remove github.com/dgrijalva/jwt-go dependency by upgrading coredns version
parent a84cbcde7e, commit 8a5343d0a5
go.mod (50 changes)
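The point of the coredns upgrade is to drop the deprecated github.com/dgrijalva/jwt-go module from the dependency graph; after vendoring, running "go mod why -m github.com/dgrijalva/jwt-go" should report that the main module no longer needs it, and the module should no longer appear in go.sum.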
@@ -6,7 +6,7 @@ require (
github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 // indirect
github.com/cloudflare/brotli-go v0.0.0-20191101163834-d34379f7ff93
github.com/cloudflare/golibs v0.0.0-20170913112048-333127dbecfc
github.com/coredns/coredns v1.7.0
github.com/coredns/coredns v1.8.7
github.com/coreos/go-oidc v0.0.0-20171002155002-a93f71fdfe73
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
@@ -22,52 +22,50 @@ require (
github.com/gobwas/pool v0.2.1 // indirect
github.com/gobwas/ws v1.0.4
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
github.com/google/uuid v1.1.2
github.com/gorilla/mux v1.7.3
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.4.2
github.com/json-iterator/go v1.1.10
github.com/kr/text v0.2.0 // indirect
github.com/json-iterator/go v1.1.12
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lucas-clemente/quic-go v0.24.0
github.com/mattn/go-colorable v0.1.8
github.com/miekg/dns v1.1.31
github.com/miekg/dns v1.1.45
github.com/mitchellh/go-homedir v1.1.0
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pkg/errors v0.9.1
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
github.com/prometheus/client_golang v1.7.1
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.13.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/rivo/tview v0.0.0-20200712113419-c65badfc3d92
github.com/rs/zerolog v1.20.0
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stretchr/testify v1.6.0
github.com/urfave/cli/v2 v2.2.0
github.com/stretchr/testify v1.7.0
github.com/urfave/cli/v2 v2.3.0
go.uber.org/automaxprocs v1.4.0
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d // indirect
google.golang.org/grpc v1.32.0 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb // indirect
google.golang.org/grpc v1.43.0 // indirect
gopkg.in/coreos/go-oidc.v2 v2.1.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/square/go-jose.v2 v2.4.0 // indirect
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
zombiezen.com/go/capnproto2 v2.18.0+incompatible
)

require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/caddyserver/caddy v1.0.5 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cheekybits/genny v1.0.0 // indirect
github.com/coredns/caddy v1.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
@@ -83,18 +81,18 @@ require (
github.com/mattn/go-runewidth v0.0.8 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/onsi/ginkgo v1.16.4 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/procfs v0.1.3 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/rivo/uniseg v0.1.0 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/tools v0.1.1 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/protobuf v1.26.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
)
@@ -2,7 +2,6 @@ package tunneldns

import (
    "context"
    "sync"

    "github.com/coredns/coredns/plugin"
    "github.com/coredns/coredns/plugin/metrics"
@@ -11,10 +10,11 @@ import (
    "github.com/coredns/coredns/plugin/pkg/rcode"
    "github.com/coredns/coredns/request"
    "github.com/miekg/dns"
    "github.com/prometheus/client_golang/prometheus"
)

var once sync.Once
const (
    pluginName = "cloudflared"
)

// MetricsPlugin is an adapter for CoreDNS and built-in metrics
type MetricsPlugin struct {
@@ -23,14 +23,6 @@ type MetricsPlugin struct {

// NewMetricsPlugin creates a plugin with configured metrics
func NewMetricsPlugin(next plugin.Handler) *MetricsPlugin {
    once.Do(func() {
        prometheus.MustRegister(vars.RequestCount)
        prometheus.MustRegister(vars.RequestDuration)
        prometheus.MustRegister(vars.RequestSize)
        prometheus.MustRegister(vars.RequestDo)
        prometheus.MustRegister(vars.ResponseSize)
        prometheus.MustRegister(vars.ResponseRcode)
    })
    return &MetricsPlugin{Next: next}
}

@@ -43,7 +35,7 @@ func (p MetricsPlugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dn

    // Update built-in metrics
    server := metrics.WithServer(ctx)
    vars.Report(server, state, ".", rcode.ToString(rw.Rcode), rw.Len, rw.Start)
    vars.Report(server, state, ".", rcode.ToString(rw.Rcode), pluginName, rw.Len, rw.Start)

    return status, err
}
@@ -0,0 +1,19 @@
Copyright (c) 2015 Martin Atkins

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@@ -0,0 +1,236 @@
// Package cidr is a collection of assorted utilities for computing
// network and host addresses within network ranges.
//
// It expects a CIDR-type address structure where addresses are divided into
// some number of prefix bits representing the network and then the remaining
// suffix bits represent the host.
//
// For example, it can help to calculate addresses for sub-networks of a
// parent network, or to calculate host addresses within a particular prefix.
//
// At present this package is prioritizing simplicity of implementation and
// de-prioritizing speed and memory usage. Thus caution is advised before
// using this package in performance-critical applications or hot codepaths.
// Patches to improve the speed and memory usage may be accepted as long as
// they do not result in a significant increase in code complexity.
package cidr

import (
    "fmt"
    "math/big"
    "net"
)

// Subnet takes a parent CIDR range and creates a subnet within it
// with the given number of additional prefix bits and the given
// network number.
//
// For example, 10.3.0.0/16, extended by 8 bits, with a network number
// of 5, becomes 10.3.5.0/24 .
func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) {
    return SubnetBig(base, newBits, big.NewInt(int64(num)))
}

// SubnetBig takes a parent CIDR range and creates a subnet within it with the
// given number of additional prefix bits and the given network number. It
// differs from Subnet in that it takes a *big.Int for the num, instead of an int.
//
// For example, 10.3.0.0/16, extended by 8 bits, with a network number of 5,
// becomes 10.3.5.0/24 .
func SubnetBig(base *net.IPNet, newBits int, num *big.Int) (*net.IPNet, error) {
    ip := base.IP
    mask := base.Mask

    parentLen, addrLen := mask.Size()
    newPrefixLen := parentLen + newBits

    if newPrefixLen > addrLen {
        return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits)
    }

    maxNetNum := uint64(1<<uint64(newBits)) - 1
    if num.Uint64() > maxNetNum {
        return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num)
    }

    return &net.IPNet{
        IP:   insertNumIntoIP(ip, num, newPrefixLen),
        Mask: net.CIDRMask(newPrefixLen, addrLen),
    }, nil
}

// Host takes a parent CIDR range and turns it into a host IP address with the
// given host number.
//
// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
func Host(base *net.IPNet, num int) (net.IP, error) {
    return HostBig(base, big.NewInt(int64(num)))
}

// HostBig takes a parent CIDR range and turns it into a host IP address with
// the given host number. It differs from Host in that it takes a *big.Int for
// the num, instead of an int.
//
// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
func HostBig(base *net.IPNet, num *big.Int) (net.IP, error) {
    ip := base.IP
    mask := base.Mask

    parentLen, addrLen := mask.Size()
    hostLen := addrLen - parentLen

    maxHostNum := big.NewInt(int64(1))
    maxHostNum.Lsh(maxHostNum, uint(hostLen))
    maxHostNum.Sub(maxHostNum, big.NewInt(1))

    numUint64 := big.NewInt(int64(num.Uint64()))
    if num.Cmp(big.NewInt(0)) == -1 {
        numUint64.Neg(num)
        numUint64.Sub(numUint64, big.NewInt(int64(1)))
        num.Sub(maxHostNum, numUint64)
    }

    if numUint64.Cmp(maxHostNum) == 1 {
        return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
    }
    var bitlength int
    if ip.To4() != nil {
        bitlength = 32
    } else {
        bitlength = 128
    }
    return insertNumIntoIP(ip, num, bitlength), nil
}

// AddressRange returns the first and last addresses in the given CIDR range.
func AddressRange(network *net.IPNet) (net.IP, net.IP) {
    // the first IP is easy
    firstIP := network.IP

    // the last IP is the network address OR NOT the mask address
    prefixLen, bits := network.Mask.Size()
    if prefixLen == bits {
        // Easy!
        // But make sure that our two slices are distinct, since they
        // would be in all other cases.
        lastIP := make([]byte, len(firstIP))
        copy(lastIP, firstIP)
        return firstIP, lastIP
    }

    firstIPInt, bits := ipToInt(firstIP)
    hostLen := uint(bits) - uint(prefixLen)
    lastIPInt := big.NewInt(1)
    lastIPInt.Lsh(lastIPInt, hostLen)
    lastIPInt.Sub(lastIPInt, big.NewInt(1))
    lastIPInt.Or(lastIPInt, firstIPInt)

    return firstIP, intToIP(lastIPInt, bits)
}

// AddressCount returns the number of distinct host addresses within the given
// CIDR range.
//
// Since the result is a uint64, this function returns meaningful information
// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65.
func AddressCount(network *net.IPNet) uint64 {
    prefixLen, bits := network.Mask.Size()
    return 1 << (uint64(bits) - uint64(prefixLen))
}

//VerifyNoOverlap takes a list subnets and supernet (CIDRBlock) and verifies
//none of the subnets overlap and all subnets are in the supernet
//it returns an error if any of those conditions are not satisfied
func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error {
    firstLastIP := make([][]net.IP, len(subnets))
    for i, s := range subnets {
        first, last := AddressRange(s)
        firstLastIP[i] = []net.IP{first, last}
    }
    for i, s := range subnets {
        if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) {
            return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String())
        }
        for j := 0; j < len(subnets); j++ {
            if i == j {
                continue
            }

            first := firstLastIP[j][0]
            last := firstLastIP[j][1]
            if s.Contains(first) || s.Contains(last) {
                return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String())
            }
        }
    }
    return nil
}

// PreviousSubnet returns the subnet of the desired mask in the IP space
// just lower than the start of IPNet provided. If the IP space rolls over
// then the second return value is true
func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) {
    startIP := checkIPv4(network.IP)
    previousIP := make(net.IP, len(startIP))
    copy(previousIP, startIP)
    cMask := net.CIDRMask(prefixLen, 8*len(previousIP))
    previousIP = Dec(previousIP)
    previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask}
    if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) {
        return previous, true
    }
    return previous, false
}

// NextSubnet returns the next available subnet of the desired mask size
// starting for the maximum IP of the offset subnet
// If the IP exceeds the maxium IP then the second return value is true
func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) {
    _, currentLast := AddressRange(network)
    mask := net.CIDRMask(prefixLen, 8*len(currentLast))
    currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask}
    _, last := AddressRange(currentSubnet)
    last = Inc(last)
    next := &net.IPNet{IP: last.Mask(mask), Mask: mask}
    if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) {
        return next, true
    }
    return next, false
}

//Inc increases the IP by one this returns a new []byte for the IP
func Inc(IP net.IP) net.IP {
    IP = checkIPv4(IP)
    incIP := make([]byte, len(IP))
    copy(incIP, IP)
    for j := len(incIP) - 1; j >= 0; j-- {
        incIP[j]++
        if incIP[j] > 0 {
            break
        }
    }
    return incIP
}

//Dec decreases the IP by one this returns a new []byte for the IP
func Dec(IP net.IP) net.IP {
    IP = checkIPv4(IP)
    decIP := make([]byte, len(IP))
    copy(decIP, IP)
    decIP = checkIPv4(decIP)
    for j := len(decIP) - 1; j >= 0; j-- {
        decIP[j]--
        if decIP[j] < 255 {
            break
        }
    }
    return decIP
}

func checkIPv4(ip net.IP) net.IP {
    // Go for some reason allocs IPv6len for IPv4 so we have to correct it
    if v4 := ip.To4(); v4 != nil {
        return v4
    }
    return ip
}
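The vendored file above is the public API of the go-cidr helper package that the newer CoreDNS pulls in (see the github.com/apparentlymart/go-cidr entry in go.mod). A minimal usage sketch, assuming the module's published import path github.com/apparentlymart/go-cidr/cidr and reusing the values from the doc-comment examples:

package main

import (
    "fmt"
    "net"

    "github.com/apparentlymart/go-cidr/cidr"
)

func main() {
    // Parent range from the doc-comment examples.
    _, base, _ := net.ParseCIDR("10.3.0.0/16")

    // Subnet number 5, carved out with 8 extra prefix bits: 10.3.5.0/24.
    sub, err := cidr.Subnet(base, 8, 5)
    if err != nil {
        panic(err)
    }

    // Host number 2 inside the parent range: 10.3.0.2.
    host, _ := cidr.Host(base, 2)

    first, last := cidr.AddressRange(sub)
    fmt.Println(sub, host, first, last, cidr.AddressCount(sub))
    // Prints: 10.3.5.0/24 10.3.0.2 10.3.5.0 10.3.5.255 256
}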
@@ -0,0 +1,37 @@
package cidr

import (
    "fmt"
    "math/big"
    "net"
)

func ipToInt(ip net.IP) (*big.Int, int) {
    val := &big.Int{}
    val.SetBytes([]byte(ip))
    if len(ip) == net.IPv4len {
        return val, 32
    } else if len(ip) == net.IPv6len {
        return val, 128
    } else {
        panic(fmt.Errorf("Unsupported address length %d", len(ip)))
    }
}

func intToIP(ipInt *big.Int, bits int) net.IP {
    ipBytes := ipInt.Bytes()
    ret := make([]byte, bits/8)
    // Pack our IP bytes into the end of the return array,
    // since big.Int.Bytes() removes front zero padding.
    for i := 1; i <= len(ipBytes); i++ {
        ret[len(ret)-i] = ipBytes[len(ipBytes)-i]
    }
    return net.IP(ret)
}

func insertNumIntoIP(ip net.IP, bigNum *big.Int, prefixLen int) net.IP {
    ipInt, totalBits := ipToInt(ip)
    bigNum.Lsh(bigNum, uint(totalBits-prefixLen))
    ipInt.Or(ipInt, bigNum)
    return intToIP(ipInt, totalBits)
}
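To make the big-integer arithmetic above concrete, here is a short, standard-library-only walk-through (an illustrative sketch, not part of the vendored code) of what SubnetBig and insertNumIntoIP compute for the documented example of extending 10.3.0.0/16 by 8 bits with network number 5:

package main

import (
    "fmt"
    "math/big"
    "net"
)

func main() {
    ip := net.ParseIP("10.3.0.0").To4() // 0x0A030000
    ipInt := new(big.Int).SetBytes(ip)  // what ipToInt does for an IPv4 address
    num := big.NewInt(5)

    newPrefixLen := 16 + 8              // parent /16 extended by 8 bits
    num.Lsh(num, uint(32-newPrefixLen)) // shift the network number into the host bits: 5 << 8 = 0x500
    ipInt.Or(ipInt, num)                // 0x0A030500

    out := make(net.IP, net.IPv4len)
    ipInt.FillBytes(out) // right-aligned copy, equivalent to intToIP's loop
    fmt.Println(out)     // 10.3.5.0
}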
@@ -1,88 +0,0 @@
|
|||
# Mutilated beyond recognition from the example at:
|
||||
# https://docs.microsoft.com/azure/devops/pipelines/languages/go
|
||||
|
||||
trigger:
|
||||
- master
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
linux:
|
||||
imageName: ubuntu-16.04
|
||||
gorootDir: /usr/local
|
||||
mac:
|
||||
imageName: macos-10.13
|
||||
gorootDir: /usr/local
|
||||
windows:
|
||||
imageName: windows-2019
|
||||
gorootDir: C:\
|
||||
|
||||
pool:
|
||||
vmImage: $(imageName)
|
||||
|
||||
variables:
|
||||
GOROOT: $(gorootDir)/go
|
||||
GOPATH: $(system.defaultWorkingDirectory)/gopath
|
||||
GOBIN: $(GOPATH)/bin
|
||||
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
|
||||
# TODO: modules should be the default in Go 1.13, so this won't be needed
|
||||
GO111MODULE: on
|
||||
|
||||
steps:
|
||||
- bash: |
|
||||
latestGo=$(curl "https://golang.org/VERSION?m=text")
|
||||
echo "##vso[task.setvariable variable=LATEST_GO]$latestGo"
|
||||
echo "Latest Go version: $latestGo"
|
||||
displayName: "Get latest Go version"
|
||||
|
||||
- bash: |
|
||||
sudo rm -f $(which go)
|
||||
echo '##vso[task.prependpath]$(GOBIN)'
|
||||
echo '##vso[task.prependpath]$(GOROOT)/bin'
|
||||
mkdir -p '$(modulePath)'
|
||||
shopt -s extglob
|
||||
shopt -s dotglob
|
||||
mv !(gopath) '$(modulePath)'
|
||||
displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH
|
||||
|
||||
# Install Go (this varies by platform)
|
||||
|
||||
- bash: |
|
||||
wget "https://dl.google.com/go/$(LATEST_GO).linux-amd64.tar.gz"
|
||||
sudo tar -C $(gorootDir) -xzf "$(LATEST_GO).linux-amd64.tar.gz"
|
||||
condition: eq( variables['Agent.OS'], 'Linux' )
|
||||
displayName: Install Go on Linux
|
||||
|
||||
- bash: |
|
||||
wget "https://dl.google.com/go/$(LATEST_GO).darwin-amd64.tar.gz"
|
||||
sudo tar -C $(gorootDir) -xzf "$(LATEST_GO).darwin-amd64.tar.gz"
|
||||
condition: eq( variables['Agent.OS'], 'Darwin' )
|
||||
displayName: Install Go on macOS
|
||||
|
||||
- powershell: |
|
||||
Write-Host "Downloading Go... (please be patient, I am very slow)"
|
||||
(New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(LATEST_GO).windows-amd64.zip", "$(LATEST_GO).windows-amd64.zip")
|
||||
Write-Host "Extracting Go... (I'm slow too)"
|
||||
Expand-Archive "$(LATEST_GO).windows-amd64.zip" -DestinationPath "$(gorootDir)"
|
||||
condition: eq( variables['Agent.OS'], 'Windows_NT' )
|
||||
displayName: Install Go on Windows
|
||||
|
||||
# TODO: When this issue is fixed, replace with installer script:
|
||||
# https://github.com/golangci/golangci-lint/issues/472
|
||||
- script: go get -v github.com/golangci/golangci-lint/cmd/golangci-lint
|
||||
displayName: Install golangci-lint
|
||||
|
||||
- bash: |
|
||||
printf "Using go at: $(which go)\n"
|
||||
printf "Go version: $(go version)\n"
|
||||
printf "\n\nGo environment:\n\n"
|
||||
go env
|
||||
printf "\n\nSystem environment:\n\n"
|
||||
env
|
||||
displayName: Print Go version and environment
|
||||
|
||||
- script: |
|
||||
go get -v -t -d ./...
|
||||
golangci-lint run -E gofmt -E goimports -E misspell
|
||||
go test -race ./...
|
||||
workingDirectory: '$(modulePath)'
|
||||
displayName: Run tests
|
|
@@ -1,310 +0,0 @@
|
|||
// Copyright 2015 Light Code Labs, LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package telemetry
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// Init initializes this package so that it may
|
||||
// be used. Do not call this function more than
|
||||
// once. Init panics if it is called more than
|
||||
// once or if the UUID value is empty. Once this
|
||||
// function is called, the rest of the package
|
||||
// may safely be used. If this function is not
|
||||
// called, the collector functions may still be
|
||||
// invoked, but they will be no-ops.
|
||||
//
|
||||
// Any metrics keys that are passed in the second
|
||||
// argument will be permanently disabled for the
|
||||
// lifetime of the process.
|
||||
func Init(instanceID uuid.UUID, disabledMetricsKeys []string) {
|
||||
if enabled {
|
||||
panic("already initialized")
|
||||
}
|
||||
if str := instanceID.String(); str == "" ||
|
||||
str == "00000000-0000-0000-0000-000000000000" {
|
||||
panic("empty UUID")
|
||||
}
|
||||
instanceUUID = instanceID
|
||||
disabledMetricsMu.Lock()
|
||||
for _, key := range disabledMetricsKeys {
|
||||
disabledMetrics[strings.TrimSpace(key)] = false
|
||||
}
|
||||
disabledMetricsMu.Unlock()
|
||||
enabled = true
|
||||
}
|
||||
|
||||
// StartEmitting sends the current payload and begins the
|
||||
// transmission cycle for updates. This is the first
|
||||
// update sent, and future ones will be sent until
|
||||
// StopEmitting is called.
|
||||
//
|
||||
// This function is non-blocking (it spawns a new goroutine).
|
||||
//
|
||||
// This function panics if it was called more than once.
|
||||
// It is a no-op if this package was not initialized.
|
||||
func StartEmitting() {
|
||||
if !enabled {
|
||||
return
|
||||
}
|
||||
updateTimerMu.Lock()
|
||||
if updateTimer != nil {
|
||||
updateTimerMu.Unlock()
|
||||
panic("updates already started")
|
||||
}
|
||||
updateTimerMu.Unlock()
|
||||
updateMu.Lock()
|
||||
if updating {
|
||||
updateMu.Unlock()
|
||||
panic("update already in progress")
|
||||
}
|
||||
updateMu.Unlock()
|
||||
go logEmit(false)
|
||||
}
|
||||
|
||||
// StopEmitting sends the current payload and terminates
|
||||
// the update cycle. No more updates will be sent.
|
||||
//
|
||||
// It is a no-op if the package was never initialized
|
||||
// or if emitting was never started.
|
||||
//
|
||||
// NOTE: This function is blocking. Run in a goroutine if
|
||||
// you want to guarantee no blocking at critical times
|
||||
// like exiting the program.
|
||||
func StopEmitting() {
|
||||
if !enabled {
|
||||
return
|
||||
}
|
||||
updateTimerMu.Lock()
|
||||
if updateTimer == nil {
|
||||
updateTimerMu.Unlock()
|
||||
return
|
||||
}
|
||||
updateTimerMu.Unlock()
|
||||
logEmit(true) // likely too early; may take minutes to return
|
||||
}
|
||||
|
||||
// Reset empties the current payload buffer.
|
||||
func Reset() {
|
||||
resetBuffer()
|
||||
}
|
||||
|
||||
// Set puts a value in the buffer to be included
|
||||
// in the next emission. It overwrites any
|
||||
// previous value.
|
||||
//
|
||||
// This function is safe for multiple goroutines,
|
||||
// and it is recommended to call this using the
|
||||
// go keyword after the call to SendHello so it
|
||||
// doesn't block crucial code.
|
||||
func Set(key string, val interface{}) {
|
||||
if !enabled || isDisabled(key) {
|
||||
return
|
||||
}
|
||||
bufferMu.Lock()
|
||||
if _, ok := buffer[key]; !ok {
|
||||
if bufferItemCount >= maxBufferItems {
|
||||
bufferMu.Unlock()
|
||||
return
|
||||
}
|
||||
bufferItemCount++
|
||||
}
|
||||
buffer[key] = val
|
||||
bufferMu.Unlock()
|
||||
}
|
||||
|
||||
// SetNested puts a value in the buffer to be included
|
||||
// in the next emission, nested under the top-level key
|
||||
// as subkey. It overwrites any previous value.
|
||||
//
|
||||
// This function is safe for multiple goroutines,
|
||||
// and it is recommended to call this using the
|
||||
// go keyword after the call to SendHello so it
|
||||
// doesn't block crucial code.
|
||||
func SetNested(key, subkey string, val interface{}) {
|
||||
if !enabled || isDisabled(key) {
|
||||
return
|
||||
}
|
||||
bufferMu.Lock()
|
||||
if topLevel, ok1 := buffer[key]; ok1 {
|
||||
topLevelMap, ok2 := topLevel.(map[string]interface{})
|
||||
if !ok2 {
|
||||
bufferMu.Unlock()
|
||||
log.Printf("[PANIC] Telemetry: key %s is already used for non-nested-map value", key)
|
||||
return
|
||||
}
|
||||
if _, ok3 := topLevelMap[subkey]; !ok3 {
|
||||
// don't exceed max buffer size
|
||||
if bufferItemCount >= maxBufferItems {
|
||||
bufferMu.Unlock()
|
||||
return
|
||||
}
|
||||
bufferItemCount++
|
||||
}
|
||||
topLevelMap[subkey] = val
|
||||
} else {
|
||||
// don't exceed max buffer size
|
||||
if bufferItemCount >= maxBufferItems {
|
||||
bufferMu.Unlock()
|
||||
return
|
||||
}
|
||||
bufferItemCount++
|
||||
buffer[key] = map[string]interface{}{subkey: val}
|
||||
}
|
||||
bufferMu.Unlock()
|
||||
}
|
||||
|
||||
// Append appends value to a list named key.
|
||||
// If key is new, a new list will be created.
|
||||
// If key maps to a type that is not a list,
|
||||
// a panic is logged, and this is a no-op.
|
||||
func Append(key string, value interface{}) {
|
||||
if !enabled || isDisabled(key) {
|
||||
return
|
||||
}
|
||||
bufferMu.Lock()
|
||||
if bufferItemCount >= maxBufferItems {
|
||||
bufferMu.Unlock()
|
||||
return
|
||||
}
|
||||
// TODO: Test this...
|
||||
bufVal, inBuffer := buffer[key]
|
||||
sliceVal, sliceOk := bufVal.([]interface{})
|
||||
if inBuffer && !sliceOk {
|
||||
bufferMu.Unlock()
|
||||
log.Printf("[PANIC] Telemetry: key %s already used for non-slice value", key)
|
||||
return
|
||||
}
|
||||
if sliceVal == nil {
|
||||
buffer[key] = []interface{}{value}
|
||||
} else if sliceOk {
|
||||
buffer[key] = append(sliceVal, value)
|
||||
}
|
||||
bufferItemCount++
|
||||
bufferMu.Unlock()
|
||||
}
|
||||
|
||||
// AppendUnique adds value to a set named key.
|
||||
// Set items are unordered. Values in the set
|
||||
// are unique, but how many times they are
|
||||
// appended is counted. The value must be
|
||||
// hashable.
|
||||
//
|
||||
// If key is new, a new set will be created for
|
||||
// values with that key. If key maps to a type
|
||||
// that is not a counting set, a panic is logged,
|
||||
// and this is a no-op.
|
||||
func AppendUnique(key string, value interface{}) {
|
||||
if !enabled || isDisabled(key) {
|
||||
return
|
||||
}
|
||||
bufferMu.Lock()
|
||||
bufVal, inBuffer := buffer[key]
|
||||
setVal, setOk := bufVal.(countingSet)
|
||||
if inBuffer && !setOk {
|
||||
bufferMu.Unlock()
|
||||
log.Printf("[PANIC] Telemetry: key %s already used for non-counting-set value", key)
|
||||
return
|
||||
}
|
||||
if setVal == nil {
|
||||
// ensure the buffer is not too full, then add new unique value
|
||||
if bufferItemCount >= maxBufferItems {
|
||||
bufferMu.Unlock()
|
||||
return
|
||||
}
|
||||
buffer[key] = countingSet{value: 1}
|
||||
bufferItemCount++
|
||||
} else if setOk {
|
||||
// unique value already exists, so just increment counter
|
||||
setVal[value]++
|
||||
}
|
||||
bufferMu.Unlock()
|
||||
}
|
||||
|
||||
// Add adds amount to a value named key.
|
||||
// If it does not exist, it is created with
|
||||
// a value of 1. If key maps to a type that
|
||||
// is not an integer, a panic is logged,
|
||||
// and this is a no-op.
|
||||
func Add(key string, amount int) {
|
||||
atomicAdd(key, amount)
|
||||
}
|
||||
|
||||
// Increment is a shortcut for Add(key, 1)
|
||||
func Increment(key string) {
|
||||
atomicAdd(key, 1)
|
||||
}
|
||||
|
||||
// atomicAdd adds amount (negative to subtract)
|
||||
// to key.
|
||||
func atomicAdd(key string, amount int) {
|
||||
if !enabled || isDisabled(key) {
|
||||
return
|
||||
}
|
||||
bufferMu.Lock()
|
||||
bufVal, inBuffer := buffer[key]
|
||||
intVal, intOk := bufVal.(int)
|
||||
if inBuffer && !intOk {
|
||||
bufferMu.Unlock()
|
||||
log.Printf("[PANIC] Telemetry: key %s already used for non-integer value", key)
|
||||
return
|
||||
}
|
||||
if !inBuffer {
|
||||
if bufferItemCount >= maxBufferItems {
|
||||
bufferMu.Unlock()
|
||||
return
|
||||
}
|
||||
bufferItemCount++
|
||||
}
|
||||
buffer[key] = intVal + amount
|
||||
bufferMu.Unlock()
|
||||
}
|
||||
|
||||
// FastHash hashes input using a 32-bit hashing algorithm
|
||||
// that is fast, and returns the hash as a hex-encoded string.
|
||||
// Do not use this for cryptographic purposes.
|
||||
func FastHash(input []byte) string {
|
||||
h := fnv.New32a()
|
||||
if _, err := h.Write(input); err != nil {
|
||||
log.Println("[ERROR] failed to write bytes: ", err)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%x", h.Sum32())
|
||||
}
|
||||
|
||||
// isDisabled returns whether key is
|
||||
// a disabled metric key. ALL collection
|
||||
// functions should call this and not
|
||||
// save the value if this returns true.
|
||||
func isDisabled(key string) bool {
|
||||
// for keys that are augmented with data, such as
|
||||
// "tls_client_hello_ua:<hash>", just
|
||||
// check the prefix "tls_client_hello_ua"
|
||||
checkKey := key
|
||||
if idx := strings.Index(key, ":"); idx > -1 {
|
||||
checkKey = key[:idx]
|
||||
}
|
||||
|
||||
disabledMetricsMu.RLock()
|
||||
_, ok := disabledMetrics[checkKey]
|
||||
disabledMetricsMu.RUnlock()
|
||||
return ok
|
||||
}
|
|
@@ -1,428 +0,0 @@
|
|||
// Copyright 2015 Light Code Labs, LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package telemetry implements the client for server-side telemetry
|
||||
// of the network. Functions in this package are synchronous and blocking
|
||||
// unless otherwise specified. For convenience, most functions here do
|
||||
// not return errors, but errors are logged to the standard logger.
|
||||
//
|
||||
// To use this package, first call Init(). You can then call any of the
|
||||
// collection/aggregation functions. Call StartEmitting() when you are
|
||||
// ready to begin sending telemetry updates.
|
||||
//
|
||||
// When collecting metrics (functions like Set, AppendUnique, or Increment),
|
||||
// it may be desirable and even recommended to invoke them in a new
|
||||
// goroutine in case there is lock contention; they are thread-safe (unless
|
||||
// noted), and you may not want them to block the main thread of execution.
|
||||
// However, sometimes blocking may be necessary too; for example, adding
|
||||
// startup metrics to the buffer before the call to StartEmitting().
|
||||
//
|
||||
// This package is designed to be as fast and space-efficient as reasonably
|
||||
// possible, so that it does not disrupt the flow of execution.
|
||||
package telemetry
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// logEmit calls emit and then logs the error, if any.
|
||||
// See docs for emit.
|
||||
func logEmit(final bool) {
|
||||
err := emit(final)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Sending telemetry: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// emit sends an update to the telemetry server.
|
||||
// Set final to true if this is the last call to emit.
|
||||
// If final is true, no future updates will be scheduled.
|
||||
// Otherwise, the next update will be scheduled.
|
||||
func emit(final bool) error {
|
||||
if !enabled {
|
||||
return fmt.Errorf("telemetry not enabled")
|
||||
}
|
||||
|
||||
// some metrics are updated/set at time of emission
|
||||
setEmitTimeMetrics()
|
||||
|
||||
// ensure only one update happens at a time;
|
||||
// skip update if previous one still in progress
|
||||
updateMu.Lock()
|
||||
if updating {
|
||||
updateMu.Unlock()
|
||||
log.Println("[NOTICE] Skipping this telemetry update because previous one is still working")
|
||||
return nil
|
||||
}
|
||||
updating = true
|
||||
updateMu.Unlock()
|
||||
defer func() {
|
||||
updateMu.Lock()
|
||||
updating = false
|
||||
updateMu.Unlock()
|
||||
}()
|
||||
|
||||
// terminate any pending update if this is the last one
|
||||
if final {
|
||||
stopUpdateTimer()
|
||||
}
|
||||
|
||||
payloadBytes, err := makePayloadAndResetBuffer()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// this will hold the server's reply
|
||||
var reply Response
|
||||
|
||||
// transmit the payload - use a loop to retry in case of failure
|
||||
for i := 0; i < 4; i++ {
|
||||
if i > 0 && err != nil {
|
||||
// don't hammer the server; first failure might have been
|
||||
// a fluke, but back off more after that
|
||||
log.Printf("[WARNING] Sending telemetry (attempt %d): %v - backing off and retrying", i, err)
|
||||
time.Sleep(time.Duration((i+1)*(i+1)*(i+1)) * time.Second)
|
||||
}
|
||||
|
||||
// send it
|
||||
var resp *http.Response
|
||||
resp, err = httpClient.Post(endpoint+instanceUUID.String(), "application/json", bytes.NewReader(payloadBytes))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// check for any special-case response codes
|
||||
if resp.StatusCode == http.StatusGone {
|
||||
// the endpoint has been deprecated and is no longer servicing clients
|
||||
err = fmt.Errorf("telemetry server replied with HTTP %d; upgrade required", resp.StatusCode)
|
||||
if clen := resp.Header.Get("Content-Length"); clen != "0" && clen != "" {
|
||||
bodyBytes, readErr := ioutil.ReadAll(resp.Body)
|
||||
if readErr != nil {
|
||||
log.Printf("[ERROR] Reading response body from server: %v", readErr)
|
||||
}
|
||||
err = fmt.Errorf("%v - %s", err, bodyBytes)
|
||||
}
|
||||
resp.Body.Close()
|
||||
reply.Stop = true
|
||||
break
|
||||
}
|
||||
if resp.StatusCode == http.StatusUnavailableForLegalReasons {
|
||||
// the endpoint is unavailable, at least to this client, for legal reasons (!)
|
||||
err = fmt.Errorf("telemetry server replied with HTTP %d %s: please consult the project website and developers for guidance", resp.StatusCode, resp.Status)
|
||||
if clen := resp.Header.Get("Content-Length"); clen != "0" && clen != "" {
|
||||
bodyBytes, readErr := ioutil.ReadAll(resp.Body)
|
||||
if readErr != nil {
|
||||
log.Printf("[ERROR] Reading response body from server: %v", readErr)
|
||||
}
|
||||
err = fmt.Errorf("%v - %s", err, bodyBytes)
|
||||
}
|
||||
resp.Body.Close()
|
||||
reply.Stop = true
|
||||
break
|
||||
}
|
||||
|
||||
// okay, ensure we can interpret the response
|
||||
if ct := resp.Header.Get("Content-Type"); (resp.StatusCode < 300 || resp.StatusCode >= 400) &&
|
||||
!strings.Contains(ct, "json") {
|
||||
err = fmt.Errorf("telemetry server replied with unknown content-type: '%s' and HTTP %s", ct, resp.Status)
|
||||
resp.Body.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
// read the response body
|
||||
err = json.NewDecoder(resp.Body).Decode(&reply)
|
||||
resp.Body.Close() // close response body as soon as we're done with it
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// update the list of enabled/disabled keys, if any
|
||||
for _, key := range reply.EnableKeys {
|
||||
disabledMetricsMu.Lock()
|
||||
// only re-enable this metric if it is temporarily disabled
|
||||
if temp, ok := disabledMetrics[key]; ok && temp {
|
||||
delete(disabledMetrics, key)
|
||||
}
|
||||
disabledMetricsMu.Unlock()
|
||||
}
|
||||
for _, key := range reply.DisableKeys {
|
||||
disabledMetricsMu.Lock()
|
||||
disabledMetrics[key] = true // all remotely-disabled keys are "temporarily" disabled
|
||||
disabledMetricsMu.Unlock()
|
||||
}
|
||||
|
||||
// make sure we didn't send the update too soon; if so,
|
||||
// just wait and try again -- this is a special case of
|
||||
// error that we handle differently, as you can see
|
||||
if resp.StatusCode == http.StatusTooManyRequests {
|
||||
if reply.NextUpdate <= 0 {
|
||||
raStr := resp.Header.Get("Retry-After")
|
||||
if ra, err := strconv.Atoi(raStr); err == nil {
|
||||
reply.NextUpdate = time.Duration(ra) * time.Second
|
||||
}
|
||||
}
|
||||
if !final {
|
||||
log.Printf("[NOTICE] Sending telemetry: we were too early; waiting %s before trying again", reply.NextUpdate)
|
||||
time.Sleep(reply.NextUpdate)
|
||||
continue
|
||||
}
|
||||
} else if resp.StatusCode >= 400 {
|
||||
err = fmt.Errorf("telemetry server returned status code %d", resp.StatusCode)
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
if err == nil && !final {
|
||||
// (remember, if there was an error, we return it
|
||||
// below, so it WILL get logged if it's supposed to)
|
||||
log.Println("[INFO] Sending telemetry: success")
|
||||
}
|
||||
|
||||
// even if there was an error after all retries, we should
|
||||
// schedule the next update using our default update
|
||||
// interval because the server might be healthy later
|
||||
|
||||
// ensure we won't slam the telemetry server; add a little variance
|
||||
if reply.NextUpdate < 1*time.Second {
|
||||
reply.NextUpdate = defaultUpdateInterval + time.Duration(rand.Int63n(int64(1*time.Minute)))
|
||||
}
|
||||
|
||||
// schedule the next update (if this wasn't the last one and
|
||||
// if the remote server didn't tell us to stop sending)
|
||||
if !final && !reply.Stop {
|
||||
updateTimerMu.Lock()
|
||||
updateTimer = time.AfterFunc(reply.NextUpdate, func() {
|
||||
logEmit(false)
|
||||
})
|
||||
updateTimerMu.Unlock()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func stopUpdateTimer() {
|
||||
updateTimerMu.Lock()
|
||||
updateTimer.Stop()
|
||||
updateTimer = nil
|
||||
updateTimerMu.Unlock()
|
||||
}
|
||||
|
||||
// setEmitTimeMetrics sets some metrics that should
|
||||
// be recorded just before emitting.
|
||||
func setEmitTimeMetrics() {
|
||||
Set("goroutines", runtime.NumGoroutine())
|
||||
|
||||
var mem runtime.MemStats
|
||||
runtime.ReadMemStats(&mem)
|
||||
SetNested("memory", "heap_alloc", mem.HeapAlloc)
|
||||
SetNested("memory", "sys", mem.Sys)
|
||||
}
|
||||
|
||||
// makePayloadAndResetBuffer prepares a payload
|
||||
// by emptying the collection buffer. It returns
|
||||
// the bytes of the payload to send to the server.
|
||||
// Since the buffer is reset by this, if the
|
||||
// resulting byte slice is lost, the payload is
|
||||
// gone with it.
|
||||
func makePayloadAndResetBuffer() ([]byte, error) {
|
||||
bufCopy := resetBuffer()
|
||||
|
||||
// encode payload in preparation for transmission
|
||||
payload := Payload{
|
||||
InstanceID: instanceUUID.String(),
|
||||
Timestamp: time.Now().UTC(),
|
||||
Data: bufCopy,
|
||||
}
|
||||
return json.Marshal(payload)
|
||||
}
|
||||
|
||||
// resetBuffer makes a local pointer to the buffer,
|
||||
// then resets the buffer by assigning to be a newly-
|
||||
// made value to clear it out, then sets the buffer
|
||||
// item count to 0. It returns the copied pointer to
|
||||
// the original map so the old buffer value can be
|
||||
// used locally.
|
||||
func resetBuffer() map[string]interface{} {
|
||||
bufferMu.Lock()
|
||||
bufCopy := buffer
|
||||
buffer = make(map[string]interface{})
|
||||
bufferItemCount = 0
|
||||
bufferMu.Unlock()
|
||||
return bufCopy
|
||||
}
|
||||
|
||||
// Response contains the body of a response from the
|
||||
// telemetry server.
|
||||
type Response struct {
|
||||
// NextUpdate is how long to wait before the next update.
|
||||
NextUpdate time.Duration `json:"next_update"`
|
||||
|
||||
// Stop instructs the telemetry server to stop sending
|
||||
// telemetry. This would only be done under extenuating
|
||||
// circumstances, but we are prepared for it nonetheless.
|
||||
Stop bool `json:"stop,omitempty"`
|
||||
|
||||
// Error will be populated with an error message, if any.
|
||||
// This field should be empty if the status code is < 400.
|
||||
Error string `json:"error,omitempty"`
|
||||
|
||||
// DisableKeys will contain a list of keys/metrics that
|
||||
// should NOT be sent until further notice. The client
|
||||
// must NOT store these items in its buffer or send them
|
||||
// to the telemetry server while they are disabled. If
|
||||
// this list and EnableKeys have the same value (which is
|
||||
// not supposed to happen), this field should dominate.
|
||||
DisableKeys []string `json:"disable_keys,omitempty"`
|
||||
|
||||
// EnableKeys will contain a list of keys/metrics that
|
||||
// MAY be sent until further notice.
|
||||
EnableKeys []string `json:"enable_keys,omitempty"`
|
||||
}
|
||||
|
||||
// Payload is the data that gets sent to the telemetry server.
|
||||
type Payload struct {
|
||||
// The universally unique ID of the instance
|
||||
InstanceID string `json:"instance_id"`
|
||||
|
||||
// The UTC timestamp of the transmission
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
|
||||
// The timestamp before which the next update is expected
|
||||
// (NOT populated by client - the server fills this in
|
||||
// before it stores the data)
|
||||
ExpectNext time.Time `json:"expect_next,omitempty"`
|
||||
|
||||
// The metrics
|
||||
Data map[string]interface{} `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
// Int returns the value of the data keyed by key
|
||||
// if it is an integer; otherwise it returns 0.
|
||||
func (p Payload) Int(key string) int {
|
||||
val, _ := p.Data[key]
|
||||
switch p.Data[key].(type) {
|
||||
case int:
|
||||
return val.(int)
|
||||
case float64: // after JSON-decoding, int becomes float64...
|
||||
return int(val.(float64))
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// countingSet implements a set that counts how many
|
||||
// times a key is inserted. It marshals to JSON in a
|
||||
// way such that keys are converted to values next
|
||||
// to their associated counts.
|
||||
type countingSet map[interface{}]int
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
// It converts the set to an array so that the values
|
||||
// are JSON object values instead of keys, since keys
|
||||
// are difficult to query in databases.
|
||||
func (s countingSet) MarshalJSON() ([]byte, error) {
|
||||
type Item struct {
|
||||
Value interface{} `json:"value"`
|
||||
Count int `json:"count"`
|
||||
}
|
||||
var list []Item
|
||||
|
||||
for k, v := range s {
|
||||
list = append(list, Item{Value: k, Count: v})
|
||||
}
|
||||
|
||||
return json.Marshal(list)
|
||||
}
|
||||
|
||||
var (
|
||||
// httpClient should be used for HTTP requests. It
|
||||
// is configured with a timeout for reliability.
|
||||
httpClient = http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSHandshakeTimeout: 30 * time.Second,
|
||||
DisableKeepAlives: true,
|
||||
},
|
||||
Timeout: 1 * time.Minute,
|
||||
}
|
||||
|
||||
// buffer holds the data that we are building up to send.
|
||||
buffer = make(map[string]interface{})
|
||||
bufferItemCount = 0
|
||||
bufferMu sync.RWMutex // protects both the buffer and its count
|
||||
|
||||
// updating is used to ensure only one
|
||||
// update happens at a time.
|
||||
updating bool
|
||||
updateMu sync.Mutex
|
||||
|
||||
// updateTimer fires off the next update.
|
||||
// If no update is scheduled, this is nil.
|
||||
updateTimer *time.Timer
|
||||
updateTimerMu sync.Mutex
|
||||
|
||||
// disabledMetrics is a set of metric keys
|
||||
// that should NOT be saved to the buffer
|
||||
// or sent to the telemetry server. The value
|
||||
// indicates whether the entry is temporary.
|
||||
// If the value is true, it may be removed if
|
||||
// the metric is re-enabled remotely later. If
|
||||
// the value is false, it is permanent
|
||||
// (presumably because the user explicitly
|
||||
// disabled it) and can only be re-enabled
|
||||
// with user consent.
|
||||
disabledMetrics = make(map[string]bool)
|
||||
disabledMetricsMu sync.RWMutex
|
||||
|
||||
// instanceUUID is the ID of the current instance.
|
||||
// This MUST be set to emit telemetry.
|
||||
// This MUST NOT be openly exposed to clients, for privacy.
|
||||
instanceUUID uuid.UUID
|
||||
|
||||
// enabled indicates whether the package has
|
||||
// been initialized and can be actively used.
|
||||
enabled bool
|
||||
|
||||
// maxBufferItems is the maximum number of items we'll allow
|
||||
// in the buffer before we start dropping new ones, in a
|
||||
// rough (simple) attempt to keep memory use under control.
|
||||
maxBufferItems = 100000
|
||||
)
|
||||
|
||||
const (
|
||||
// endpoint is the base URL to remote telemetry server;
|
||||
// the instance ID will be appended to it.
|
||||
endpoint = "https://telemetry.caddyserver.com/v1/update/"
|
||||
|
||||
// defaultUpdateInterval is how long to wait before emitting
|
||||
// more telemetry data if all retires fail. This value is
|
||||
// only used if the client receives a nonsensical value, or
|
||||
// doesn't send one at all, or if a connection can't be made,
|
||||
// likely indicating a problem with the server. Thus, this
|
||||
// value should be a long duration to help alleviate extra
|
||||
// load on the server.
|
||||
defaultUpdateInterval = 1 * time.Hour
|
||||
)
|
|
@@ -1,24 +1,5 @@
|
|||
<p align="center">
|
||||
<a href="https://caddyserver.com"><img src="https://user-images.githubusercontent.com/1128849/36338535-05fb646a-136f-11e8-987b-e6901e717d5a.png" alt="Caddy" width="450"></a>
|
||||
</p>
|
||||
<h3 align="center">Every Site on HTTPS <!-- Serve Confidently --></h3>
|
||||
<p align="center">Caddy is a general-purpose HTTP/2 web server that serves HTTPS by default.</p>
|
||||
<p align="center">
|
||||
<a href="https://dev.azure.com/mholt-dev/Caddy/_build?definitionId=5"><img src="https://img.shields.io/azure-devops/build/mholt-dev/afec6074-9842-457f-98cf-69df6adbbf2e/5/master.svg?label=cross-platform%20tests"></a>
|
||||
<a href="https://godoc.org/github.com/caddyserver/caddy"><img src="https://img.shields.io/badge/godoc-reference-blue.svg"></a>
|
||||
<a href="https://goreportcard.com/report/caddyserver/caddy"><img src="https://goreportcard.com/badge/github.com/caddyserver/caddy"></a>
|
||||
<br>
|
||||
<a href="https://twitter.com/caddyserver" title="@caddyserver on Twitter"><img src="https://img.shields.io/badge/twitter-@caddyserver-55acee.svg" alt="@caddyserver on Twitter"></a>
|
||||
<a href="https://caddy.community" title="Caddy Forum"><img src="https://img.shields.io/badge/community-forum-ff69b4.svg" alt="Caddy Forum"></a>
|
||||
<a href="https://sourcegraph.com/github.com/caddyserver/caddy?badge" title="Caddy on Sourcegraph"><img src="https://sourcegraph.com/github.com/caddyserver/caddy/-/badge.svg" alt="Caddy on Sourcegraph"></a>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="https://caddyserver.com/download">Download</a> ·
|
||||
<a href="https://caddyserver.com/docs">Documentation</a> ·
|
||||
<a href="https://caddy.community">Community</a>
|
||||
</p>
|
||||
THIS IS A FORK OF CADDY v1 - EVERYTHING IS STRIPPED EXCEPT THE PIECES NEEDED IN COREDNS.
|
||||
|
||||
---
|
||||
|
||||
Caddy is a **production-ready** open-source web server that is fast, easy to use, and makes you more productive.
|
||||
|
||||
|
@@ -51,7 +32,7 @@ Available for Windows, Mac, Linux, BSD, Solaris, and [Android](https://github.co
|
|||
- **Extensible with plugins** because a convenient web server is a helpful one
|
||||
- **Runs anywhere** with **no external dependencies** (not even libc)
|
||||
|
||||
[See a more complete list of features built into Caddy.](https://caddyserver.com/features) On top of all those, Caddy does even more with plugins: choose which plugins you want at [download](https://caddyserver.com/download).
|
||||
[See a more complete list of features built into Caddy.](https://caddyserver.com/#features) On top of all those, Caddy does even more with plugins: choose which plugins you want at [download](https://caddyserver.com/download).
|
||||
|
||||
Altogether, Caddy can do things other web servers simply cannot do. Its features and plugins save you time and mistakes, and will cheer you up. Your Caddy instance takes care of the details for you!
|
||||
|
vendor/github.com/caddyserver/caddy/caddy.go → vendor/github.com/coredns/caddy/caddy.go (generated, vendored, 18 changes)
@@ -20,7 +20,7 @@
|
|||
// 2. Call LoadCaddyfile() to get the Caddyfile.
|
||||
// Pass in the name of the server type (like "http").
|
||||
// Make sure the server type's package is imported
|
||||
// (import _ "github.com/caddyserver/caddy/caddyhttp").
|
||||
// (import _ "github.com/coredns/caddy/caddyhttp").
|
||||
// 3. Call caddy.Start() to start Caddy. You get back
|
||||
// an Instance, on which you can call Restart() to
|
||||
// restart it or Stop() to stop it.
|
||||
|
@@ -43,8 +43,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/caddyserver/caddy/caddyfile"
|
||||
"github.com/caddyserver/caddy/telemetry"
|
||||
"github.com/coredns/caddy/caddyfile"
|
||||
)
|
||||
|
||||
// Configurable application parameters
|
||||
|
@@ -600,12 +599,6 @@ func ValidateAndExecuteDirectives(cdyfile Input, inst *Instance, justValidate bo
|
|||
return err
|
||||
}
|
||||
|
||||
for _, sb := range sblocks {
|
||||
for dir := range sb.Tokens {
|
||||
telemetry.AppendUnique("directives", dir)
|
||||
}
|
||||
}
|
||||
|
||||
inst.context = stype.NewContext(inst)
|
||||
if inst.context == nil {
|
||||
return fmt.Errorf("server type %s produced a nil Context", stypeName)
|
||||
|
@@ -616,8 +609,6 @@ func ValidateAndExecuteDirectives(cdyfile Input, inst *Instance, justValidate bo
|
|||
return fmt.Errorf("error inspecting server blocks: %v", err)
|
||||
}
|
||||
|
||||
telemetry.Set("num_server_blocks", len(sblocks))
|
||||
|
||||
return executeDirectives(inst, cdyfile.Path(), stype.Directives(), sblocks, justValidate)
|
||||
}
|
||||
|
||||
|
@@ -657,6 +648,11 @@ func executeDirectives(inst *Instance, filename string,
|
|||
ServerBlockStorage: storages[i][dir],
|
||||
}
|
||||
|
||||
// only set up directives for the first key in a block
|
||||
if j > 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
setup, err := DirectiveAction(inst.serverType, dir)
|
||||
if err != nil {
|
||||
return err
|
|
@@ -48,6 +48,9 @@ func (l *lexer) load(input io.Reader) error {
|
|||
// discard byte order mark, if present
|
||||
firstCh, _, err := l.reader.ReadRune()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
if firstCh != 0xFEFF {
|
|
@@ -17,7 +17,7 @@ package caddy
|
|||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/caddyserver/caddy/caddyfile"
|
||||
"github.com/coredns/caddy/caddyfile"
|
||||
)
|
||||
|
||||
// Controller is given to the setup function of directives which
|
|
@@ -21,7 +21,7 @@ import (
|
|||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/caddyserver/caddy/caddyfile"
|
||||
"github.com/coredns/caddy/caddyfile"
|
||||
)
|
||||
|
||||
// These are all the registered plugins.
|
|
@@ -19,8 +19,6 @@ import (
|
|||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
|
||||
"github.com/caddyserver/caddy/telemetry"
|
||||
)
|
||||
|
||||
// TrapSignals create signal handlers for all applicable signals for this
|
||||
|
@@ -54,9 +52,6 @@ func trapSignalsCrossPlatform() {
|
|||
|
||||
log.Println("[INFO] SIGINT: Shutting down")
|
||||
|
||||
telemetry.AppendUnique("sigtrap", "SIGINT")
|
||||
go telemetry.StopEmitting() // not guaranteed to finish in time; that's OK (just don't block!)
|
||||
|
||||
// important cleanup actions before shutdown callbacks
|
||||
for _, f := range OnProcessExit {
|
||||
f()
|
|
@@ -21,8 +21,6 @@ import (
|
|||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/caddyserver/caddy/telemetry"
|
||||
)
|
||||
|
||||
// trapSignalsPosix captures POSIX-only signals.
|
||||
|
@@ -52,14 +50,10 @@ func trapSignalsPosix() {
|
|||
exitCode = 3
|
||||
}
|
||||
|
||||
telemetry.AppendUnique("sigtrap", "SIGTERM")
|
||||
go telemetry.StopEmitting() // won't finish in time, but that's OK - just don't block
|
||||
|
||||
os.Exit(exitCode)
|
||||
|
||||
case syscall.SIGUSR1:
|
||||
log.Println("[INFO] SIGUSR1: Reloading")
|
||||
go telemetry.AppendUnique("sigtrap", "SIGUSR1")
|
||||
|
||||
// Start with the existing Caddyfile
|
||||
caddyfileToUse, inst, err := getCurrentCaddyfile()
|
||||
|
@@ -100,14 +94,12 @@ func trapSignalsPosix() {
|
|||
|
||||
case syscall.SIGUSR2:
|
||||
log.Println("[INFO] SIGUSR2: Upgrading")
|
||||
go telemetry.AppendUnique("sigtrap", "SIGUSR2")
|
||||
if err := Upgrade(); err != nil {
|
||||
log.Printf("[ERROR] SIGUSR2: upgrading: %v", err)
|
||||
}
|
||||
|
||||
case syscall.SIGHUP:
|
||||
// ignore; this signal is sometimes sent outside of the user's control
|
||||
go telemetry.AppendUnique("sigtrap", "SIGHUP")
|
||||
}
|
||||
}
|
||||
}()
|
|
@@ -4,20 +4,13 @@ import (
|
|||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/coredns/coredns/plugin"
|
||||
"github.com/coredns/coredns/plugin/pkg/parse"
|
||||
"github.com/coredns/coredns/plugin/pkg/transport"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
type zoneAddr struct {
|
||||
Zone string
|
||||
Port string
|
||||
Transport string // dns, tls or grpc
|
||||
IPNet *net.IPNet // if reverse zone this hold the IPNet
|
||||
Address string // used for bound zoneAddr - validation of overlapping
|
||||
Transport string // dns, tls or grpc
|
||||
Address string // used for bound zoneAddr - validation of overlapping
|
||||
}
|
||||
|
||||
// String returns the string representation of z.
|
||||
|
@@ -29,32 +22,6 @@ func (z zoneAddr) String() string {
|
|||
return s
|
||||
}
|
||||
|
||||
// normalizeZone parses a zone string into a structured format with separate
|
||||
// host, and port portions, as well as the original input string.
|
||||
func normalizeZone(str string) (zoneAddr, error) {
|
||||
trans, str := parse.Transport(str)
|
||||
|
||||
host, port, ipnet, err := plugin.SplitHostPort(str)
|
||||
if err != nil {
|
||||
return zoneAddr{}, err
|
||||
}
|
||||
|
||||
if port == "" {
|
||||
switch trans {
|
||||
case transport.DNS:
|
||||
port = Port
|
||||
case transport.TLS:
|
||||
port = transport.TLSPort
|
||||
case transport.GRPC:
|
||||
port = transport.GRPCPort
|
||||
case transport.HTTPS:
|
||||
port = transport.HTTPSPort
|
||||
}
|
||||
}
|
||||
|
||||
return zoneAddr{Zone: dns.Fqdn(host), Port: port, Transport: trans, IPNet: ipnet}, nil
|
||||
}
|
||||
|
||||
// SplitProtocolHostPort splits a full formed address like "dns://[::1]:53" into parts.
|
||||
func SplitProtocolHostPort(address string) (protocol string, ip string, port string, err error) {
|
||||
parts := strings.Split(address, "://")
|
||||
|
|
|
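The removed normalizeZone above, and the new key handling later in this diff, apply the same rule: strip the transport scheme from a listener key and fall back to that transport's default port when none is given. Here is a self-contained sketch of that rule; the port values (53, 853 for DNS-over-TLS, 443 for gRPC and DNS-over-HTTPS) are the conventional defaults, used in place of the transport package constants, and the naive host:port split ignores IPv6 literals.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

// splitTransport peels a scheme such as "tls://" off a listener key,
// defaulting to plain DNS when no scheme is present.
func splitTransport(key string) (trans, rest string) {
	if i := strings.Index(key, "://"); i >= 0 {
		return key[:i], key[i+3:]
	}
	return "dns", key
}

// defaultPort returns the conventional port for a transport.
func defaultPort(trans string) string {
	switch trans {
	case "tls":
		return "853"
	case "grpc", "https":
		return "443"
	default: // plain dns
		return "53"
	}
}

func main() {
	for _, key := range []string{"example.org", "tls://example.org", "grpc://.:1055"} {
		trans, rest := splitTransport(key)
		host, port := rest, ""
		if i := strings.LastIndex(rest, ":"); i >= 0 {
			host, port = rest[:i], rest[i+1:]
		}
		if port == "" {
			port = defaultPort(trans)
		}
		fmt.Printf("%s -> zone %s port %s transport %s\n", key, dns.Fqdn(host), port, trans)
	}
}
```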
@ -3,10 +3,10 @@ package dnsserver
|
|||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/coredns/caddy"
|
||||
"github.com/coredns/coredns/plugin"
|
||||
|
||||
"github.com/caddyserver/caddy"
|
||||
)
|
||||
|
||||
// Config configuration for a single server.
|
||||
|
@ -32,10 +32,10 @@ type Config struct {
|
|||
// DNS-over-TLS or DNS-over-gRPC.
|
||||
Transport string
|
||||
|
||||
// If this function is not nil it will be used to further filter access
|
||||
// to this handler. The primary use is to limit access to a reverse zone
|
||||
// on a non-octet boundary, i.e. /17
|
||||
FilterFunc func(string) bool
|
||||
// If this function is not nil it will be used to inspect and validate
|
||||
// HTTP requests. Although this isn't referenced in-tree, external plugins
|
||||
// may depend on it.
|
||||
HTTPRequestValidateFunc func(*http.Request) bool
|
||||
|
||||
// TLSConfig when listening for encrypted connections (gRPC, DNS-over-TLS).
|
||||
TLSConfig *tls.Config
|
||||
|
@ -50,9 +50,13 @@ type Config struct {
|
|||
// on them should register themselves here. The name should be the name as return by the
|
||||
// Handler's Name method.
|
||||
registry map[string]plugin.Handler
|
||||
|
||||
// firstConfigInBlock is used to reference the first config in a server block, for the
// purpose of sharing a single instance of each plugin among all zones in a server block.
firstConfigInBlock *Config
|
||||
}
|
||||
|
||||
// keyForConfig build a key for identifying the configs during setup time
|
||||
// keyForConfig builds a key for identifying the configs during setup time
|
||||
func keyForConfig(blocIndex int, blocKeyIndex int) string {
|
||||
return fmt.Sprintf("%d:%d", blocIndex, blocKeyIndex)
|
||||
}
|
||||
|
|
|
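keyForConfig above identifies a config purely by its position in the Corefile: the server block index plus the key index within that block. A small sketch of how such positional keys can index a config registry during setup; the config struct here is a stand-in, not the dnsserver one.

```go
package main

import "fmt"

// config is a stand-in for dnsserver.Config.
type config struct{ Zone, Port string }

// keyForConfig builds a key for identifying the configs during setup time.
func keyForConfig(blockIndex, blockKeyIndex int) string {
	return fmt.Sprintf("%d:%d", blockIndex, blockKeyIndex)
}

func main() {
	registry := map[string]*config{}

	// Pretend the Corefile has one server block with two keys.
	blocks := [][]*config{{
		{Zone: "example.org.", Port: "53"},
		{Zone: "example.net.", Port: "53"},
	}}
	for ib, block := range blocks {
		for ik, cfg := range block {
			registry[keyForConfig(ib, ik)] = cfg
		}
	}

	fmt.Println(registry["0:1"].Zone) // example.net.
}
```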
@ -2,6 +2,7 @@ package dnsserver
|
|||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/coredns/coredns/plugin/pkg/nonwriter"
|
||||
)
|
||||
|
@ -14,6 +15,9 @@ type DoHWriter struct {
|
|||
raddr net.Addr
|
||||
// laddr is our address. This can be optionally set.
|
||||
laddr net.Addr
|
||||
|
||||
// request is the HTTP request we're currently handling.
|
||||
request *http.Request
|
||||
}
|
||||
|
||||
// RemoteAddr returns the remote address.
|
||||
|
@ -21,3 +25,6 @@ func (d *DoHWriter) RemoteAddr() net.Addr { return d.raddr }
|
|||
|
||||
// LocalAddr returns the local address.
|
||||
func (d *DoHWriter) LocalAddr() net.Addr { return d.laddr }
|
||||
|
||||
// Request returns the HTTP request
|
||||
func (d *DoHWriter) Request() *http.Request { return d.request }
|
||||
|
|
|
@ -1,14 +1,25 @@
|
|||
package dnsserver
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// startUpZones create the text that we show when starting up:
|
||||
// startUpZones creates the text that we show when starting up:
|
||||
// grpc://example.com.:1055
|
||||
// example.com.:1053 on 127.0.0.1
|
||||
func startUpZones(protocol, addr string, zones map[string]*Config) string {
|
||||
s := ""
|
||||
|
||||
for zone := range zones {
|
||||
keys := make([]string, len(zones))
|
||||
i := 0
|
||||
for k := range zones {
|
||||
keys[i] = k
|
||||
i++
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, zone := range keys {
|
||||
// split addr into protocol, IP and Port
|
||||
_, ip, port, err := SplitProtocolHostPort(addr)
|
||||
|
||||
|
|
|
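The startUpZones change above switches from ranging over the zones map directly to collecting and sorting the keys first, so the startup banner comes out in a deterministic order. The same pattern in isolation:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	zones := map[string]struct{}{
		"example.org.": {},
		"example.com.": {},
		".":            {},
	}

	// Map iteration order is randomized in Go, so collect and sort the
	// keys before printing to get stable output.
	keys := make([]string, 0, len(zones))
	for k := range zones {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, zone := range keys {
		fmt.Printf("dns://%s:53\n", zone)
	}
}
```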
@ -4,16 +4,15 @@ import (
|
|||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coredns/caddy"
|
||||
"github.com/coredns/caddy/caddyfile"
|
||||
"github.com/coredns/coredns/plugin"
|
||||
"github.com/coredns/coredns/plugin/pkg/dnsutil"
|
||||
"github.com/coredns/coredns/plugin/pkg/parse"
|
||||
"github.com/coredns/coredns/plugin/pkg/transport"
|
||||
|
||||
"github.com/caddyserver/caddy"
|
||||
"github.com/caddyserver/caddy/caddyfile"
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
const serverType = "dns"
|
||||
|
@ -22,6 +21,7 @@ const serverType = "dns"
|
|||
// wise they potentially clash with other server types.
|
||||
func init() {
|
||||
flag.StringVar(&Port, serverType+".port", DefaultPort, "Default port")
|
||||
flag.StringVar(&Port, "p", DefaultPort, "Default port")
|
||||
|
||||
caddy.RegisterServerType(serverType, caddy.ServerType{
|
||||
Directives: func() []string { return Directives },
|
||||
|
@ -61,11 +61,57 @@ var _ caddy.Context = &dnsContext{}
|
|||
func (h *dnsContext) InspectServerBlocks(sourceFile string, serverBlocks []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) {
|
||||
// Normalize and check all the zone names and check for duplicates
|
||||
for ib, s := range serverBlocks {
|
||||
// Walk s.Keys and expand any reverse address into its proper DNS in-addr zone. If the expansion leads to
// more than one reverse zone, replace the current value and add the rest to s.Keys.
zoneAddrs := []zoneAddr{}
|
||||
for ik, k := range s.Keys {
|
||||
za, err := normalizeZone(k)
|
||||
trans, k1 := parse.Transport(k) // get rid of any dns:// or other scheme.
|
||||
hosts, port, err := plugin.SplitHostPort(k1)
|
||||
// We need to make this a fully qualified domain name to catch all errors here and not later when
|
||||
// plugin.Normalize is called again on these strings, with the prime difference being that the domain
|
||||
// name is fully qualified. This was found by fuzzing where "ȶ" is deemed OK, but "ȶ." is not (might be a
|
||||
// bug in miekg/dns actually). But here we were checking ȶ, which is OK, and later we barf in ȶ. leading to
|
||||
// "index out of range".
|
||||
for ih := range hosts {
|
||||
_, _, err := plugin.SplitHostPort(dns.Fqdn(hosts[ih]))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if port == "" {
|
||||
switch trans {
|
||||
case transport.DNS:
|
||||
port = Port
|
||||
case transport.TLS:
|
||||
port = transport.TLSPort
|
||||
case transport.GRPC:
|
||||
port = transport.GRPCPort
|
||||
case transport.HTTPS:
|
||||
port = transport.HTTPSPort
|
||||
}
|
||||
}
|
||||
|
||||
if len(hosts) > 1 {
|
||||
s.Keys[ik] = hosts[0] + ":" + port // replace for the first
|
||||
for _, h := range hosts[1:] { // add the rest
|
||||
s.Keys = append(s.Keys, h+":"+port)
|
||||
}
|
||||
}
|
||||
for i := range hosts {
|
||||
zoneAddrs = append(zoneAddrs, zoneAddr{Zone: dns.Fqdn(hosts[i]), Port: port, Transport: trans})
|
||||
}
|
||||
}
|
||||
|
||||
serverBlocks[ib].Keys = s.Keys // important to save back the new keys that are potentially created here.
|
||||
|
||||
var firstConfigInBlock *Config
|
||||
|
||||
for ik := range s.Keys {
|
||||
za := zoneAddrs[ik]
|
||||
s.Keys[ik] = za.String()
|
||||
// Save the config to our master list, and key it for lookups.
|
||||
cfg := &Config{
|
||||
|
@ -74,23 +120,16 @@ func (h *dnsContext) InspectServerBlocks(sourceFile string, serverBlocks []caddy
|
|||
Port: za.Port,
|
||||
Transport: za.Transport,
|
||||
}
|
||||
keyConfig := keyForConfig(ib, ik)
|
||||
if za.IPNet == nil {
|
||||
h.saveConfig(keyConfig, cfg)
|
||||
continue
|
||||
}
|
||||
|
||||
ones, bits := za.IPNet.Mask.Size()
|
||||
if (bits-ones)%8 != 0 { // only do this for non-octet boundaries
|
||||
cfg.FilterFunc = func(s string) bool {
|
||||
// TODO(miek): strings.ToLower! Slow and allocates new string.
|
||||
addr := dnsutil.ExtractAddressFromReverse(strings.ToLower(s))
|
||||
if addr == "" {
|
||||
return true
|
||||
}
|
||||
return za.IPNet.Contains(net.ParseIP(addr))
|
||||
}
|
||||
// Set reference to the first config in the current block.
|
||||
// This is used later by MakeServers to share a single plugin list
|
||||
// for all zones in a server block.
|
||||
if ik == 0 {
|
||||
firstConfigInBlock = cfg
|
||||
}
|
||||
cfg.firstConfigInBlock = firstConfigInBlock
|
||||
|
||||
keyConfig := keyForConfig(ib, ik)
|
||||
h.saveConfig(keyConfig, cfg)
|
||||
}
|
||||
}
|
||||
|
@ -107,6 +146,17 @@ func (h *dnsContext) MakeServers() ([]caddy.Server, error) {
|
|||
return nil, errValid
|
||||
}
|
||||
|
||||
// Copy the Plugin, ListenHosts and Debug from the first config in the block
// to all other configs in the same block. Doing this results in zones
// sharing the same plugin instances and settings as other zones in
// the same block.
for _, c := range h.configs {
|
||||
c.Plugin = c.firstConfigInBlock.Plugin
|
||||
c.ListenHosts = c.firstConfigInBlock.ListenHosts
|
||||
c.Debug = c.firstConfigInBlock.Debug
|
||||
c.TLSConfig = c.firstConfigInBlock.TLSConfig
|
||||
}
|
||||
|
||||
// we must map (group) each config to a bind address
|
||||
groups, err := groupConfigsByListenAddr(h.configs)
|
||||
if err != nil {
|
||||
|
@ -223,7 +273,6 @@ func (h *dnsContext) validateZonesAndListeningAddresses() error {
|
|||
// address (what you pass into net.Listen) to the list of site configs.
|
||||
// This function does NOT vet the configs to ensure they are compatible.
|
||||
func groupConfigsByListenAddr(configs []*Config) (map[string][]*Config, error) {
|
||||
|
||||
groups := make(map[string][]*Config)
|
||||
for _, conf := range configs {
|
||||
for _, h := range conf.ListenHosts {
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coredns/caddy"
|
||||
"github.com/coredns/coredns/plugin"
|
||||
"github.com/coredns/coredns/plugin/metrics/vars"
|
||||
"github.com/coredns/coredns/plugin/pkg/edns"
|
||||
|
@ -20,7 +21,6 @@ import (
|
|||
"github.com/coredns/coredns/plugin/pkg/transport"
|
||||
"github.com/coredns/coredns/request"
|
||||
|
||||
"github.com/caddyserver/caddy"
|
||||
"github.com/miekg/dns"
|
||||
ot "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
@ -66,10 +66,6 @@ func NewServer(addr string, group []*Config) (*Server, error) {
|
|||
if site.Debug {
|
||||
s.debug = true
|
||||
log.D.Set()
|
||||
} else {
|
||||
// When reloading we need to explicitly disable debug logging if it is now disabled.
|
||||
s.debug = false
|
||||
log.D.Clear()
|
||||
}
|
||||
// set the config per zone
|
||||
s.zones[site.Zone] = site
|
||||
|
@ -97,6 +93,11 @@ func NewServer(addr string, group []*Config) (*Server, error) {
|
|||
site.pluginChain = stack
|
||||
}
|
||||
|
||||
if !s.debug {
|
||||
// When reloading we need to explicitly disable debug logging if it is now disabled.
|
||||
log.D.Clear()
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
|
@ -109,6 +110,7 @@ func (s *Server) Serve(l net.Listener) error {
|
|||
s.m.Lock()
|
||||
s.server[tcp] = &dns.Server{Listener: l, Net: "tcp", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
|
||||
ctx := context.WithValue(context.Background(), Key{}, s)
|
||||
ctx = context.WithValue(ctx, LoopKey{}, 0)
|
||||
s.ServeDNS(ctx, w, r)
|
||||
})}
|
||||
s.m.Unlock()
|
||||
|
@ -122,6 +124,7 @@ func (s *Server) ServePacket(p net.PacketConn) error {
|
|||
s.m.Lock()
|
||||
s.server[udp] = &dns.Server{PacketConn: p, Net: "udp", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
|
||||
ctx := context.WithValue(context.Background(), Key{}, s)
|
||||
ctx = context.WithValue(ctx, LoopKey{}, 0)
|
||||
s.ServeDNS(ctx, w, r)
|
||||
})}
|
||||
s.m.Unlock()
|
||||
|
@ -193,7 +196,7 @@ func (s *Server) Stop() (err error) {
|
|||
// Address together with Stop() implement caddy.GracefulServer.
|
||||
func (s *Server) Address() string { return s.Addr }
|
||||
|
||||
// ServeDNS is the entry point for every request to the address that s
// ServeDNS is the entry point for every request to the address that
// is bound to. It acts as a multiplexer for the request's zone name as
// defined in the request so that the correct zone
// (configuration and plugin stack) will handle the request.
|
@ -210,7 +213,7 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
|
|||
// In case the user doesn't enable error plugin, we still
|
||||
// need to make sure that we stay alive up here
|
||||
if rec := recover(); rec != nil {
|
||||
log.Errorf("Recovered from panic in server: %q", s.Addr)
|
||||
log.Errorf("Recovered from panic in server: %q %v", s.Addr, rec)
|
||||
vars.Panic.Inc()
|
||||
errorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure)
|
||||
}
|
||||
|
@ -239,23 +242,16 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
|
|||
|
||||
for {
|
||||
if h, ok := s.zones[q[off:]]; ok {
|
||||
if h.pluginChain == nil { // zone defined, but has not got any plugins
|
||||
errorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)
|
||||
return
|
||||
}
|
||||
if r.Question[0].Qtype != dns.TypeDS {
|
||||
if h.FilterFunc == nil {
|
||||
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
|
||||
if !plugin.ClientWrite(rcode) {
|
||||
errorFunc(s.Addr, w, r, rcode)
|
||||
}
|
||||
return
|
||||
}
|
||||
// FilterFunc is set, call it to see if we should use this handler.
// This is given the full query name.
if h.FilterFunc(q) {
|
||||
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
|
||||
if !plugin.ClientWrite(rcode) {
|
||||
errorFunc(s.Addr, w, r, rcode)
|
||||
}
|
||||
return
|
||||
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
|
||||
if !plugin.ClientWrite(rcode) {
|
||||
errorFunc(s.Addr, w, r, rcode)
|
||||
}
|
||||
return
|
||||
}
|
||||
// The type is DS, keep the handler, but keep on searching as maybe we are serving
|
||||
// the parent as well and the DS should be routed to it - this will probably *misroute* DS
|
||||
|
@ -332,7 +328,7 @@ func errorAndMetricsFunc(server string, w dns.ResponseWriter, r *dns.Msg, rc int
|
|||
answer.SetRcode(r, rc)
|
||||
state.SizeAndDo(answer)
|
||||
|
||||
vars.Report(server, state, vars.Dropped, rcode.ToString(rc), answer.Len(), time.Now())
|
||||
vars.Report(server, state, vars.Dropped, rcode.ToString(rc), "" /* plugin */, answer.Len(), time.Now())
|
||||
|
||||
w.WriteMsg(answer)
|
||||
}
|
||||
|
@ -342,8 +338,13 @@ const (
|
|||
udp = 1
|
||||
)
|
||||
|
||||
// Key is the context key for the current server added to the context.
|
||||
type Key struct{}
|
||||
type (
|
||||
// Key is the context key for the current server added to the context.
|
||||
Key struct{}
|
||||
|
||||
// LoopKey is the context key to detect server wide loops.
|
||||
LoopKey struct{}
|
||||
)
|
||||
|
||||
// EnableChaos is a map with plugin names for which we should open CH class queries as we block these by default.
|
||||
var EnableChaos = map[string]struct{}{
|
||||
|
|
|
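The Key/LoopKey change above groups the two context keys into a single type block; both are empty structs used as context keys so the current server (and a loop counter) travel with each request. A minimal sketch of that idiom with illustrative key names:

```go
package main

import (
	"context"
	"fmt"
)

type (
	// serverKey is the context key for the current server.
	serverKey struct{}
	// loopKey is the context key used to detect server-wide loops.
	loopKey struct{}
)

func main() {
	ctx := context.WithValue(context.Background(), serverKey{}, "dns://.:53")
	ctx = context.WithValue(ctx, loopKey{}, 0)

	// Handlers pull the values back out and, for loop detection,
	// store an incremented counter in a derived context.
	if srv, ok := ctx.Value(serverKey{}).(string); ok {
		fmt.Println("handled by", srv)
	}
	loops := ctx.Value(loopKey{}).(int)
	ctx = context.WithValue(ctx, loopKey{}, loops+1)
	fmt.Println("loop count now", ctx.Value(loopKey{}).(int))
}
```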
@ -7,11 +7,11 @@ import (
|
|||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/coredns/caddy"
|
||||
"github.com/coredns/coredns/pb"
|
||||
"github.com/coredns/coredns/plugin/pkg/reuseport"
|
||||
"github.com/coredns/coredns/plugin/pkg/transport"
|
||||
|
||||
"github.com/caddyserver/caddy"
|
||||
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
|
||||
"github.com/miekg/dns"
|
||||
"github.com/opentracing/opentracing-go"
|
||||
|
@ -34,12 +34,17 @@ func NewServergRPC(addr string, group []*Config) (*ServergRPC, error) {
|
|||
return nil, err
|
||||
}
|
||||
// The *tls* plugin must make sure that multiple conflicting
|
||||
// TLS configuration return an error: it can only be specified once.
|
||||
// TLS configuration returns an error: it can only be specified once.
|
||||
var tlsConfig *tls.Config
|
||||
for _, conf := range s.zones {
|
||||
// Should we error if some configs *don't* have TLS?
|
||||
tlsConfig = conf.TLSConfig
|
||||
}
|
||||
// http/2 is required when using gRPC. We need to specify it in next protos
|
||||
// or the upgrade won't happen.
|
||||
if tlsConfig != nil {
|
||||
tlsConfig.NextProtos = []string{"h2"}
|
||||
}
|
||||
|
||||
return &ServergRPC{Server: s, tlsConfig: tlsConfig}, nil
|
||||
}
|
||||
|
@ -134,6 +139,7 @@ func (s *ServergRPC) Query(ctx context.Context, in *pb.DnsPacket) (*pb.DnsPacket
|
|||
w := &gRPCresponse{localAddr: s.listenAddr, remoteAddr: a, Msg: msg}
|
||||
|
||||
dnsCtx := context.WithValue(ctx, Key{}, s.Server)
|
||||
dnsCtx = context.WithValue(dnsCtx, LoopKey{}, 0)
|
||||
s.ServeDNS(dnsCtx, w, msg)
|
||||
|
||||
packed, err := w.Msg.Pack()
|
||||
|
|
|
@ -9,38 +9,60 @@ import (
|
|||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/coredns/caddy"
|
||||
"github.com/coredns/coredns/plugin/pkg/dnsutil"
|
||||
"github.com/coredns/coredns/plugin/pkg/doh"
|
||||
"github.com/coredns/coredns/plugin/pkg/response"
|
||||
"github.com/coredns/coredns/plugin/pkg/reuseport"
|
||||
"github.com/coredns/coredns/plugin/pkg/transport"
|
||||
|
||||
"github.com/caddyserver/caddy"
|
||||
)
|
||||
|
||||
// ServerHTTPS represents an instance of a DNS-over-HTTPS server.
|
||||
type ServerHTTPS struct {
|
||||
*Server
|
||||
httpsServer *http.Server
|
||||
listenAddr net.Addr
|
||||
tlsConfig *tls.Config
|
||||
httpsServer *http.Server
|
||||
listenAddr net.Addr
|
||||
tlsConfig *tls.Config
|
||||
validRequest func(*http.Request) bool
|
||||
}
|
||||
|
||||
// NewServerHTTPS returns a new CoreDNS GRPC server and compiles all plugins in to it.
|
||||
// NewServerHTTPS returns a new CoreDNS HTTPS server and compiles all plugins in to it.
|
||||
func NewServerHTTPS(addr string, group []*Config) (*ServerHTTPS, error) {
|
||||
s, err := NewServer(addr, group)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// The *tls* plugin must make sure that multiple conflicting
|
||||
// TLS configuration return an error: it can only be specified once.
|
||||
// TLS configuration returns an error: it can only be specified once.
|
||||
var tlsConfig *tls.Config
|
||||
for _, conf := range s.zones {
|
||||
// Should we error if some configs *don't* have TLS?
|
||||
tlsConfig = conf.TLSConfig
|
||||
}
|
||||
|
||||
sh := &ServerHTTPS{Server: s, tlsConfig: tlsConfig, httpsServer: new(http.Server)}
|
||||
// http/2 is recommended when using DoH. We need to specify it in next protos
|
||||
// or the upgrade won't happen.
|
||||
if tlsConfig != nil {
|
||||
tlsConfig.NextProtos = []string{"h2", "http/1.1"}
|
||||
}
|
||||
|
||||
// Use a custom request validation func or use the standard DoH path check.
|
||||
var validator func(*http.Request) bool
|
||||
for _, conf := range s.zones {
|
||||
validator = conf.HTTPRequestValidateFunc
|
||||
}
|
||||
if validator == nil {
|
||||
validator = func(r *http.Request) bool { return r.URL.Path == doh.Path }
|
||||
}
|
||||
|
||||
srv := &http.Server{
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
IdleTimeout: 120 * time.Second,
|
||||
}
|
||||
sh := &ServerHTTPS{
|
||||
Server: s, tlsConfig: tlsConfig, httpsServer: srv, validRequest: validator,
|
||||
}
|
||||
sh.httpsServer.Handler = sh
|
||||
|
||||
return sh, nil
|
||||
|
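NewServerHTTPS above now lets a per-config HTTPRequestValidateFunc replace the fixed DoH path check, falling back to matching the standard path when none is set. A sketch of how such a validator might be chosen and exercised; the "/dns-query" literal stands in for the doh.Path constant used in the diff.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// chooseValidator falls back to the standard DoH path check when no
// custom validator is configured.
func chooseValidator(custom func(*http.Request) bool) func(*http.Request) bool {
	if custom != nil {
		return custom
	}
	return func(r *http.Request) bool { return r.URL.Path == "/dns-query" }
}

func main() {
	// A custom validator that also accepts a hypothetical legacy path.
	valid := chooseValidator(func(r *http.Request) bool {
		return r.URL.Path == "/dns-query" || r.URL.Path == "/resolve"
	})

	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !valid(r) {
			http.Error(w, "", http.StatusNotFound)
			return
		}
		fmt.Fprintln(w, "would decode the DoH request here")
	})

	for _, p := range []string{"/dns-query", "/resolve", "/other"} {
		rec := httptest.NewRecorder()
		h.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, p, nil))
		fmt.Println(p, "->", rec.Code)
	}
}
```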
@ -104,7 +126,7 @@ func (s *ServerHTTPS) Stop() error {
|
|||
// chain, converts it back and write it to the client.
|
||||
func (s *ServerHTTPS) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
if r.URL.Path != doh.Path {
|
||||
if !s.validRequest(r) {
|
||||
http.Error(w, "", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
@ -118,11 +140,16 @@ func (s *ServerHTTPS) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||
// Create a DoHWriter with the correct addresses in it.
|
||||
h, p, _ := net.SplitHostPort(r.RemoteAddr)
|
||||
port, _ := strconv.Atoi(p)
|
||||
dw := &DoHWriter{laddr: s.listenAddr, raddr: &net.TCPAddr{IP: net.ParseIP(h), Port: port}}
|
||||
dw := &DoHWriter{
|
||||
laddr: s.listenAddr,
|
||||
raddr: &net.TCPAddr{IP: net.ParseIP(h), Port: port},
|
||||
request: r,
|
||||
}
|
||||
|
||||
// We just call the normal chain handler - all error handling is done there.
|
||||
// We should expect a packet to be returned that we can send to the client.
|
||||
ctx := context.WithValue(context.Background(), Key{}, s.Server)
|
||||
ctx = context.WithValue(ctx, LoopKey{}, 0)
|
||||
s.ServeDNS(ctx, dw, msg)
|
||||
|
||||
// See section 4.2.1 of RFC 8484.
|
||||
|
|
|
@ -6,10 +6,10 @@ import (
|
|||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/coredns/caddy"
|
||||
"github.com/coredns/coredns/plugin/pkg/reuseport"
|
||||
"github.com/coredns/coredns/plugin/pkg/transport"
|
||||
|
||||
"github.com/caddyserver/caddy"
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
|
@ -26,7 +26,7 @@ func NewServerTLS(addr string, group []*Config) (*ServerTLS, error) {
|
|||
return nil, err
|
||||
}
|
||||
// The *tls* plugin must make sure that multiple conflicting
|
||||
// TLS configuration return an error: it can only be specified once.
|
||||
// TLS configuration returns an error: it can only be specified once.
|
||||
var tlsConfig *tls.Config
|
||||
for _, conf := range s.zones {
|
||||
// Should we error if some configs *don't* have TLS?
|
||||
|
@ -50,6 +50,7 @@ func (s *ServerTLS) Serve(l net.Listener) error {
|
|||
// Only fill out the TCP server for this one.
|
||||
s.server[tcp] = &dns.Server{Listener: l, Net: "tcp-tls", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
|
||||
ctx := context.WithValue(context.Background(), Key{}, s.Server)
|
||||
ctx = context.WithValue(ctx, LoopKey{}, 0)
|
||||
s.ServeDNS(ctx, w, r)
|
||||
})}
|
||||
s.m.Unlock()
|
||||
|
|
|
@ -11,6 +11,7 @@ package dnsserver
|
|||
// care what plugin above them are doing.
|
||||
var Directives = []string{
|
||||
"metadata",
|
||||
"geoip",
|
||||
"cancel",
|
||||
"tls",
|
||||
"reload",
|
||||
|
@ -27,6 +28,7 @@ var Directives = []string{
|
|||
"errors",
|
||||
"log",
|
||||
"dnstap",
|
||||
"local",
|
||||
"dns64",
|
||||
"acl",
|
||||
"any",
|
||||
|
@ -34,8 +36,10 @@ var Directives = []string{
|
|||
"loadbalance",
|
||||
"cache",
|
||||
"rewrite",
|
||||
"header",
|
||||
"dnssec",
|
||||
"autopath",
|
||||
"minimal",
|
||||
"template",
|
||||
"transfer",
|
||||
"hosts",
|
||||
|
|
|
@ -4,15 +4,13 @@ package coremain
|
|||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/coredns/caddy"
|
||||
"github.com/coredns/coredns/core/dnsserver"
|
||||
|
||||
"github.com/caddyserver/caddy"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -36,21 +34,6 @@ func init() {
|
|||
// Run is CoreDNS's main() function.
|
||||
func Run() {
|
||||
caddy.TrapSignals()
|
||||
|
||||
// Reset flag.CommandLine to get rid of unwanted flags for instance from glog (used in kubernetes).
|
||||
// And read the ones we want to keep.
|
||||
flag.VisitAll(func(f *flag.Flag) {
|
||||
if _, ok := flagsBlacklist[f.Name]; ok {
|
||||
return
|
||||
}
|
||||
flagsToKeep = append(flagsToKeep, f)
|
||||
})
|
||||
|
||||
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
|
||||
for _, f := range flagsToKeep {
|
||||
flag.Var(f.Value, f.Name, f.Usage)
|
||||
}
|
||||
|
||||
flag.Parse()
|
||||
|
||||
if len(flag.Args()) > 0 {
|
||||
|
@ -112,7 +95,7 @@ func confLoader(serverType string) (caddy.Input, error) {
|
|||
return caddy.CaddyfileFromPipe(os.Stdin, serverType)
|
||||
}
|
||||
|
||||
contents, err := ioutil.ReadFile(conf)
|
||||
contents, err := os.ReadFile(conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -125,7 +108,7 @@ func confLoader(serverType string) (caddy.Input, error) {
|
|||
|
||||
// defaultLoader loads the Corefile from the current working directory.
|
||||
func defaultLoader(serverType string) (caddy.Input, error) {
|
||||
contents, err := ioutil.ReadFile(caddy.DefaultConfigFile)
|
||||
contents, err := os.ReadFile(caddy.DefaultConfigFile)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
|
@ -198,16 +181,3 @@ var (
|
|||
// Gitcommit contains the commit where we built CoreDNS from.
|
||||
GitCommit string
|
||||
)
|
||||
|
||||
// flagsBlacklist removes flags with these names from our flagset.
|
||||
var flagsBlacklist = map[string]struct{}{
|
||||
"logtostderr": {},
|
||||
"alsologtostderr": {},
|
||||
"v": {},
|
||||
"stderrthreshold": {},
|
||||
"vmodule": {},
|
||||
"log_backtrace_at": {},
|
||||
"log_dir": {},
|
||||
}
|
||||
|
||||
var flagsToKeep []*flag.Flag
|
||||
|
|
|
@ -2,7 +2,7 @@ package coremain
|
|||
|
||||
// Various CoreDNS constants.
|
||||
const (
|
||||
CoreVersion = "1.7.0"
|
||||
CoreVersion = "1.8.7"
|
||||
coreName = "CoreDNS"
|
||||
serverType = "dns"
|
||||
)
|
||||
|
|
|
@ -26,23 +26,14 @@ type ServiceBackend interface {
|
|||
// Note: it does not implement a specific service.
|
||||
Records(ctx context.Context, state request.Request, exact bool) ([]msg.Service, error)
|
||||
|
||||
// IsNameError return true if err indicated a record not found condition
|
||||
// IsNameError returns true if err indicated a record not found condition
|
||||
IsNameError(err error) bool
|
||||
|
||||
Transferer
|
||||
}
|
||||
|
||||
// Transferer defines an interface for backends that provide AXFR of all records.
|
||||
type Transferer interface {
|
||||
// Serial returns a SOA serial number to construct a SOA record.
|
||||
Serial(state request.Request) uint32
|
||||
|
||||
// MinTTL returns the minimum TTL to be used in the SOA record.
|
||||
MinTTL(state request.Request) uint32
|
||||
|
||||
// Transfer handles a zone transfer it writes to the client just
|
||||
// like any other handler.
|
||||
Transfer(ctx context.Context, state request.Request) (int, error)
|
||||
}
|
||||
|
||||
// Options are extra options that can be specified for a lookup.
|
||||
|
|
|
@ -422,7 +422,7 @@ func NS(ctx context.Context, b ServiceBackend, zone string, state request.Reques
|
|||
old := state.QName()
|
||||
|
||||
state.Clear()
|
||||
state.Req.Question[0].Name = "ns.dns." + zone
|
||||
state.Req.Question[0].Name = dnsutil.Join("ns.dns.", zone)
|
||||
services, err := b.Services(ctx, state, false, opt)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
@ -440,8 +440,8 @@ func NS(ctx context.Context, b ServiceBackend, zone string, state request.Reques
|
|||
|
||||
case dns.TypeA, dns.TypeAAAA:
|
||||
serv.Host = msg.Domain(serv.Key)
|
||||
extra = append(extra, newAddress(serv, serv.Host, ip, what))
|
||||
ns := serv.NewNS(state.QName())
|
||||
extra = append(extra, newAddress(serv, ns.Ns, ip, what))
|
||||
if _, ok := seen[ns.Ns]; ok {
|
||||
continue
|
||||
}
|
||||
|
@ -462,12 +462,8 @@ func SOA(ctx context.Context, b ServiceBackend, zone string, state request.Reque
|
|||
|
||||
header := dns.RR_Header{Name: zone, Rrtype: dns.TypeSOA, Ttl: ttl, Class: dns.ClassINET}
|
||||
|
||||
Mbox := hostmaster + "."
|
||||
Ns := "ns.dns."
|
||||
if zone[0] != '.' {
|
||||
Mbox += zone
|
||||
Ns += zone
|
||||
}
|
||||
Mbox := dnsutil.Join(hostmaster, zone)
|
||||
Ns := dnsutil.Join("ns.dns", zone)
|
||||
|
||||
soa := &dns.SOA{Hdr: header,
|
||||
Mbox: Mbox,
|
||||
|
|
|
@ -10,6 +10,9 @@ With *cache* enabled, all records except zone transfers and metadata records wil
|
|||
3600s. Caching is mostly useful in a scenario when fetching data from the backend (upstream,
|
||||
database, etc.) is expensive.
|
||||
|
||||
*Cache* will change the query to enable DNSSEC (DNSSEC OK; DO) if it passes through the plugin. If
|
||||
the client didn't request any DNSSEC (records), these are filtered out when replying.
|
||||
|
||||
This plugin can only be used once per Server Block.
|
||||
|
||||
## Syntax
|
||||
|
@ -72,9 +75,12 @@ If monitoring is enabled (via the *prometheus* plugin) then the following metric
|
|||
|
||||
* `coredns_cache_entries{server, type}` - Total elements in the cache by cache type.
|
||||
* `coredns_cache_hits_total{server, type}` - Counter of cache hits by cache type.
|
||||
* `coredns_cache_misses_total{server}` - Counter of cache misses.
|
||||
* `coredns_cache_misses_total{server}` - Counter of cache misses. - Deprecated, derive misses from cache hits/requests counters.
|
||||
* `coredns_cache_requests_total{server}` - Counter of cache requests.
|
||||
* `coredns_cache_prefetch_total{server}` - Counter of times the cache has prefetched a cached item.
|
||||
* `coredns_cache_drops_total{server}` - Counter of responses excluded from the cache due to request/response question name mismatch.
|
||||
* `coredns_cache_served_stale_total{server}` - Counter of requests served from stale cache entries.
|
||||
* `coredns_cache_evictions_total{server, type}` - Counter of cache evictions.
|
||||
|
||||
Cache types are either "denial" or "success". `Server` is the server handling the request, see the
|
||||
prometheus plugin for documentation.
|
||||
|
|
|
@ -65,31 +65,21 @@ func New() *Cache {
|
|||
// key returns the key under which we store the item; false will be returned if we don't store the message.
// Currently we do not cache truncated messages, errors, zone transfers or dynamic update messages.
// qname holds the already lowercased qname.
|
||||
func key(qname string, m *dns.Msg, t response.Type, do bool) (bool, uint64) {
|
||||
func key(qname string, m *dns.Msg, t response.Type) (bool, uint64) {
|
||||
// We don't store truncated responses.
|
||||
if m.Truncated {
|
||||
return false, 0
|
||||
}
|
||||
// Nor errors or Meta or Update
|
||||
// Nor errors or Meta or Update.
|
||||
if t == response.OtherError || t == response.Meta || t == response.Update {
|
||||
return false, 0
|
||||
}
|
||||
|
||||
return true, hash(qname, m.Question[0].Qtype, do)
|
||||
return true, hash(qname, m.Question[0].Qtype)
|
||||
}
|
||||
|
||||
var one = []byte("1")
|
||||
var zero = []byte("0")
|
||||
|
||||
func hash(qname string, qtype uint16, do bool) uint64 {
|
||||
func hash(qname string, qtype uint16) uint64 {
|
||||
h := fnv.New64()
|
||||
|
||||
if do {
|
||||
h.Write(one)
|
||||
} else {
|
||||
h.Write(zero)
|
||||
}
|
||||
|
||||
h.Write([]byte{byte(qtype >> 8)})
|
||||
h.Write([]byte{byte(qtype)})
|
||||
h.Write([]byte(qname))
|
||||
|
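The key/hash change above removes the DO bit from the cache key: the cache now always upgrades upstream queries to DNSSEC and filters DNSSEC records on the way out, so one entry serves both kinds of clients. A standalone sketch of hashing only the query type and lowercased, fully qualified name:

```go
package main

import (
	"fmt"
	"hash/fnv"
	"strings"

	"github.com/miekg/dns"
)

// hashKey mirrors the idea of the cache key above: an FNV-64 hash over
// the query type and the lowercased, fully qualified query name.
func hashKey(qname string, qtype uint16) uint64 {
	h := fnv.New64()
	h.Write([]byte{byte(qtype >> 8), byte(qtype)})
	h.Write([]byte(strings.ToLower(dns.Fqdn(qname))))
	return h.Sum64()
}

func main() {
	a := hashKey("example.org", dns.TypeA)
	b := hashKey("EXAMPLE.ORG.", dns.TypeA)
	c := hashKey("example.org", dns.TypeAAAA)
	fmt.Println(a == b) // true: case and trailing dot do not matter
	fmt.Println(a == c) // false: different qtype, different cache entry
}
```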
@ -114,6 +104,7 @@ type ResponseWriter struct {
|
|||
state request.Request
|
||||
server string // Server handling the request.
|
||||
|
||||
do bool // When true the original request had the DO bit set.
|
||||
prefetch bool // When true write nothing back to the client.
|
||||
remoteAddr net.Addr
|
||||
}
|
||||
|
@ -152,14 +143,10 @@ func (w *ResponseWriter) RemoteAddr() net.Addr {
|
|||
|
||||
// WriteMsg implements the dns.ResponseWriter interface.
|
||||
func (w *ResponseWriter) WriteMsg(res *dns.Msg) error {
|
||||
do := false
|
||||
mt, opt := response.Typify(res, w.now().UTC())
|
||||
if opt != nil {
|
||||
do = opt.Do()
|
||||
}
|
||||
mt, _ := response.Typify(res, w.now().UTC())
|
||||
|
||||
// key returns empty string for anything we don't want to cache.
|
||||
hasKey, key := key(w.state.Name(), res, mt, do)
|
||||
hasKey, key := key(w.state.Name(), res, mt)
|
||||
|
||||
msgTTL := dnsutil.MinimalTTL(res, mt)
|
||||
var duration time.Duration
|
||||
|
@ -188,18 +175,16 @@ func (w *ResponseWriter) WriteMsg(res *dns.Msg) error {
|
|||
}
|
||||
|
||||
// Apply capped TTL to this reply to avoid jarring TTL experience 1799 -> 8 (e.g.)
|
||||
// We also may need to filter out DNSSEC records, see toMsg() for similar code.
|
||||
ttl := uint32(duration.Seconds())
|
||||
for i := range res.Answer {
|
||||
res.Answer[i].Header().Ttl = ttl
|
||||
}
|
||||
for i := range res.Ns {
|
||||
res.Ns[i].Header().Ttl = ttl
|
||||
}
|
||||
for i := range res.Extra {
|
||||
if res.Extra[i].Header().Rrtype != dns.TypeOPT {
|
||||
res.Extra[i].Header().Ttl = ttl
|
||||
}
|
||||
res.Answer = filterRRSlice(res.Answer, ttl, w.do, false)
|
||||
res.Ns = filterRRSlice(res.Ns, ttl, w.do, false)
|
||||
res.Extra = filterRRSlice(res.Extra, ttl, w.do, false)
|
||||
|
||||
if !w.do {
|
||||
res.AuthenticatedData = false // unset AD bit if client is not OK with DNSSEC
|
||||
}
|
||||
|
||||
return w.ResponseWriter.WriteMsg(res)
|
||||
}
|
||||
|
||||
|
@ -209,7 +194,9 @@ func (w *ResponseWriter) set(m *dns.Msg, key uint64, mt response.Type, duration
|
|||
switch mt {
|
||||
case response.NoError, response.Delegation:
|
||||
i := newItem(m, w.now(), duration)
|
||||
w.pcache.Add(key, i)
|
||||
if w.pcache.Add(key, i) {
|
||||
evictions.WithLabelValues(w.server, Success).Inc()
|
||||
}
|
||||
// when pre-fetching, remove the negative cache entry if it exists
|
||||
if w.prefetch {
|
||||
w.ncache.Remove(key)
|
||||
|
@ -217,7 +204,9 @@ func (w *ResponseWriter) set(m *dns.Msg, key uint64, mt response.Type, duration
|
|||
|
||||
case response.NameError, response.NoData, response.ServerError:
|
||||
i := newItem(m, w.now(), duration)
|
||||
w.ncache.Add(key, i)
|
||||
if w.ncache.Add(key, i) {
|
||||
evictions.WithLabelValues(w.server, Denial).Inc()
|
||||
}
|
||||
|
||||
case response.OtherError:
|
||||
// don't cache these
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
package cache
|
||||
|
||||
import "github.com/miekg/dns"
|
||||
|
||||
// isDNSSEC returns true if r is a DNSSEC record. NSEC, NSEC3, DS and RRSIG/SIG
// are DNSSEC records. DNSKEY is not in this list on the assumption that the
// client explicitly asked for it.
|
||||
func isDNSSEC(r dns.RR) bool {
|
||||
switch r.Header().Rrtype {
|
||||
case dns.TypeNSEC:
|
||||
return true
|
||||
case dns.TypeNSEC3:
|
||||
return true
|
||||
case dns.TypeDS:
|
||||
return true
|
||||
case dns.TypeRRSIG:
|
||||
return true
|
||||
case dns.TypeSIG:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// filterRRSlice filters rrs and removes DNSSEC RRs when do is false. In the returned slice
|
||||
// the TTLs are set to ttl. If dup is true the RRs in rrs are _copied_ into the slice that is
|
||||
// returned.
|
||||
func filterRRSlice(rrs []dns.RR, ttl uint32, do, dup bool) []dns.RR {
|
||||
j := 0
|
||||
rs := make([]dns.RR, len(rrs))
|
||||
for _, r := range rrs {
|
||||
if !do && isDNSSEC(r) {
|
||||
continue
|
||||
}
|
||||
if r.Header().Rrtype == dns.TypeOPT {
|
||||
continue
|
||||
}
|
||||
r.Header().Ttl = ttl
|
||||
if dup {
|
||||
rs[j] = dns.Copy(r)
|
||||
} else {
|
||||
rs[j] = r
|
||||
}
|
||||
j++
|
||||
}
|
||||
return rs[:j]
|
||||
}
|
|
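The new dnssec.go above filters DNSSEC records out of cached answers when the client did not set the DO bit, and clamps TTLs at the same time. Since filterRRSlice is unexported in the cache package, here is a compact re-implementation of the same filter applied to a hand-built RR slice, for illustration only:

```go
package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func isDNSSEC(r dns.RR) bool {
	switch r.Header().Rrtype {
	case dns.TypeNSEC, dns.TypeNSEC3, dns.TypeDS, dns.TypeRRSIG, dns.TypeSIG:
		return true
	}
	return false
}

// filter drops DNSSEC records when do is false and clamps TTLs, much like
// the cache plugin's filterRRSlice.
func filter(rrs []dns.RR, ttl uint32, do bool) []dns.RR {
	out := rrs[:0]
	for _, r := range rrs {
		if !do && isDNSSEC(r) {
			continue
		}
		r.Header().Ttl = ttl
		out = append(out, r)
	}
	return out
}

func main() {
	answer := []dns.RR{
		&dns.A{Hdr: dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 3600}, A: net.ParseIP("192.0.2.1")},
		&dns.RRSIG{Hdr: dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeRRSIG, Class: dns.ClassINET, Ttl: 3600}},
	}
	for _, r := range filter(answer, 30, false) {
		fmt.Println(r.Header().String())
	}
	// Only the A record remains, with its TTL capped to 30.
}
```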
@ -14,50 +14,53 @@ import (
|
|||
|
||||
// ServeDNS implements the plugin.Handler interface.
|
||||
func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
|
||||
state := request.Request{W: w, Req: r}
|
||||
rc := r.Copy() // We potentially modify r; to prevent other plugins from seeing this (r is a pointer), copy r into rc.
state := request.Request{W: w, Req: rc}
|
||||
do := state.Do()
|
||||
|
||||
zone := plugin.Zones(c.Zones).Matches(state.Name())
|
||||
if zone == "" {
|
||||
return plugin.NextOrFailure(c.Name(), c.Next, ctx, w, r)
|
||||
return plugin.NextOrFailure(c.Name(), c.Next, ctx, w, rc)
|
||||
}
|
||||
|
||||
now := c.now().UTC()
|
||||
|
||||
server := metrics.WithServer(ctx)
|
||||
|
||||
// On cache miss, if the request has the OPT record and the DO bit set we leave the message as-is. If there isn't a DO bit
// set we will modify the request to _add_ one. This means we will always do DNSSEC lookups on cache misses.
// When writing to cache, any DNSSEC RRs in the response are written to cache with the response.
// When sending a response to a non-DNSSEC client, we remove DNSSEC RRs from the response. We use a 2048 buffer size, which is
// less than 4096 (an older default) and more than 1024, which may be too small. We might need to tweak this
// value to be smaller still to prevent UDP fragmentation.
|
||||
ttl := 0
|
||||
i := c.getIgnoreTTL(now, state, server)
|
||||
if i != nil {
|
||||
ttl = i.ttl(now)
|
||||
}
|
||||
if i == nil {
|
||||
crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server}
|
||||
return plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r)
|
||||
crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do}
|
||||
return c.doRefresh(ctx, state, crr)
|
||||
}
|
||||
if ttl < 0 {
|
||||
servedStale.WithLabelValues(server).Inc()
|
||||
// Adjust the time to get a 0 TTL in the reply built from a stale item.
|
||||
now = now.Add(time.Duration(ttl) * time.Second)
|
||||
go func() {
|
||||
r := r.Copy()
|
||||
crr := &ResponseWriter{Cache: c, state: state, server: server, prefetch: true, remoteAddr: w.LocalAddr()}
|
||||
plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r)
|
||||
}()
|
||||
cw := newPrefetchResponseWriter(server, state, c)
|
||||
go c.doPrefetch(ctx, state, cw, i, now)
|
||||
} else if c.shouldPrefetch(i, now) {
|
||||
cw := newPrefetchResponseWriter(server, state, c)
|
||||
go c.doPrefetch(ctx, state, cw, i, now)
|
||||
}
|
||||
resp := i.toMsg(r, now)
|
||||
resp := i.toMsg(r, now, do)
|
||||
w.WriteMsg(resp)
|
||||
|
||||
if c.shouldPrefetch(i, now) {
|
||||
go c.doPrefetch(ctx, state, server, i, now)
|
||||
}
|
||||
return dns.RcodeSuccess, nil
|
||||
}
|
||||
|
||||
func (c *Cache) doPrefetch(ctx context.Context, state request.Request, server string, i *item, now time.Time) {
|
||||
cw := newPrefetchResponseWriter(server, state, c)
|
||||
|
||||
cachePrefetches.WithLabelValues(server).Inc()
|
||||
plugin.NextOrFailure(c.Name(), c.Next, ctx, cw, state.Req)
|
||||
func (c *Cache) doPrefetch(ctx context.Context, state request.Request, cw *ResponseWriter, i *item, now time.Time) {
|
||||
cachePrefetches.WithLabelValues(cw.server).Inc()
|
||||
c.doRefresh(ctx, state, cw)
|
||||
|
||||
// When prefetching we lose the item i, and with it the frequency
// that we've gathered so far. So we copy the frequency info back
|
@ -67,6 +70,13 @@ func (c *Cache) doPrefetch(ctx context.Context, state request.Request, server st
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Cache) doRefresh(ctx context.Context, state request.Request, cw *ResponseWriter) (int, error) {
|
||||
if !state.Do() {
|
||||
setDo(state.Req)
|
||||
}
|
||||
return plugin.NextOrFailure(c.Name(), c.Next, ctx, cw, state.Req)
|
||||
}
|
||||
|
||||
func (c *Cache) shouldPrefetch(i *item, now time.Time) bool {
|
||||
if c.prefetch <= 0 {
|
||||
return false
|
||||
|
@ -80,7 +90,8 @@ func (c *Cache) shouldPrefetch(i *item, now time.Time) bool {
|
|||
func (c *Cache) Name() string { return "cache" }
|
||||
|
||||
func (c *Cache) get(now time.Time, state request.Request, server string) (*item, bool) {
|
||||
k := hash(state.Name(), state.QType(), state.Do())
|
||||
k := hash(state.Name(), state.QType())
|
||||
cacheRequests.WithLabelValues(server).Inc()
|
||||
|
||||
if i, ok := c.ncache.Get(k); ok && i.(*item).ttl(now) > 0 {
|
||||
cacheHits.WithLabelValues(server, Denial).Inc()
|
||||
|
@ -97,7 +108,8 @@ func (c *Cache) get(now time.Time, state request.Request, server string) (*item,
|
|||
|
||||
// getIgnoreTTL unconditionally returns an item if it exists in the cache.
|
||||
func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string) *item {
|
||||
k := hash(state.Name(), state.QType(), state.Do())
|
||||
k := hash(state.Name(), state.QType())
|
||||
cacheRequests.WithLabelValues(server).Inc()
|
||||
|
||||
if i, ok := c.ncache.Get(k); ok {
|
||||
ttl := i.(*item).ttl(now)
|
||||
|
@ -118,7 +130,7 @@ func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string
|
|||
}
|
||||
|
||||
func (c *Cache) exists(state request.Request) *item {
|
||||
k := hash(state.Name(), state.QType(), state.Do())
|
||||
k := hash(state.Name(), state.QType())
|
||||
if i, ok := c.ncache.Get(k); ok {
|
||||
return i.(*item)
|
||||
}
|
||||
|
@ -127,3 +139,22 @@ func (c *Cache) exists(state request.Request) *item {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setDo sets the DO bit and UDP buffer size in the message m.
|
||||
func setDo(m *dns.Msg) {
|
||||
o := m.IsEdns0()
|
||||
if o != nil {
|
||||
o.SetDo()
|
||||
o.SetUDPSize(defaultUDPBufSize)
|
||||
return
|
||||
}
|
||||
|
||||
o = &dns.OPT{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeOPT}}
|
||||
o.SetDo()
|
||||
o.SetUDPSize(defaultUDPBufSize)
|
||||
m.Extra = append(m.Extra, o)
|
||||
}
|
||||
|
||||
// defaultUDPBufsize is the bufsize the cache plugin uses on outgoing requests that don't
|
||||
// have an OPT RR.
|
||||
const defaultUDPBufSize = 2048
|
||||
|
|
|
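setDo above guarantees that the upstream query carries an OPT record with the DO bit and a 2048-byte UDP buffer, adding one when the client did not send EDNS0. The same effect can be sketched with miekg/dns directly; SetEdns0 is the library's shortcut for appending a fresh OPT RR.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

const bufSize = 2048 // matches defaultUDPBufSize above

// ensureDo makes sure m advertises DNSSEC (DO) and our buffer size,
// reusing an existing OPT record if the client already sent one.
func ensureDo(m *dns.Msg) {
	if o := m.IsEdns0(); o != nil {
		o.SetDo()
		o.SetUDPSize(bufSize)
		return
	}
	m.SetEdns0(bufSize, true) // appends a fresh OPT RR with DO set
}

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)

	ensureDo(m)
	o := m.IsEdns0()
	fmt.Println("DO bit:", o.Do(), "UDP size:", o.UDPSize())
}
```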
@ -4,6 +4,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/coredns/coredns/plugin/cache/freq"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
|
@ -55,7 +56,7 @@ func newItem(m *dns.Msg, now time.Time, d time.Duration) *item {
|
|||
// So we're forced to always set this to 1; regardless if the answer came from the cache or not.
|
||||
// On newer systems (e.g. ubuntu 16.04 with glibc version 2.23), this issue is resolved.
// So we may set this bit back to 0 in the future?
func (i *item) toMsg(m *dns.Msg, now time.Time) *dns.Msg {
|
||||
func (i *item) toMsg(m *dns.Msg, now time.Time, do bool) *dns.Msg {
|
||||
m1 := new(dns.Msg)
|
||||
m1.SetReply(m)
|
||||
|
||||
|
@ -64,6 +65,9 @@ func (i *item) toMsg(m *dns.Msg, now time.Time) *dns.Msg {
|
|||
// just set it to true.
|
||||
m1.Authoritative = true
|
||||
m1.AuthenticatedData = i.AuthenticatedData
|
||||
if !do {
|
||||
m1.AuthenticatedData = false // when DNSSEC was not wanted, it can't be authenticated data.
|
||||
}
|
||||
m1.RecursionAvailable = i.RecursionAvailable
|
||||
m1.Rcode = i.Rcode
|
||||
|
||||
|
@ -72,19 +76,10 @@ func (i *item) toMsg(m *dns.Msg, now time.Time) *dns.Msg {
|
|||
m1.Extra = make([]dns.RR, len(i.Extra))
|
||||
|
||||
ttl := uint32(i.ttl(now))
|
||||
for j, r := range i.Answer {
|
||||
m1.Answer[j] = dns.Copy(r)
|
||||
m1.Answer[j].Header().Ttl = ttl
|
||||
}
|
||||
for j, r := range i.Ns {
|
||||
m1.Ns[j] = dns.Copy(r)
|
||||
m1.Ns[j].Header().Ttl = ttl
|
||||
}
|
||||
// newItem skips OPT records, so we can just use i.Extra as is.
|
||||
for j, r := range i.Extra {
|
||||
m1.Extra[j] = dns.Copy(r)
|
||||
m1.Extra[j].Header().Ttl = ttl
|
||||
}
|
||||
m1.Answer = filterRRSlice(i.Answer, ttl, do, true)
|
||||
m1.Ns = filterRRSlice(i.Ns, ttl, do, true)
|
||||
m1.Extra = filterRRSlice(i.Extra, ttl, do, true)
|
||||
|
||||
return m1
|
||||
}
|
||||
|
||||
|
|
|
@ -4,49 +4,64 @@ import (
|
|||
"github.com/coredns/coredns/plugin"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
// cacheSize is total elements in the cache by cache type.
|
||||
cacheSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
cacheSize = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: "cache",
|
||||
Name: "entries",
|
||||
Help: "The number of elements in the cache.",
|
||||
}, []string{"server", "type"})
|
||||
// cacheRequests is a counter of all requests through the cache.
|
||||
cacheRequests = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: "cache",
|
||||
Name: "requests_total",
|
||||
Help: "The count of cache requests.",
|
||||
}, []string{"server"})
|
||||
// cacheHits is counter of cache hits by cache type.
|
||||
cacheHits = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
cacheHits = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: "cache",
|
||||
Name: "hits_total",
|
||||
Help: "The count of cache hits.",
|
||||
}, []string{"server", "type"})
|
||||
// cacheMisses is the counter of cache misses.
|
||||
cacheMisses = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
// cacheMisses is the counter of cache misses. - Deprecated
|
||||
cacheMisses = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: "cache",
|
||||
Name: "misses_total",
|
||||
Help: "The count of cache misses.",
|
||||
Help: "The count of cache misses. Deprecated, derive misses from cache hits/requests counters.",
|
||||
}, []string{"server"})
|
||||
// cachePrefetches is the number of times the cache has prefetched a cached item.
cachePrefetches = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
cachePrefetches = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: "cache",
|
||||
Name: "prefetch_total",
|
||||
Help: "The number of time the cache has prefetched a cached item.",
|
||||
Help: "The number of times the cache has prefetched a cached item.",
|
||||
}, []string{"server"})
|
||||
// cacheDrops is the number of responses that are not cached, because the reply is malformed.
cacheDrops = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
cacheDrops = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: "cache",
|
||||
Name: "drops_total",
|
||||
Help: "The number responses that are not cached, because the reply is malformed.",
|
||||
}, []string{"server"})
|
||||
// servedStale is the number of requests served from stale cache entries.
|
||||
servedStale = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
servedStale = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: "cache",
|
||||
Name: "served_stale_total",
|
||||
Help: "The number of requests served from stale cache entries.",
|
||||
}, []string{"server"})
|
||||
// evictions is the counter of cache evictions.
|
||||
evictions = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: "cache",
|
||||
Name: "evictions_total",
|
||||
Help: "The count of cache evictions.",
|
||||
}, []string{"server", "type"})
|
||||
)
|
||||
|
|
|
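The metrics block above moves every cache collector from prometheus.NewCounterVec plus manual registration to promauto, which registers against the default registry at definition time; that is also why the setup.go hunk below can drop the OnStartup MustRegister call. A minimal sketch of the same pattern; the "coredns" namespace literal stands in for plugin.Namespace, and the listen address is arbitrary.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// promauto.NewCounterVec registers the collector with
// prometheus.DefaultRegisterer as a side effect, so no MustRegister call
// is needed at plugin setup time.
var cacheHits = promauto.NewCounterVec(prometheus.CounterOpts{
	Namespace: "coredns",
	Subsystem: "cache",
	Name:      "hits_total",
	Help:      "The count of cache hits.",
}, []string{"server", "type"})

func main() {
	cacheHits.WithLabelValues("dns://.:53", "success").Inc()

	http.Handle("/metrics", promhttp.Handler())
	fmt.Println("metrics on :9153/metrics")
	http.ListenAndServe(":9153", nil)
}
```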
@ -6,13 +6,11 @@ import (
|
|||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/coredns/caddy"
|
||||
"github.com/coredns/coredns/core/dnsserver"
|
||||
"github.com/coredns/coredns/plugin"
|
||||
"github.com/coredns/coredns/plugin/metrics"
|
||||
"github.com/coredns/coredns/plugin/pkg/cache"
|
||||
clog "github.com/coredns/coredns/plugin/pkg/log"
|
||||
|
||||
"github.com/caddyserver/caddy"
|
||||
)
|
||||
|
||||
var log = clog.NewWithPlugin("cache")
|
||||
|
@ -29,13 +27,6 @@ func setup(c *caddy.Controller) error {
|
|||
return ca
|
||||
})
|
||||
|
||||
c.OnStartup(func() error {
|
||||
metrics.MustRegister(c,
|
||||
cacheSize, cacheHits, cacheMisses,
|
||||
cachePrefetches, cacheDrops, servedStale)
|
||||
return nil
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -50,10 +41,7 @@ func cacheParse(c *caddy.Controller) (*Cache, error) {
|
|||
j++
|
||||
|
||||
// cache [ttl] [zones..]
|
||||
origins := make([]string, len(c.ServerBlockKeys))
|
||||
copy(origins, c.ServerBlockKeys)
|
||||
args := c.RemainingArgs()
|
||||
|
||||
if len(args) > 0 {
|
||||
// first args may be just a number, then it is the ttl, if not it is a zone
|
||||
ttl, err := strconv.Atoi(args[0])
|
||||
|
@ -66,10 +54,8 @@ func cacheParse(c *caddy.Controller) (*Cache, error) {
|
|||
ca.nttl = time.Duration(ttl) * time.Second
|
||||
args = args[1:]
|
||||
}
|
||||
if len(args) > 0 {
|
||||
copy(origins, args)
|
||||
}
|
||||
}
|
||||
origins := plugin.OriginsFromArgsOrServerBlock(args, c.ServerBlockKeys)
|
||||
|
||||
// Refinements? In an extra block.
|
||||
for c.NextBlock() {
|
||||
|
@ -198,11 +184,7 @@ func cacheParse(c *caddy.Controller) (*Cache, error) {
|
|||
}
|
||||
}
|
||||
|
||||
for i := range origins {
|
||||
origins[i] = plugin.Host(origins[i]).Normalize()
|
||||
}
|
||||
ca.Zones = origins
|
||||
|
||||
ca.pcache = cache.New(ca.pcap)
|
||||
ca.ncache = cache.New(ca.ncap)
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ The following metrics are exported:
|
|||
* `coredns_dns_request_size_bytes{server, zone, proto}` - size of the request in bytes.
|
||||
* `coredns_dns_do_requests_total{server, zone}` - queries that have the DO bit set
|
||||
* `coredns_dns_response_size_bytes{server, zone, proto}` - response size in bytes.
|
||||
* `coredns_dns_responses_total{server, zone, rcode}` - response per zone and rcode.
|
||||
* `coredns_dns_responses_total{server, zone, rcode, plugin}` - response per zone, rcode and plugin.
|
||||
* `coredns_plugin_enabled{server, zone, name}` - indicates whether a plugin is enabled on per server and zone basis.
|
||||
|
||||
Each counter has a label `zone` which is the zonename used for the request/response.
|
||||
|
@ -30,8 +30,10 @@ Extra labels used are:
|
|||
* `proto` which holds the transport of the response ("udp" or "tcp")
|
||||
* The address family (`family`) of the transport (1 = IP (IP version 4), 2 = IP6 (IP version 6)).
|
||||
* `type` which holds the query type. It holds most common types (A, AAAA, MX, SOA, CNAME, PTR, TXT,
|
||||
NS, SRV, DS, DNSKEY, RRSIG, NSEC, NSEC3, IXFR, AXFR and ANY) and "other" which lumps together all
|
||||
NS, SRV, DS, DNSKEY, RRSIG, NSEC, NSEC3, HTTPS, IXFR, AXFR and ANY) and "other" which lumps together all
|
||||
other types.
|
||||
* the `plugin` label holds the name of the plugin that made the write to the client. If the server
|
||||
did the write (on error for instance), the value is empty.
|
||||
|
||||
If monitoring is enabled, queries that do not enter the plugin chain are exported under the fake
|
||||
name "dropped" (without a closing dot - this is never a valid domain name).
|
||||
|
|
|
@ -2,10 +2,10 @@ package metrics
|
|||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/coredns/coredns/plugin"
|
||||
"github.com/coredns/coredns/plugin/metrics/vars"
|
||||
"github.com/coredns/coredns/plugin/pkg/dnstest"
|
||||
"github.com/coredns/coredns/plugin/pkg/rcode"
|
||||
"github.com/coredns/coredns/request"
|
||||
|
||||
|
@ -23,13 +23,35 @@ func (m *Metrics) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg
|
|||
}
|
||||
|
||||
// Record response to get status code and size of the reply.
|
||||
rw := dnstest.NewRecorder(w)
|
||||
rw := NewRecorder(w)
|
||||
status, err := plugin.NextOrFailure(m.Name(), m.Next, ctx, rw, r)
|
||||
|
||||
vars.Report(WithServer(ctx), state, zone, rcode.ToString(rw.Rcode), rw.Len, rw.Start)
|
||||
rc := rw.Rcode
|
||||
if !plugin.ClientWrite(status) {
|
||||
// when no response was written, fallback to status returned from next plugin as this status
|
||||
// is actually used as rcode of DNS response
|
||||
// see https://github.com/coredns/coredns/blob/master/core/dnsserver/server.go#L318
|
||||
rc = status
|
||||
}
|
||||
plugin := m.authoritativePlugin(rw.Caller)
|
||||
vars.Report(WithServer(ctx), state, zone, rcode.ToString(rc), plugin, rw.Len, rw.Start)
|
||||
|
||||
return status, err
|
||||
}
|
||||
|
||||
// Name implements the Handler interface.
|
||||
func (m *Metrics) Name() string { return "prometheus" }
|
||||
|
||||
// authoritativePlugin returns which plugin made the write; if none is found the empty string is returned.
func (m *Metrics) authoritativePlugin(caller [3]string) string {
// The entries in caller contain the full path of the caller; the plugin name is the 2nd-to-last path element, e.g.
// .../coredns/plugin/whoami/whoami.go --> whoami
|
||||
// this is likely FS specific, so use filepath.
|
||||
for _, c := range caller {
|
||||
plug := filepath.Base(filepath.Dir(c))
|
||||
if _, ok := m.plugins[plug]; ok {
|
||||
return plug
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
|
|
@ -8,11 +8,12 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coredns/caddy"
|
||||
"github.com/coredns/coredns/plugin"
|
||||
"github.com/coredns/coredns/plugin/metrics/vars"
|
||||
"github.com/coredns/coredns/plugin/pkg/reuseport"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
|
@ -31,29 +32,18 @@ type Metrics struct {
|
|||
zoneNames []string
|
||||
zoneMap map[string]struct{}
|
||||
zoneMu sync.RWMutex
|
||||
|
||||
plugins map[string]struct{} // all available plugins, used to determine which plugin made the client write
|
||||
}
|
||||
|
||||
// New returns a new instance of Metrics with the given address.
|
||||
func New(addr string) *Metrics {
|
||||
met := &Metrics{
|
||||
Addr: addr,
|
||||
Reg: prometheus.NewRegistry(),
|
||||
Reg: prometheus.DefaultRegisterer.(*prometheus.Registry),
|
||||
zoneMap: make(map[string]struct{}),
|
||||
plugins: pluginList(caddy.ListPlugins()),
|
||||
}
|
||||
// Add the default collectors
|
||||
met.MustRegister(prometheus.NewGoCollector())
|
||||
met.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
|
||||
|
||||
// Add all of our collectors
|
||||
met.MustRegister(buildInfo)
|
||||
met.MustRegister(vars.Panic)
|
||||
met.MustRegister(vars.RequestCount)
|
||||
met.MustRegister(vars.RequestDuration)
|
||||
met.MustRegister(vars.RequestSize)
|
||||
met.MustRegister(vars.RequestDo)
|
||||
met.MustRegister(vars.ResponseSize)
|
||||
met.MustRegister(vars.ResponseRcode)
|
||||
met.MustRegister(vars.PluginEnabled)
|
||||
|
||||
return met
|
||||
}
|
||||
|
@ -154,6 +144,19 @@ func keys(m map[string]struct{}) []string {
|
|||
return sx
|
||||
}
|
||||
|
||||
// pluginList iterates over the returned plugin map from caddy and removes the "dns." prefix from them.
|
||||
func pluginList(m map[string][]string) map[string]struct{} {
|
||||
pm := map[string]struct{}{}
|
||||
for _, p := range m["others"] {
|
||||
// only add 'dns.' plugins
|
||||
if len(p) > 3 {
|
||||
pm[p[4:]] = struct{}{}
|
||||
continue
|
||||
}
|
||||
}
|
||||
return pm
|
||||
}
|
||||
|
||||
// ListenAddr is assigned the address of the prometheus listener. Its use is mainly in tests where
|
||||
// we listen on "localhost:0" and need to retrieve the actual address.
|
||||
var ListenAddr string
|
||||
|
@ -162,7 +165,7 @@ var ListenAddr string
|
|||
// before erroring when it tries to close the metrics server
|
||||
const shutdownTimeout time.Duration = time.Second * 5
|
||||
|
||||
var buildInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
var buildInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Name: "build_info",
|
||||
Help: "A metric with a constant '1' value labeled by version, revision, and goversion from which CoreDNS was built.",
|
||||
|
|
|
@ -0,0 +1,30 @@
|
|||
package metrics
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
"github.com/coredns/coredns/plugin/pkg/dnstest"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
// Recorder is a dnstest.Recorder specific to the metrics plugin.
|
||||
type Recorder struct {
|
||||
*dnstest.Recorder
|
||||
// CallerN holds the string return value of the call to runtime.Caller(N+1)
|
||||
Caller [3]string
|
||||
}
|
||||
|
||||
// NewRecorder makes and returns a new Recorder.
|
||||
func NewRecorder(w dns.ResponseWriter) *Recorder { return &Recorder{Recorder: dnstest.NewRecorder(w)} }
|
||||
|
||||
// WriteMsg records the status code and calls the
|
||||
// underlying ResponseWriter's WriteMsg method.
|
||||
func (r *Recorder) WriteMsg(res *dns.Msg) error {
|
||||
_, r.Caller[0], _, _ = runtime.Caller(1)
|
||||
_, r.Caller[1], _, _ = runtime.Caller(2)
|
||||
_, r.Caller[2], _, _ = runtime.Caller(3)
|
||||
r.Len += res.Len()
|
||||
r.Msg = res
|
||||
return r.ResponseWriter.WriteMsg(res)
|
||||
}
|
|
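The new Recorder above captures three caller frames at WriteMsg time so the metrics plugin can attribute a response to a plugin by the directory name of the calling file. A small sketch of extracting a "plugin name" from caller file paths the same way; the file paths and plugin set here are fabricated examples.

```go
package main

import (
	"fmt"
	"path/filepath"
	"runtime"
)

// pluginFromCallers mimics authoritativePlugin: the plugin name is taken to
// be the second-to-last path element of the caller's file, e.g.
// .../coredns/plugin/whoami/whoami.go -> whoami.
func pluginFromCallers(callers [3]string, known map[string]struct{}) string {
	for _, c := range callers {
		plug := filepath.Base(filepath.Dir(c))
		if _, ok := known[plug]; ok {
			return plug
		}
	}
	return ""
}

// capture shows how the frames would be gathered inside a ResponseWriter.
func capture() [3]string {
	var callers [3]string
	for i := 0; i < 3; i++ {
		_, file, _, _ := runtime.Caller(i + 1)
		callers[i] = file
	}
	return callers
}

func main() {
	known := map[string]struct{}{"whoami": {}, "forward": {}}
	fake := [3]string{
		"/src/coredns/plugin/whoami/whoami.go",
		"/src/coredns/core/dnsserver/server.go",
		"",
	}
	fmt.Printf("attributed to %q\n", pluginFromCallers(fake, known))
	_ = capture()
}
```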
@ -1,23 +0,0 @@
|
|||
package metrics
|
||||
|
||||
import (
|
||||
"github.com/coredns/coredns/core/dnsserver"
|
||||
|
||||
"github.com/caddyserver/caddy"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// MustRegister registers the prometheus Collectors when the metrics plugin is used.
|
||||
func MustRegister(c *caddy.Controller, cs ...prometheus.Collector) {
|
||||
m := dnsserver.GetConfig(c).Handler("prometheus")
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
x, ok := m.(*Metrics)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
for _, c := range cs {
|
||||
x.MustRegister(c)
|
||||
}
|
||||
}
|
|
@@ -4,14 +4,13 @@ import (
	"net"
	"runtime"

	"github.com/coredns/caddy"
	"github.com/coredns/coredns/core/dnsserver"
	"github.com/coredns/coredns/coremain"
	"github.com/coredns/coredns/plugin"
	"github.com/coredns/coredns/plugin/metrics/vars"
	clog "github.com/coredns/coredns/plugin/pkg/log"
	"github.com/coredns/coredns/plugin/pkg/uniq"

	"github.com/caddyserver/caddy"
)

var (

@@ -81,8 +80,9 @@ func parse(c *caddy.Controller) (*Metrics, error) {
	}
	i++

	for _, z := range c.ServerBlockKeys {
		met.AddZone(plugin.Host(z).Normalize())
	zones := plugin.OriginsFromArgsOrServerBlock(nil /* args */, c.ServerBlockKeys)
	for _, z := range zones {
		met.AddZone(z)
	}
	args := c.RemainingArgs()

@@ -0,0 +1,36 @@
package vars

import (
	"github.com/miekg/dns"
)

var monitorType = map[uint16]struct{}{
	dns.TypeAAAA:   {},
	dns.TypeA:      {},
	dns.TypeCNAME:  {},
	dns.TypeDNSKEY: {},
	dns.TypeDS:     {},
	dns.TypeMX:     {},
	dns.TypeNSEC3:  {},
	dns.TypeNSEC:   {},
	dns.TypeNS:     {},
	dns.TypePTR:    {},
	dns.TypeRRSIG:  {},
	dns.TypeSOA:    {},
	dns.TypeSRV:    {},
	dns.TypeTXT:    {},
	dns.TypeHTTPS:  {},
	// Meta Qtypes
	dns.TypeIXFR: {},
	dns.TypeAXFR: {},
	dns.TypeANY:  {},
}

// qTypeString returns the RR type based on monitorType. It returns the text representation
// of those types. RR types not in that list will have "other" returned.
func qTypeString(qtype uint16) string {
	if _, known := monitorType[qtype]; known {
		return dns.Type(qtype).String()
	}
	return "other"
}

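A tiny test-style sketch of the mapping qTypeString implements; the expected values follow directly from the allow-list above, and the test name is illustrative:

```go
package vars

import (
	"testing"

	"github.com/miekg/dns"
)

// TestQTypeString: types in monitorType keep their textual name, anything
// else collapses to "other" to bound metric label cardinality.
func TestQTypeString(t *testing.T) {
	if got := qTypeString(dns.TypeMX); got != "MX" {
		t.Fatalf("got %q, want MX", got)
	}
	if got := qTypeString(dns.TypeNAPTR); got != "other" {
		t.Fatalf("got %q, want other", got)
	}
}
```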
@@ -4,14 +4,12 @@ import (
	"time"

	"github.com/coredns/coredns/request"

	"github.com/miekg/dns"
)

// Report reports the metrics data associated with request. This function is exported because it is also
// called from core/dnsserver to report requests hitting the server that should not be handled and are thus
// not sent down the plugin chain.
func Report(server string, req request.Request, zone, rcode string, size int, start time.Time) {
func Report(server string, req request.Request, zone, rcode, plugin string, size int, start time.Time) {
	// Proto and Family.
	net := req.Proto()
	fam := "1"

@@ -19,45 +17,17 @@ func Report(server string, req request.Request, zone, rcode string, size int, st
		fam = "2"
	}

	typ := req.QType()

	if req.Do() {
		RequestDo.WithLabelValues(server, zone).Inc()
	}

	if _, known := monitorType[typ]; known {
		RequestCount.WithLabelValues(server, zone, net, fam, dns.Type(typ).String()).Inc()
		RequestDuration.WithLabelValues(server, zone, dns.Type(typ).String()).Observe(time.Since(start).Seconds())
	} else {
		RequestCount.WithLabelValues(server, zone, net, fam, other).Inc()
		RequestDuration.WithLabelValues(server, zone, other).Observe(time.Since(start).Seconds())
	}
	qType := qTypeString(req.QType())
	RequestCount.WithLabelValues(server, zone, net, fam, qType).Inc()

	RequestDuration.WithLabelValues(server, zone).Observe(time.Since(start).Seconds())

	ResponseSize.WithLabelValues(server, zone, net).Observe(float64(size))
	RequestSize.WithLabelValues(server, zone, net).Observe(float64(req.Len()))

	ResponseRcode.WithLabelValues(server, zone, rcode).Inc()
	ResponseRcode.WithLabelValues(server, zone, rcode, plugin).Inc()
}

var monitorType = map[uint16]struct{}{
	dns.TypeAAAA:   {},
	dns.TypeA:      {},
	dns.TypeCNAME:  {},
	dns.TypeDNSKEY: {},
	dns.TypeDS:     {},
	dns.TypeMX:     {},
	dns.TypeNSEC3:  {},
	dns.TypeNSEC:   {},
	dns.TypeNS:     {},
	dns.TypePTR:    {},
	dns.TypeRRSIG:  {},
	dns.TypeSOA:    {},
	dns.TypeSRV:    {},
	dns.TypeTXT:    {},
	// Meta Qtypes
	dns.TypeIXFR: {},
	dns.TypeAXFR: {},
	dns.TypeANY:  {},
}

const other = "other"

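Report gains a plugin label so responses_total can be attributed to the plugin that produced the answer. A hedged sketch of a call site (the "cache" label and the surrounding variables are illustrative, not taken from this diff):

```go
package example

import (
	"time"

	"github.com/coredns/coredns/plugin/metrics/vars"
	"github.com/coredns/coredns/request"

	"github.com/miekg/dns"
)

// report is an illustrative wrapper; server, zone and the answering plugin
// name would come from the caller's context in real code.
func report(server, zone string, state request.Request, res *dns.Msg, start time.Time) {
	vars.Report(server, state, zone, dns.RcodeToString[res.Rcode], "cache", res.Len(), start)
}
```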
@@ -4,41 +4,42 @@ import (
	"github.com/coredns/coredns/plugin"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Request* and Response* are the prometheus counters and gauges we are using for exporting metrics.
var (
	RequestCount = prometheus.NewCounterVec(prometheus.CounterOpts{
	RequestCount = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: plugin.Namespace,
		Subsystem: subsystem,
		Name:      "requests_total",
		Help:      "Counter of DNS requests made per zone, protocol and family.",
	}, []string{"server", "zone", "proto", "family", "type"})

	RequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
	RequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: plugin.Namespace,
		Subsystem: subsystem,
		Name:      "request_duration_seconds",
		Buckets:   plugin.TimeBuckets,
		Help:      "Histogram of the time (in seconds) each request took.",
	}, []string{"server", "zone", "type"})
		Help:      "Histogram of the time (in seconds) each request took per zone.",
	}, []string{"server", "zone"})

	RequestSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{
	RequestSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: plugin.Namespace,
		Subsystem: subsystem,
		Name:      "request_size_bytes",
		Help:      "Size of the EDNS0 UDP buffer in bytes (64K for TCP).",
		Help:      "Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol.",
		Buckets:   []float64{0, 100, 200, 300, 400, 511, 1023, 2047, 4095, 8291, 16e3, 32e3, 48e3, 64e3},
	}, []string{"server", "zone", "proto"})

	RequestDo = prometheus.NewCounterVec(prometheus.CounterOpts{
	RequestDo = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: plugin.Namespace,
		Subsystem: subsystem,
		Name:      "do_requests_total",
		Help:      "Counter of DNS requests with DO bit set per zone.",
	}, []string{"server", "zone"})

	ResponseSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{
	ResponseSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: plugin.Namespace,
		Subsystem: subsystem,
		Name:      "response_size_bytes",

@@ -46,20 +47,20 @@ var (
		Buckets:   []float64{0, 100, 200, 300, 400, 511, 1023, 2047, 4095, 8291, 16e3, 32e3, 48e3, 64e3},
	}, []string{"server", "zone", "proto"})

	ResponseRcode = prometheus.NewCounterVec(prometheus.CounterOpts{
	ResponseRcode = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: plugin.Namespace,
		Subsystem: subsystem,
		Name:      "responses_total",
		Help:      "Counter of response status codes.",
	}, []string{"server", "zone", "rcode"})
	}, []string{"server", "zone", "rcode", "plugin"})

	Panic = prometheus.NewCounter(prometheus.CounterOpts{
	Panic = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: plugin.Namespace,
		Name:      "panics_total",
		Help:      "A metrics that counts the number of panics.",
	})

	PluginEnabled = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	PluginEnabled = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: plugin.Namespace,
		Name:      "plugin_enabled",
		Help:      "A metric that indicates whether a plugin is enabled on per server and zone basis.",

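The prometheus.New* to promauto.New* switch is what makes the explicit met.MustRegister calls at the start of this section unnecessary: promauto constructors register the collector when it is created (panicking on conflict). A generic sketch, independent of how CoreDNS wires its own registry; the metric name and address below are made up:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// requests is registered with prometheus.DefaultRegisterer as a side effect
// of promauto.NewCounterVec; no separate MustRegister call is needed.
var requests = promauto.NewCounterVec(prometheus.CounterOpts{
	Name: "example_requests_total",
	Help: "Counter of example requests.",
}, []string{"zone"})

func main() {
	requests.WithLabelValues("example.org.").Inc()
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":9153", nil)
}
```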
|
@ -3,10 +3,14 @@ package plugin
|
|||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/coredns/coredns/plugin/pkg/cidr"
|
||||
"github.com/coredns/coredns/plugin/pkg/log"
|
||||
"github.com/coredns/coredns/plugin/pkg/parse"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
|
@ -62,81 +66,132 @@ type (
|
|||
// Normalize will return the host portion of host, stripping
|
||||
// of any port or transport. The host will also be fully qualified and lowercased.
|
||||
// An empty string is returned on failure
|
||||
// Deprecated: use OriginsFromArgsOrServerBlock or NormalizeExact
|
||||
func (h Host) Normalize() string {
|
||||
// The error can be ignored here, because this function should only be called after the corefile has already been vetted.
|
||||
host, _ := h.MustNormalize()
|
||||
return host
|
||||
var caller string
|
||||
if _, file, line, ok := runtime.Caller(1); ok {
|
||||
caller = fmt.Sprintf("(%v line %d) ", file, line)
|
||||
}
|
||||
log.Warning("An external plugin " + caller + "is using the deprecated function Normalize. " +
|
||||
"This will be removed in a future versions of CoreDNS. The plugin should be updated to use " +
|
||||
"OriginsFromArgsOrServerBlock or NormalizeExact instead.")
|
||||
|
||||
s := string(h)
|
||||
_, s = parse.Transport(s)
|
||||
|
||||
// The error can be ignored here, because this function is called after the corefile has already been vetted.
|
||||
hosts, _, err := SplitHostPort(s)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return Name(hosts[0]).Normalize()
|
||||
}
|
||||
|
||||
// MustNormalize will return the host portion of host, stripping
|
||||
// of any port or transport. The host will also be fully qualified and lowercased.
|
||||
// An error is returned on error
|
||||
// Deprecated: use OriginsFromArgsOrServerBlock or NormalizeExact
|
||||
func (h Host) MustNormalize() (string, error) {
|
||||
var caller string
|
||||
if _, file, line, ok := runtime.Caller(1); ok {
|
||||
caller = fmt.Sprintf("(%v line %d) ", file, line)
|
||||
}
|
||||
log.Warning("An external plugin " + caller + "is using the deprecated function MustNormalize. " +
|
||||
"This will be removed in a future versions of CoreDNS. The plugin should be updated to use " +
|
||||
"OriginsFromArgsOrServerBlock or NormalizeExact instead.")
|
||||
|
||||
s := string(h)
|
||||
_, s = parse.Transport(s)
|
||||
|
||||
// The error can be ignored here, because this function is called after the corefile has already been vetted.
|
||||
host, _, _, err := SplitHostPort(s)
|
||||
hosts, _, err := SplitHostPort(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return Name(host).Normalize(), nil
|
||||
return Name(hosts[0]).Normalize(), nil
|
||||
}
|
||||
|
||||
// SplitHostPort splits s up in a host and port portion, taking reverse address notation into account.
|
||||
// String the string s should *not* be prefixed with any protocols, i.e. dns://. The returned ipnet is the
|
||||
// *net.IPNet that is used when the zone is a reverse and a netmask is given.
|
||||
func SplitHostPort(s string) (host, port string, ipnet *net.IPNet, err error) {
|
||||
// NormalizeExact will return the host portion of host, stripping
|
||||
// of any port or transport. The host will also be fully qualified and lowercased.
|
||||
// An empty slice is returned on failure
|
||||
func (h Host) NormalizeExact() []string {
|
||||
// The error can be ignored here, because this function should only be called after the corefile has already been vetted.
|
||||
s := string(h)
|
||||
_, s = parse.Transport(s)
|
||||
|
||||
hosts, _, err := SplitHostPort(s)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
for i := range hosts {
|
||||
hosts[i] = Name(hosts[i]).Normalize()
|
||||
|
||||
}
|
||||
return hosts
|
||||
}
|
||||
|
||||
// SplitHostPort splits s up in a host(s) and port portion, taking reverse address notation into account.
|
||||
// String the string s should *not* be prefixed with any protocols, i.e. dns://. SplitHostPort can return
|
||||
// multiple hosts when a reverse notation on a non-octet boundary is given.
|
||||
func SplitHostPort(s string) (hosts []string, port string, err error) {
|
||||
// If there is: :[0-9]+ on the end we assume this is the port. This works for (ascii) domain
|
||||
// names and our reverse syntax, which always needs a /mask *before* the port.
|
||||
// So from the back, find first colon, and then check if it's a number.
|
||||
host = s
|
||||
|
||||
colon := strings.LastIndex(s, ":")
|
||||
if colon == len(s)-1 {
|
||||
return "", "", nil, fmt.Errorf("expecting data after last colon: %q", s)
|
||||
return nil, "", fmt.Errorf("expecting data after last colon: %q", s)
|
||||
}
|
||||
if colon != -1 {
|
||||
if p, err := strconv.Atoi(s[colon+1:]); err == nil {
|
||||
port = strconv.Itoa(p)
|
||||
host = s[:colon]
|
||||
s = s[:colon]
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(miek): this should take escaping into account.
|
||||
if len(host) > 255 {
|
||||
return "", "", nil, fmt.Errorf("specified zone is too long: %d > 255", len(host))
|
||||
if len(s) > 255 {
|
||||
return nil, "", fmt.Errorf("specified zone is too long: %d > 255", len(s))
|
||||
}
|
||||
|
||||
_, d := dns.IsDomainName(host)
|
||||
if !d {
|
||||
return "", "", nil, fmt.Errorf("zone is not a valid domain name: %s", host)
|
||||
if _, ok := dns.IsDomainName(s); !ok {
|
||||
return nil, "", fmt.Errorf("zone is not a valid domain name: %s", s)
|
||||
}
|
||||
|
||||
// Check if it parses as a reverse zone, if so we use that. Must be fully specified IP and mask.
|
||||
ip, n, err := net.ParseCIDR(host)
|
||||
ones, bits := 0, 0
|
||||
if err == nil {
|
||||
if rev, e := dns.ReverseAddr(ip.String()); e == nil {
|
||||
ones, bits = n.Mask.Size()
|
||||
// get the size, in bits, of each portion of hostname defined in the reverse address. (8 for IPv4, 4 for IPv6)
|
||||
sizeDigit := 8
|
||||
if len(n.IP) == net.IPv6len {
|
||||
sizeDigit = 4
|
||||
}
|
||||
// Get the first lower octet boundary to see what encompassing zone we should be authoritative for.
|
||||
mod := (bits - ones) % sizeDigit
|
||||
nearest := (bits - ones) + mod
|
||||
offset := 0
|
||||
var end bool
|
||||
for i := 0; i < nearest/sizeDigit; i++ {
|
||||
offset, end = dns.NextLabel(rev, offset)
|
||||
if end {
|
||||
break
|
||||
}
|
||||
}
|
||||
host = rev[offset:]
|
||||
}
|
||||
_, n, err := net.ParseCIDR(s)
|
||||
if err != nil {
|
||||
return []string{s}, port, nil
|
||||
}
|
||||
return host, port, n, nil
|
||||
|
||||
if s[0] == ':' || (s[0] == '0' && strings.Contains(s, ":")) {
|
||||
return nil, "", fmt.Errorf("invalid CIDR %s", s)
|
||||
}
|
||||
|
||||
// now check if multiple hosts must be returned.
|
||||
nets := cidr.Split(n)
|
||||
hosts = cidr.Reverse(nets)
|
||||
return hosts, port, nil
|
||||
}
|
||||
|
||||
// OriginsFromArgsOrServerBlock returns the normalized args if that slice
|
||||
// is not empty, otherwise the serverblock slice is returned (in a newly copied slice).
|
||||
func OriginsFromArgsOrServerBlock(args, serverblock []string) []string {
|
||||
if len(args) == 0 {
|
||||
s := make([]string, len(serverblock))
|
||||
copy(s, serverblock)
|
||||
for i := range s {
|
||||
s[i] = Host(s[i]).NormalizeExact()[0] // expansion of these already happened in dnsserver/register.go
|
||||
}
|
||||
return s
|
||||
}
|
||||
s := []string{}
|
||||
for i := range args {
|
||||
sx := Host(args[i]).NormalizeExact()
|
||||
if len(sx) == 0 {
|
||||
continue // silently ignores errors.
|
||||
}
|
||||
s = append(s, sx...)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
|
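The comments above describe how SplitHostPort and NormalizeExact can now yield several origins when a reverse zone does not fall on an octet (IPv4) or nibble (IPv6) boundary. A minimal sketch of that behaviour; the printed zones are an expectation derived from the code, not captured output:

```go
package main

import (
	"fmt"

	"github.com/coredns/coredns/plugin"
)

func main() {
	// A /15 does not sit on an octet boundary, so the reverse zone is split
	// into the two /16 networks it covers.
	fmt.Println(plugin.Host("10.0.0.0/15").NormalizeExact())
	// expected (assumed): [0.10.in-addr.arpa. 1.10.in-addr.arpa.]
}
```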
|
@ -45,9 +45,10 @@ func New(size int) *Cache {
|
|||
}
|
||||
|
||||
// Add adds a new element to the cache. If the element already exists it is overwritten.
|
||||
func (c *Cache) Add(key uint64, el interface{}) {
|
||||
// Returns true if an existing element was evicted to make room for this element.
|
||||
func (c *Cache) Add(key uint64, el interface{}) bool {
|
||||
shard := key & (shardSize - 1)
|
||||
c.shards[shard].Add(key, el)
|
||||
return c.shards[shard].Add(key, el)
|
||||
}
|
||||
|
||||
// Get looks up element index under key.
|
||||
|
@ -71,22 +72,33 @@ func (c *Cache) Len() int {
|
|||
return l
|
||||
}
|
||||
|
||||
// Walk walks each shard in the cache.
|
||||
func (c *Cache) Walk(f func(map[uint64]interface{}, uint64) bool) {
|
||||
for _, s := range c.shards {
|
||||
s.Walk(f)
|
||||
}
|
||||
}
|
||||
|
||||
// newShard returns a new shard with size.
|
||||
func newShard(size int) *shard { return &shard{items: make(map[uint64]interface{}), size: size} }
|
||||
|
||||
// Add adds element indexed by key into the cache. Any existing element is overwritten
|
||||
func (s *shard) Add(key uint64, el interface{}) {
|
||||
// Returns true if an existing element was evicted to make room for this element.
|
||||
func (s *shard) Add(key uint64, el interface{}) bool {
|
||||
eviction := false
|
||||
s.Lock()
|
||||
if len(s.items) >= s.size {
|
||||
if _, ok := s.items[key]; !ok {
|
||||
for k := range s.items {
|
||||
delete(s.items, k)
|
||||
eviction = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
s.items[key] = el
|
||||
s.Unlock()
|
||||
return eviction
|
||||
}
|
||||
|
||||
// Remove removes the element indexed by key from the cache.
|
||||
|
@ -122,4 +134,24 @@ func (s *shard) Len() int {
|
|||
return l
|
||||
}
|
||||
|
||||
// Walk walks the shard; for each element the function f is executed while holding a write lock.
|
||||
func (s *shard) Walk(f func(map[uint64]interface{}, uint64) bool) {
|
||||
s.RLock()
|
||||
items := make([]uint64, len(s.items))
|
||||
i := 0
|
||||
for k := range s.items {
|
||||
items[i] = k
|
||||
i++
|
||||
}
|
||||
s.RUnlock()
|
||||
for _, k := range items {
|
||||
s.Lock()
|
||||
ok := f(s.items, k)
|
||||
s.Unlock()
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const shardSize = 256
|
||||
|
|
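A small sketch of the changed Add contract. The shard selection (key & (shardSize-1)) and shardSize come from the hunks above; whether an eviction actually fires depends on the per-shard capacity set by New, which is outside this hunk, so treat the printed evictions as an assumption:

```go
package main

import (
	"fmt"

	"github.com/coredns/coredns/plugin/pkg/cache"
)

func main() {
	c := cache.New(256)
	for i := uint64(0); i < 10; i++ {
		key := i << 8 // multiples of 256 all land in the same shard
		if evicted := c.Add(key, i); evicted {
			fmt.Println("eviction while adding entry", i)
		}
	}
	fmt.Println("entries cached:", c.Len())
}
```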
|
@ -0,0 +1,83 @@
|
|||
// Package cidr contains functions that deal with classless reverse zones in the DNS.
|
||||
package cidr
|
||||
|
||||
import (
|
||||
"math"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/apparentlymart/go-cidr/cidr"
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
// Split returns a slice of non-overlapping subnets that in union equal the subnet n,
|
||||
// and where each subnet falls on a reverse name segment boundary.
|
||||
// for ipv4 this is any multiple of 8 bits (/8, /16, /24 or /32)
|
||||
// for ipv6 this is any multiple of 4 bits
|
||||
func Split(n *net.IPNet) []string {
|
||||
boundary := 8
|
||||
nstr := n.String()
|
||||
if strings.Contains(nstr, ":") {
|
||||
boundary = 4
|
||||
}
|
||||
ones, _ := n.Mask.Size()
|
||||
if ones%boundary == 0 {
|
||||
return []string{n.String()}
|
||||
}
|
||||
|
||||
mask := int(math.Ceil(float64(ones)/float64(boundary))) * boundary
|
||||
networks := nets(n, mask)
|
||||
cidrs := make([]string, len(networks))
|
||||
for i := range networks {
|
||||
cidrs[i] = networks[i].String()
|
||||
}
|
||||
return cidrs
|
||||
}
|
||||
|
||||
// nets return a slice of prefixes with the desired mask subnetted from original network.
|
||||
func nets(network *net.IPNet, newPrefixLen int) []*net.IPNet {
|
||||
prefixLen, _ := network.Mask.Size()
|
||||
maxSubnets := int(math.Exp2(float64(newPrefixLen)) / math.Exp2(float64(prefixLen)))
|
||||
nets := []*net.IPNet{{network.IP, net.CIDRMask(newPrefixLen, 8*len(network.IP))}}
|
||||
|
||||
for i := 1; i < maxSubnets; i++ {
|
||||
next, exceeds := cidr.NextSubnet(nets[len(nets)-1], newPrefixLen)
|
||||
nets = append(nets, next)
|
||||
if exceeds {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nets
|
||||
}
|
||||
|
||||
// Reverse return the reverse zones that are authoritative for each net in ns.
|
||||
func Reverse(nets []string) []string {
|
||||
rev := make([]string, len(nets))
|
||||
for i := range nets {
|
||||
ip, n, _ := net.ParseCIDR(nets[i])
|
||||
r, err1 := dns.ReverseAddr(ip.String())
|
||||
if err1 != nil {
|
||||
continue
|
||||
}
|
||||
ones, bits := n.Mask.Size()
|
||||
// get the size, in bits, of each portion of hostname defined in the reverse address. (8 for IPv4, 4 for IPv6)
|
||||
sizeDigit := 8
|
||||
if len(n.IP) == net.IPv6len {
|
||||
sizeDigit = 4
|
||||
}
|
||||
// Get the first lower octet boundary to see what encompassing zone we should be authoritative for.
|
||||
mod := (bits - ones) % sizeDigit
|
||||
nearest := (bits - ones) + mod
|
||||
offset := 0
|
||||
var end bool
|
||||
for i := 0; i < nearest/sizeDigit; i++ {
|
||||
offset, end = dns.NextLabel(r, offset)
|
||||
if end {
|
||||
break
|
||||
}
|
||||
}
|
||||
rev[i] = r[offset:]
|
||||
}
|
||||
return rev
|
||||
}
|
|
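A short sketch of the two cidr helpers together; the import path matches the one used by normalize.go earlier in this diff, and the commented values are expectations, not captured output:

```go
package main

import (
	"fmt"
	"net"

	"github.com/coredns/coredns/plugin/pkg/cidr"
)

func main() {
	// A /23 is not on an octet boundary, so it is split into its two /24s
	// before the reverse zones are derived.
	_, n, _ := net.ParseCIDR("192.168.2.0/23")
	subnets := cidr.Split(n) // assumed: ["192.168.2.0/24" "192.168.3.0/24"]
	fmt.Println(cidr.Reverse(subnets))
	// assumed: [2.168.192.in-addr.arpa. 3.168.192.in-addr.arpa.]
}
```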
@ -5,7 +5,6 @@ import (
|
|||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
|
@ -50,7 +49,6 @@ func NewRequest(method, url string, m *dns.Msg) (*http.Request, error) {
|
|||
default:
|
||||
return nil, fmt.Errorf("method not allowed: %s", method)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// ResponseToMsg converts a http.Response to a dns message.
|
||||
|
@ -72,7 +70,6 @@ func RequestToMsg(req *http.Request) (*dns.Msg, error) {
|
|||
default:
|
||||
return nil, fmt.Errorf("method not allowed: %s", req.Method)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// requestToMsgPost extracts the dns message from the request body.
|
||||
|
@ -95,7 +92,7 @@ func requestToMsgGet(req *http.Request) (*dns.Msg, error) {
|
|||
}
|
||||
|
||||
func toMsg(r io.ReadCloser) (*dns.Msg, error) {
|
||||
buf, err := ioutil.ReadAll(r)
|
||||
buf, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@ package log
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
golog "log"
|
||||
"os"
|
||||
"sync"
|
||||
|
@ -102,7 +102,7 @@ func Fatal(v ...interface{}) { log(fatal, v...); os.Exit(1) }
|
|||
func Fatalf(format string, v ...interface{}) { logf(fatal, format, v...); os.Exit(1) }
|
||||
|
||||
// Discard sets the log output to /dev/null.
|
||||
func Discard() { golog.SetOutput(ioutil.Discard) }
|
||||
func Discard() { golog.SetOutput(io.Discard) }
|
||||
|
||||
const (
|
||||
debug = "[DEBUG] "
|
||||
|
|
|
@@ -1,6 +1,7 @@
package parse

import (
	"errors"
	"fmt"
	"net"
	"os"

@@ -11,6 +12,9 @@ import (
	"github.com/miekg/dns"
)

// ErrNoNameservers is returned by HostPortOrFile if no servers can be parsed.
var ErrNoNameservers = errors.New("no nameservers found")

// Strips the zone, but preserves any port that comes after the zone
func stripZone(host string) string {
	if strings.Contains(host, "%") {

@@ -70,7 +74,7 @@ func HostPortOrFile(s ...string) ([]string, error) {
		servers = append(servers, h)
	}
	if len(servers) == 0 {
		return servers, fmt.Errorf("no nameservers found")
		return servers, ErrNoNameservers
	}
	return servers, nil
}

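Because the failure is now a sentinel error, callers can test for it with errors.Is instead of matching on the message text. A small sketch (the file path and printed messages are illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/coredns/coredns/plugin/pkg/parse"
)

func main() {
	servers, err := parse.HostPortOrFile("/etc/resolv.conf")
	if errors.Is(err, parse.ErrNoNameservers) {
		fmt.Println("resolv.conf contains no nameserver lines")
		return
	}
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("upstreams:", servers)
}
```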
|
@ -4,46 +4,35 @@ package parse
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/coredns/caddy"
|
||||
"github.com/coredns/coredns/plugin/pkg/transport"
|
||||
|
||||
"github.com/caddyserver/caddy"
|
||||
)
|
||||
|
||||
// Transfer parses transfer statements: 'transfer [to|from] [address...]'.
|
||||
func Transfer(c *caddy.Controller, secondary bool) (tos, froms []string, err error) {
|
||||
// TransferIn parses transfer statements: 'transfer from [address...]'.
|
||||
func TransferIn(c *caddy.Controller) (froms []string, err error) {
|
||||
if !c.NextArg() {
|
||||
return nil, nil, c.ArgErr()
|
||||
return nil, c.ArgErr()
|
||||
}
|
||||
value := c.Val()
|
||||
switch value {
|
||||
case "to":
|
||||
tos = c.RemainingArgs()
|
||||
for i := range tos {
|
||||
if tos[i] != "*" {
|
||||
normalized, err := HostPort(tos[i], transport.Port)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
tos[i] = normalized
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return nil, c.Errf("unknown property %s", value)
|
||||
case "from":
|
||||
if !secondary {
|
||||
return nil, nil, fmt.Errorf("can't use `transfer from` when not being a secondary")
|
||||
}
|
||||
froms = c.RemainingArgs()
|
||||
if len(froms) == 0 {
|
||||
return nil, c.ArgErr()
|
||||
}
|
||||
for i := range froms {
|
||||
if froms[i] != "*" {
|
||||
normalized, err := HostPort(froms[i], transport.Port)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
froms[i] = normalized
|
||||
} else {
|
||||
return nil, nil, fmt.Errorf("can't use '*' in transfer from")
|
||||
return nil, fmt.Errorf("can't use '*' in transfer from")
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
return froms, nil
|
||||
}
|
||||
|
|
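TransferIn now only understands the 'transfer from' form. A hedged sketch of how a plugin's setup code might drive it through a test controller; the normalized output is an assumption based on HostPort defaulting to transport.Port:

```go
package main

import (
	"fmt"

	"github.com/coredns/caddy"
	"github.com/coredns/coredns/plugin/pkg/parse"
)

func main() {
	// Illustrative only: feed a single "transfer from" line through a test controller.
	c := caddy.NewTestController("dns", "transfer from 10.0.0.1 10.0.0.2:5300")
	c.Next() // consume the "transfer" token, as a plugin's setup loop would
	froms, err := parse.TransferIn(c)
	fmt.Println(froms, err) // expected (assumed): [10.0.0.1:53 10.0.0.2:5300] <nil>
}
```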
|
@ -2,6 +2,7 @@ package trace
|
|||
|
||||
import (
|
||||
"github.com/coredns/coredns/plugin"
|
||||
|
||||
ot "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
|
|
|
@ -69,7 +69,7 @@ func (f HandlerFunc) Name() string { return "handlerfunc" }
|
|||
// Error returns err with 'plugin/name: ' prefixed to it.
|
||||
func Error(name string, err error) error { return fmt.Errorf("%s/%s: %s", "plugin", name, err) }
|
||||
|
||||
// NextOrFailure calls next.ServeDNS when next is not nil, otherwise it will return, a ServerFailure and a nil error.
|
||||
// NextOrFailure calls next.ServeDNS when next is not nil, otherwise it will return, a ServerFailure and a `no next plugin found` error.
|
||||
func NextOrFailure(name string, next Handler, ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { // nolint: golint
|
||||
if next != nil {
|
||||
if span := ot.SpanFromContext(ctx); span != nil {
|
||||
|
@ -105,5 +105,8 @@ const Namespace = "coredns"
|
|||
// TimeBuckets is based on Prometheus client_golang prometheus.DefBuckets
|
||||
var TimeBuckets = prometheus.ExponentialBuckets(0.00025, 2, 16) // from 0.25ms to 8 seconds
|
||||
|
||||
// SlimTimeBuckets is low cardinality set of duration buckets.
|
||||
var SlimTimeBuckets = prometheus.ExponentialBuckets(0.00025, 10, 5) // from 0.25ms to 2.5 seconds
|
||||
|
||||
// ErrOnce is returned when a plugin doesn't support multiple setups per server.
|
||||
var ErrOnce = errors.New("this plugin can only be used once per Server Block")
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
package plugin
|
||||
|
||||
import "github.com/caddyserver/caddy"
|
||||
import "github.com/coredns/caddy"
|
||||
|
||||
// Register registers your plugin with CoreDNS and allows it to be called when the server is running.
|
||||
func Register(name string, action caddy.SetupFunc) {
|
||||
|
|
|
@ -1,18 +1,17 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// TempFile will create a temporary file on disk and returns the name and a cleanup function to remove it later.
|
||||
func TempFile(dir, content string) (string, func(), error) {
|
||||
f, err := ioutil.TempFile(dir, "go-test-tmpfile")
|
||||
f, err := os.CreateTemp(dir, "go-test-tmpfile")
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if err := ioutil.WriteFile(f.Name(), []byte(content), 0644); err != nil {
|
||||
if err := os.WriteFile(f.Name(), []byte(content), 0644); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
rmFunc := func() { os.Remove(f.Name()) }
|
||||
|
@ -21,7 +20,7 @@ func TempFile(dir, content string) (string, func(), error) {
|
|||
|
||||
// WritePEMFiles creates a tmp dir with ca.pem, cert.pem, and key.pem and the func to remove it
|
||||
func WritePEMFiles(dir string) (string, func(), error) {
|
||||
tempDir, err := ioutil.TempDir(dir, "go-test-pemfiles")
|
||||
tempDir, err := os.MkdirTemp(dir, "go-test-pemfiles")
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
@ -45,7 +44,7 @@ xGbtCkhVk2VQ+BiCWnjYXJ6ZMzabP7wiOFDP9Pvr2ik22PRItsW/TLfHFXM1jDmc
|
|||
I1rs/VUGKzcJGVIWbHrgjP68CTStGAvKgbsTqw7aLXTSqtPw88N9XVSyRg==
|
||||
-----END CERTIFICATE-----`
|
||||
path := filepath.Join(tempDir, "ca.pem")
|
||||
if err := ioutil.WriteFile(path, []byte(data), 0644); err != nil {
|
||||
if err := os.WriteFile(path, []byte(data), 0644); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
data = `-----BEGIN CERTIFICATE-----
|
||||
|
@ -66,7 +65,7 @@ zhDEPP4FhY+Sz+y1yWirphl7A1aZwhXVPcfWIGqpQ3jzNwUeocbH27kuLh+U4hQo
|
|||
qeg10RdFnw==
|
||||
-----END CERTIFICATE-----`
|
||||
path = filepath.Join(tempDir, "cert.pem")
|
||||
if err = ioutil.WriteFile(path, []byte(data), 0644); err != nil {
|
||||
if err = os.WriteFile(path, []byte(data), 0644); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
|
@ -98,7 +97,7 @@ E/WObVJXDnBdViu0L9abE9iaTToBVri4cmlDlZagLuKVR+TFTCN/DSlVZTDkqkLI
|
|||
8chzqtkH6b2b2R73hyRysWjsomys34ma3mEEPTX/aXeAF2MSZ/EWT9yL
|
||||
-----END RSA PRIVATE KEY-----`
|
||||
path = filepath.Join(tempDir, "key.pem")
|
||||
if err = ioutil.WriteFile(path, []byte(data), 0644); err != nil {
|
||||
if err = os.WriteFile(path, []byte(data), 0644); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
|
|
|
@ -29,14 +29,15 @@ func (p RRSet) Less(i, j int) bool { return p[i].String() < p[j].String() }
|
|||
// Case represents a test case that encapsulates various data from a query and response.
|
||||
// Note that if the TTL of a record is 303 we don't compare it with the TTL.
|
||||
type Case struct {
|
||||
Qname string
|
||||
Qtype uint16
|
||||
Rcode int
|
||||
Do bool
|
||||
Answer []dns.RR
|
||||
Ns []dns.RR
|
||||
Extra []dns.RR
|
||||
Error error
|
||||
Qname string
|
||||
Qtype uint16
|
||||
Rcode int
|
||||
Do bool
|
||||
AuthenticatedData bool
|
||||
Answer []dns.RR
|
||||
Ns []dns.RR
|
||||
Extra []dns.RR
|
||||
Error error
|
||||
}
|
||||
|
||||
// Msg returns a *dns.Msg embedded in c.
|
||||
|
@ -99,6 +100,9 @@ func DNSKEY(rr string) *dns.DNSKEY { r, _ := dns.NewRR(rr); return r.(*dns.DNSKE
|
|||
// DS returns a DS record from rr. It panics on errors.
|
||||
func DS(rr string) *dns.DS { r, _ := dns.NewRR(rr); return r.(*dns.DS) }
|
||||
|
||||
// NAPTR returns a NAPTR record from rr. It panics on errors.
|
||||
func NAPTR(rr string) *dns.NAPTR { r, _ := dns.NewRR(rr); return r.(*dns.NAPTR) }
|
||||
|
||||
// OPT returns an OPT record with UDP buffer size set to bufsize and the DO bit set to do.
|
||||
func OPT(bufsize int, do bool) *dns.OPT {
|
||||
o := new(dns.OPT)
|
||||
|
@ -112,7 +116,7 @@ func OPT(bufsize int, do bool) *dns.OPT {
|
|||
return o
|
||||
}
|
||||
|
||||
// Header test if the header in resp matches the header as defined in tc.
|
||||
// Header tests if the header in resp matches the header as defined in tc.
|
||||
func Header(tc Case, resp *dns.Msg) error {
|
||||
if resp.Rcode != tc.Rcode {
|
||||
return fmt.Errorf("rcode is %q, expected %q", dns.RcodeToString[resp.Rcode], dns.RcodeToString[tc.Rcode])
|
||||
|
@ -248,7 +252,7 @@ func Section(tc Case, sec sect, rr []dns.RR) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// CNAMEOrder makes sure that CNAMES do not appear after their target records
|
||||
// CNAMEOrder makes sure that CNAMES do not appear after their target records.
|
||||
func CNAMEOrder(res *dns.Msg) error {
|
||||
for i, c := range res.Answer {
|
||||
if c.Header().Rrtype != dns.TypeCNAME {
|
||||
|
|
|
@ -38,22 +38,22 @@ func (t *ResponseWriter) RemoteAddr() net.Addr {
|
|||
return &net.UDPAddr{IP: ip, Port: port, Zone: ""}
|
||||
}
|
||||
|
||||
// WriteMsg implement dns.ResponseWriter interface.
|
||||
// WriteMsg implements dns.ResponseWriter interface.
|
||||
func (t *ResponseWriter) WriteMsg(m *dns.Msg) error { return nil }
|
||||
|
||||
// Write implement dns.ResponseWriter interface.
|
||||
// Write implements dns.ResponseWriter interface.
|
||||
func (t *ResponseWriter) Write(buf []byte) (int, error) { return len(buf), nil }
|
||||
|
||||
// Close implement dns.ResponseWriter interface.
|
||||
// Close implements dns.ResponseWriter interface.
|
||||
func (t *ResponseWriter) Close() error { return nil }
|
||||
|
||||
// TsigStatus implement dns.ResponseWriter interface.
|
||||
// TsigStatus implements dns.ResponseWriter interface.
|
||||
func (t *ResponseWriter) TsigStatus() error { return nil }
|
||||
|
||||
// TsigTimersOnly implement dns.ResponseWriter interface.
|
||||
// TsigTimersOnly implements dns.ResponseWriter interface.
|
||||
func (t *ResponseWriter) TsigTimersOnly(bool) {}
|
||||
|
||||
// Hijack implement dns.ResponseWriter interface.
|
||||
// Hijack implements dns.ResponseWriter interface.
|
||||
func (t *ResponseWriter) Hijack() {}
|
||||
|
||||
// ResponseWriter6 returns fixed client and remote address in IPv6. The remote
|
||||
|
|
|
@ -77,7 +77,7 @@ func Scrape(url string) []*MetricFamily {
|
|||
return result
|
||||
}
|
||||
|
||||
// ScrapeMetricAsInt provide a sum of all metrics collected for the name and label provided.
|
||||
// ScrapeMetricAsInt provides a sum of all metrics collected for the name and label provided.
|
||||
// if the metric is not a numeric value, it will be counted as 0.
|
||||
func ScrapeMetricAsInt(addr string, name string, label string, nometricvalue int) int {
|
||||
|
||||
|
|
|
@ -144,7 +144,7 @@ func (r *Request) Family() int {
|
|||
return 2
|
||||
}
|
||||
|
||||
// Do returns if the request has the DO (DNSSEC OK) bit set.
|
||||
// Do returns true if the request has the DO (DNSSEC OK) bit set.
|
||||
func (r *Request) Do() bool {
|
||||
if r.size != 0 {
|
||||
return r.do
|
||||
|
@ -338,6 +338,8 @@ func (r *Request) Clear() {
|
|||
r.port = ""
|
||||
r.localPort = ""
|
||||
r.family = 0
|
||||
r.size = 0
|
||||
r.do = false
|
||||
}
|
||||
|
||||
// Match checks if the reply matches the qname and qtype from the request, it returns
|
||||
|
|
|
@ -26,8 +26,8 @@ var (
|
|||
// NewMD5 and NewSHA1.
|
||||
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
|
||||
h.Reset()
|
||||
h.Write(space[:])
|
||||
h.Write(data)
|
||||
h.Write(space[:]) //nolint:errcheck
|
||||
h.Write(data) //nolint:errcheck
|
||||
s := h.Sum(nil)
|
||||
var uuid UUID
|
||||
copy(uuid[:], s)
|
||||
|
|
|
@ -0,0 +1,118 @@
|
|||
// Copyright 2021 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var jsonNull = []byte("null")
|
||||
|
||||
// NullUUID represents a UUID that may be null.
|
||||
// NullUUID implements the SQL driver.Scanner interface so
|
||||
// it can be used as a scan destination:
|
||||
//
|
||||
// var u uuid.NullUUID
|
||||
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
|
||||
// ...
|
||||
// if u.Valid {
|
||||
// // use u.UUID
|
||||
// } else {
|
||||
// // NULL value
|
||||
// }
|
||||
//
|
||||
type NullUUID struct {
|
||||
UUID UUID
|
||||
Valid bool // Valid is true if UUID is not NULL
|
||||
}
|
||||
|
||||
// Scan implements the SQL driver.Scanner interface.
|
||||
func (nu *NullUUID) Scan(value interface{}) error {
|
||||
if value == nil {
|
||||
nu.UUID, nu.Valid = Nil, false
|
||||
return nil
|
||||
}
|
||||
|
||||
err := nu.UUID.Scan(value)
|
||||
if err != nil {
|
||||
nu.Valid = false
|
||||
return err
|
||||
}
|
||||
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver Valuer interface.
|
||||
func (nu NullUUID) Value() (driver.Value, error) {
|
||||
if !nu.Valid {
|
||||
return nil, nil
|
||||
}
|
||||
// Delegate to UUID Value function
|
||||
return nu.UUID.Value()
|
||||
}
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler.
|
||||
func (nu NullUUID) MarshalBinary() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return nu.UUID[:], nil
|
||||
}
|
||||
|
||||
return []byte(nil), nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
||||
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
|
||||
if len(data) != 16 {
|
||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
|
||||
}
|
||||
copy(nu.UUID[:], data)
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (nu NullUUID) MarshalText() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return nu.UUID.MarshalText()
|
||||
}
|
||||
|
||||
return jsonNull, nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (nu *NullUUID) UnmarshalText(data []byte) error {
|
||||
id, err := ParseBytes(data)
|
||||
if err != nil {
|
||||
nu.Valid = false
|
||||
return err
|
||||
}
|
||||
nu.UUID = id
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (nu NullUUID) MarshalJSON() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return json.Marshal(nu.UUID)
|
||||
}
|
||||
|
||||
return jsonNull, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
|
||||
if bytes.Equal(data, jsonNull) {
|
||||
*nu = NullUUID{}
|
||||
return nil // valid null UUID
|
||||
}
|
||||
err := json.Unmarshal(data, &nu.UUID)
|
||||
nu.Valid = err == nil
|
||||
return err
|
||||
}
|
|
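A short sketch of NullUUID in JSON round-trips, complementing the database example in the comment above; the printed forms follow from the MarshalJSON/UnmarshalJSON methods shown:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	var nu uuid.NullUUID
	_ = json.Unmarshal([]byte("null"), &nu)
	fmt.Println(nu.Valid) // false: JSON null maps to an invalid (NULL) UUID

	nu = uuid.NullUUID{UUID: uuid.New(), Valid: true}
	b, _ := json.Marshal(nu)
	fmt.Println(string(b)) // the UUID rendered as a quoted JSON string
}
```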
@ -9,7 +9,7 @@ import (
|
|||
"fmt"
|
||||
)
|
||||
|
||||
// Scan implements sql.Scanner so UUIDs can be read from databases transparently
|
||||
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
|
||||
// Currently, database types that map to string and []byte are supported. Please
|
||||
// consult database-specific driver documentation for matching types.
|
||||
func (uuid *UUID) Scan(src interface{}) error {
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
|
||||
|
@ -33,7 +34,27 @@ const (
|
|||
Future // Reserved for future definition.
|
||||
)
|
||||
|
||||
var rander = rand.Reader // random function
|
||||
const randPoolSize = 16 * 16
|
||||
|
||||
var (
|
||||
rander = rand.Reader // random function
|
||||
poolEnabled = false
|
||||
poolMu sync.Mutex
|
||||
poolPos = randPoolSize // protected with poolMu
|
||||
pool [randPoolSize]byte // protected with poolMu
|
||||
)
|
||||
|
||||
type invalidLengthError struct{ len int }
|
||||
|
||||
func (err invalidLengthError) Error() string {
|
||||
return fmt.Sprintf("invalid UUID length: %d", err.len)
|
||||
}
|
||||
|
||||
// IsInvalidLengthError is matcher function for custom error invalidLengthError
|
||||
func IsInvalidLengthError(err error) bool {
|
||||
_, ok := err.(invalidLengthError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Parse decodes s into a UUID or returns an error. Both the standard UUID
|
||||
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
||||
|
@ -68,7 +89,7 @@ func Parse(s string) (UUID, error) {
|
|||
}
|
||||
return uuid, nil
|
||||
default:
|
||||
return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
|
||||
return uuid, invalidLengthError{len(s)}
|
||||
}
|
||||
// s is now at least 36 bytes long
|
||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
|
@ -112,7 +133,7 @@ func ParseBytes(b []byte) (UUID, error) {
|
|||
}
|
||||
return uuid, nil
|
||||
default:
|
||||
return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
|
||||
return uuid, invalidLengthError{len(b)}
|
||||
}
|
||||
// s is now at least 36 bytes long
|
||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
|
@ -243,3 +264,31 @@ func SetRand(r io.Reader) {
|
|||
}
|
||||
rander = r
|
||||
}
|
||||
|
||||
// EnableRandPool enables internal randomness pool used for Random
|
||||
// (Version 4) UUID generation. The pool contains random bytes read from
|
||||
// the random number generator on demand in batches. Enabling the pool
|
||||
// may improve the UUID generation throughput significantly.
|
||||
//
|
||||
// Since the pool is stored on the Go heap, this feature may be a bad fit
|
||||
// for security sensitive applications.
|
||||
//
|
||||
// Both EnableRandPool and DisableRandPool are not thread-safe and should
|
||||
// only be called when there is no possibility that New or any other
|
||||
// UUID Version 4 generation function will be called concurrently.
|
||||
func EnableRandPool() {
|
||||
poolEnabled = true
|
||||
}
|
||||
|
||||
// DisableRandPool disables the randomness pool if it was previously
|
||||
// enabled with EnableRandPool.
|
||||
//
|
||||
// Both EnableRandPool and DisableRandPool are not thread-safe and should
|
||||
// only be called when there is no possibility that New or any other
|
||||
// UUID Version 4 generation function will be called concurrently.
|
||||
func DisableRandPool() {
|
||||
poolEnabled = false
|
||||
defer poolMu.Unlock()
|
||||
poolMu.Lock()
|
||||
poolPos = randPoolSize
|
||||
}
|
||||
|
|
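A minimal sketch of opting into the pool; as the comments above note, this is a throughput/heap trade-off and the toggles are not safe to call concurrently with UUID generation:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Enable the batched random pool before generating v4 UUIDs.
	// Skip this in security-sensitive code: the pool keeps random bytes on the heap.
	uuid.EnableRandPool()
	defer uuid.DisableRandPool()

	fmt.Println(uuid.NewString())
}
```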
|
@ -14,11 +14,21 @@ func New() UUID {
|
|||
return Must(NewRandom())
|
||||
}
|
||||
|
||||
// NewString creates a new random UUID and returns it as a string or panics.
|
||||
// NewString is equivalent to the expression
|
||||
//
|
||||
// uuid.New().String()
|
||||
func NewString() string {
|
||||
return Must(NewRandom()).String()
|
||||
}
|
||||
|
||||
// NewRandom returns a Random (Version 4) UUID.
|
||||
//
|
||||
// The strength of the UUIDs is based on the strength of the crypto/rand
|
||||
// package.
|
||||
//
|
||||
// Uses the randomness pool if it was enabled with EnableRandPool.
|
||||
//
|
||||
// A note about uniqueness derived from the UUID Wikipedia entry:
|
||||
//
|
||||
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
|
||||
|
@ -27,7 +37,10 @@ func New() UUID {
|
|||
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
|
||||
// year and having one duplicate.
|
||||
func NewRandom() (UUID, error) {
|
||||
return NewRandomFromReader(rander)
|
||||
if !poolEnabled {
|
||||
return NewRandomFromReader(rander)
|
||||
}
|
||||
return newRandomFromPool()
|
||||
}
|
||||
|
||||
// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
|
||||
|
@ -41,3 +54,23 @@ func NewRandomFromReader(r io.Reader) (UUID, error) {
|
|||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
|
||||
return uuid, nil
|
||||
}
|
||||
|
||||
func newRandomFromPool() (UUID, error) {
|
||||
var uuid UUID
|
||||
poolMu.Lock()
|
||||
if poolPos == randPoolSize {
|
||||
_, err := io.ReadFull(rander, pool[:])
|
||||
if err != nil {
|
||||
poolMu.Unlock()
|
||||
return Nil, err
|
||||
}
|
||||
poolPos = 0
|
||||
}
|
||||
copy(uuid[:], pool[poolPos:(poolPos+16)])
|
||||
poolPos += 16
|
||||
poolMu.Unlock()
|
||||
|
||||
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
|
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
|
||||
return uuid, nil
|
||||
}
|
||||
|
|
|
@ -1,11 +1,10 @@
|
|||
# gorilla/mux
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
|
||||
[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
|
||||
[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux)
|
||||
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
|
||||
|
||||
![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
|
||||
![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png)
|
||||
|
||||
https://www.gorillatoolkit.org/pkg/mux
|
||||
|
||||
|
@ -26,6 +25,7 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv
|
|||
* [Examples](#examples)
|
||||
* [Matching Routes](#matching-routes)
|
||||
* [Static Files](#static-files)
|
||||
* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.)
|
||||
* [Registered URLs](#registered-urls)
|
||||
* [Walking Routes](#walking-routes)
|
||||
* [Graceful Shutdown](#graceful-shutdown)
|
||||
|
@ -212,6 +212,93 @@ func main() {
|
|||
}
|
||||
```
|
||||
|
||||
### Serving Single Page Applications
|
||||
|
||||
Most of the time it makes sense to serve your SPA on a separate web server from your API,
|
||||
but sometimes it's desirable to serve them both from one place. It's possible to write a simple
|
||||
handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage
|
||||
mux's powerful routing for your API endpoints.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
// spaHandler implements the http.Handler interface, so we can use it
|
||||
// to respond to HTTP requests. The path to the static directory and
|
||||
// path to the index file within that static directory are used to
|
||||
// serve the SPA in the given static directory.
|
||||
type spaHandler struct {
|
||||
staticPath string
|
||||
indexPath string
|
||||
}
|
||||
|
||||
// ServeHTTP inspects the URL path to locate a file within the static dir
|
||||
// on the SPA handler. If a file is found, it will be served. If not, the
|
||||
// file located at the index path on the SPA handler will be served. This
|
||||
// is suitable behavior for serving an SPA (single page application).
|
||||
func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// get the absolute path to prevent directory traversal
|
||||
path, err := filepath.Abs(r.URL.Path)
|
||||
if err != nil {
|
||||
// if we failed to get the absolute path respond with a 400 bad request
|
||||
// and stop
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// prepend the path with the path to the static directory
|
||||
path = filepath.Join(h.staticPath, path)
|
||||
|
||||
// check whether a file exists at the given path
|
||||
_, err = os.Stat(path)
|
||||
if os.IsNotExist(err) {
|
||||
// file does not exist, serve index.html
|
||||
http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
|
||||
return
|
||||
} else if err != nil {
|
||||
// if we got an error (that wasn't that the file doesn't exist) stating the
|
||||
// file, return a 500 internal server error and stop
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// otherwise, use http.FileServer to serve the static dir
|
||||
http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
func main() {
|
||||
router := mux.NewRouter()
|
||||
|
||||
router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) {
|
||||
// an example API handler
|
||||
json.NewEncoder(w).Encode(map[string]bool{"ok": true})
|
||||
})
|
||||
|
||||
spa := spaHandler{staticPath: "build", indexPath: "index.html"}
|
||||
router.PathPrefix("/").Handler(spa)
|
||||
|
||||
srv := &http.Server{
|
||||
Handler: router,
|
||||
Addr: "127.0.0.1:8000",
|
||||
// Good practice: enforce timeouts for servers you create!
|
||||
WriteTimeout: 15 * time.Second,
|
||||
ReadTimeout: 15 * time.Second,
|
||||
}
|
||||
|
||||
log.Fatal(srv.ListenAndServe())
|
||||
}
|
||||
```
|
||||
|
||||
### Registered URLs
|
||||
|
||||
Now let's see how to build registered URLs.
|
||||
|
|
|
@ -1,18 +0,0 @@
|
|||
package mux
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func contextGet(r *http.Request, key interface{}) interface{} {
|
||||
return r.Context().Value(key)
|
||||
}
|
||||
|
||||
func contextSet(r *http.Request, key, val interface{}) *http.Request {
|
||||
if val == nil {
|
||||
return r
|
||||
}
|
||||
|
||||
return r.WithContext(context.WithValue(r.Context(), key, val))
|
||||
}
|
|
@ -58,22 +58,17 @@ func CORSMethodMiddleware(r *Router) MiddlewareFunc {
|
|||
func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) {
|
||||
var allMethods []string
|
||||
|
||||
err := r.Walk(func(route *Route, _ *Router, _ []*Route) error {
|
||||
for _, m := range route.matchers {
|
||||
if _, ok := m.(*routeRegexp); ok {
|
||||
if m.Match(req, &RouteMatch{}) {
|
||||
methods, err := route.GetMethods()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allMethods = append(allMethods, methods...)
|
||||
}
|
||||
break
|
||||
for _, route := range r.routes {
|
||||
var match RouteMatch
|
||||
if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch {
|
||||
methods, err := route.GetMethods()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return allMethods, err
|
||||
allMethods = append(allMethods, methods...)
|
||||
}
|
||||
}
|
||||
|
||||
return allMethods, nil
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
package mux
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
@ -58,8 +59,7 @@ type Router struct {
|
|||
|
||||
// If true, do not clear the request context after handling the request.
|
||||
//
|
||||
// Deprecated: No effect when go1.7+ is used, since the context is stored
|
||||
// on the request itself.
|
||||
// Deprecated: No effect, since the context is stored on the request itself.
|
||||
KeepContext bool
|
||||
|
||||
// Slice of middlewares to be called after a match is found
|
||||
|
@ -111,10 +111,8 @@ func copyRouteConf(r routeConf) routeConf {
|
|||
c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
|
||||
}
|
||||
|
||||
c.matchers = make([]matcher, 0, len(r.matchers))
|
||||
for _, m := range r.matchers {
|
||||
c.matchers = append(c.matchers, m)
|
||||
}
|
||||
c.matchers = make([]matcher, len(r.matchers))
|
||||
copy(c.matchers, r.matchers)
|
||||
|
||||
return c
|
||||
}
|
||||
|
@ -197,8 +195,8 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
|||
var handler http.Handler
|
||||
if r.Match(req, &match) {
|
||||
handler = match.Handler
|
||||
req = setVars(req, match.Vars)
|
||||
req = setCurrentRoute(req, match.Route)
|
||||
req = requestWithVars(req, match.Vars)
|
||||
req = requestWithRoute(req, match.Route)
|
||||
}
|
||||
|
||||
if handler == nil && match.MatchErr == ErrMethodMismatch {
|
||||
|
@ -428,7 +426,7 @@ const (
|
|||
|
||||
// Vars returns the route variables for the current request, if any.
|
||||
func Vars(r *http.Request) map[string]string {
|
||||
if rv := contextGet(r, varsKey); rv != nil {
|
||||
if rv := r.Context().Value(varsKey); rv != nil {
|
||||
return rv.(map[string]string)
|
||||
}
|
||||
return nil
|
||||
|
@ -437,21 +435,22 @@ func Vars(r *http.Request) map[string]string {
|
|||
// CurrentRoute returns the matched route for the current request, if any.
|
||||
// This only works when called inside the handler of the matched route
|
||||
// because the matched route is stored in the request context which is cleared
|
||||
// after the handler returns, unless the KeepContext option is set on the
|
||||
// Router.
|
||||
// after the handler returns.
|
||||
func CurrentRoute(r *http.Request) *Route {
|
||||
if rv := contextGet(r, routeKey); rv != nil {
|
||||
if rv := r.Context().Value(routeKey); rv != nil {
|
||||
return rv.(*Route)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setVars(r *http.Request, val interface{}) *http.Request {
|
||||
return contextSet(r, varsKey, val)
|
||||
func requestWithVars(r *http.Request, vars map[string]string) *http.Request {
|
||||
ctx := context.WithValue(r.Context(), varsKey, vars)
|
||||
return r.WithContext(ctx)
|
||||
}
|
||||
|
||||
func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
|
||||
return contextSet(r, routeKey, val)
|
||||
func requestWithRoute(r *http.Request, route *Route) *http.Request {
|
||||
ctx := context.WithValue(r.Context(), routeKey, route)
|
||||
return r.WithContext(ctx)
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
|
|
@ -181,21 +181,21 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
|
|||
}
|
||||
}
|
||||
return r.regexp.MatchString(host)
|
||||
} else {
|
||||
if r.regexpType == regexpTypeQuery {
|
||||
return r.matchQueryString(req)
|
||||
}
|
||||
path := req.URL.Path
|
||||
if r.options.useEncodedPath {
|
||||
path = req.URL.EscapedPath()
|
||||
}
|
||||
return r.regexp.MatchString(path)
|
||||
}
|
||||
|
||||
if r.regexpType == regexpTypeQuery {
|
||||
return r.matchQueryString(req)
|
||||
}
|
||||
path := req.URL.Path
|
||||
if r.options.useEncodedPath {
|
||||
path = req.URL.EscapedPath()
|
||||
}
|
||||
return r.regexp.MatchString(path)
|
||||
}
|
||||
|
||||
// url builds a URL part using the given values.
|
||||
func (r *routeRegexp) url(values map[string]string) (string, error) {
|
||||
urlValues := make([]interface{}, len(r.varsN))
|
||||
urlValues := make([]interface{}, len(r.varsN), len(r.varsN))
|
||||
for k, v := range r.varsN {
|
||||
value, ok := values[v]
|
||||
if !ok {
|
||||
|
@ -230,14 +230,51 @@ func (r *routeRegexp) getURLQuery(req *http.Request) string {
|
|||
return ""
|
||||
}
|
||||
templateKey := strings.SplitN(r.template, "=", 2)[0]
|
||||
for key, vals := range req.URL.Query() {
|
||||
if key == templateKey && len(vals) > 0 {
|
||||
return key + "=" + vals[0]
|
||||
}
|
||||
val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey)
|
||||
if ok {
|
||||
return templateKey + "=" + val
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0].
|
||||
// If key was not found, empty string and false is returned.
|
||||
func findFirstQueryKey(rawQuery, key string) (value string, ok bool) {
|
||||
query := []byte(rawQuery)
|
||||
for len(query) > 0 {
|
||||
foundKey := query
|
||||
if i := bytes.IndexAny(foundKey, "&;"); i >= 0 {
|
||||
foundKey, query = foundKey[:i], foundKey[i+1:]
|
||||
} else {
|
||||
query = query[:0]
|
||||
}
|
||||
if len(foundKey) == 0 {
|
||||
continue
|
||||
}
|
||||
var value []byte
|
||||
if i := bytes.IndexByte(foundKey, '='); i >= 0 {
|
||||
foundKey, value = foundKey[:i], foundKey[i+1:]
|
||||
}
|
||||
if len(foundKey) < len(key) {
|
||||
// Cannot possibly be key.
|
||||
continue
|
||||
}
|
||||
keyString, err := url.QueryUnescape(string(foundKey))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if keyString != key {
|
||||
continue
|
||||
}
|
||||
valueString, err := url.QueryUnescape(string(value))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return valueString, true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
func (r *routeRegexp) matchQueryString(req *http.Request) bool {
|
||||
return r.regexp.MatchString(r.getURLQuery(req))
|
||||
}
|
||||
|
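findFirstQueryKey is documented as returning the same result as (*url.URL).Query()[key][0] while scanning the raw query directly, avoiding the full Values allocation. A minimal in-package test sketch of that equivalence (the test name and URL are illustrative):

```go
package mux

import (
	"net/url"
	"testing"
)

// TestFindFirstQueryKeySketch checks the documented equivalence with
// (*url.URL).Query()[key][0] for a repeated query parameter.
func TestFindFirstQueryKeySketch(t *testing.T) {
	u, _ := url.Parse("http://example.com/?a=1&b=2&a=3")
	got, ok := findFirstQueryKey(u.RawQuery, "a")
	if want := u.Query()["a"][0]; !ok || got != want {
		t.Fatalf("got %q/%v, want %q", got, ok, want)
	}
}
```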
@ -288,6 +325,12 @@ func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
|
|||
// Store host variables.
|
||||
if v.host != nil {
|
||||
host := getHost(req)
|
||||
if v.host.wildcardHostPort {
|
||||
// Don't be strict on the port match
|
||||
if i := strings.Index(host, ":"); i != -1 {
|
||||
host = host[:i]
|
||||
}
|
||||
}
|
||||
matches := v.host.regexp.FindStringSubmatchIndex(host)
|
||||
if len(matches) > 0 {
|
||||
extractVars(host, matches, v.host.varsN, m.Vars)
|
||||
|
|
|
@@ -74,7 +74,7 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
		return false
	}

	if match.MatchErr == ErrMethodMismatch {
	if match.MatchErr == ErrMethodMismatch && r.handler != nil {
		// We found a route which matches request method, clear MatchErr
		match.MatchErr = nil
		// Then override the mis-matched handler

@@ -412,11 +412,30 @@ func (r *Route) Queries(pairs ...string) *Route {
type schemeMatcher []string

func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
	return matchInArray(m, r.URL.Scheme)
	scheme := r.URL.Scheme
	// https://golang.org/pkg/net/http/#Request
	// "For [most] server requests, fields other than Path and RawQuery will be
	// empty."
	// Since we're an http muxer, the scheme is either going to be http or https
	// though, so we can just set it based on the tls termination state.
	if scheme == "" {
		if r.TLS == nil {
			scheme = "http"
		} else {
			scheme = "https"
		}
	}
	return matchInArray(m, scheme)
}

// Schemes adds a matcher for URL schemes.
// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
// If the request's URL has a scheme set, it will be matched against.
// Generally, the URL scheme will only be set if a previous handler set it,
// such as the ProxyHeaders handler from gorilla/handlers.
// If unset, the scheme will be determined based on the request's TLS
// termination state.
// The first argument to Schemes will be used when constructing a route URL.
func (r *Route) Schemes(schemes ...string) *Route {
	for k, v := range schemes {
		schemes[k] = strings.ToLower(v)

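With this fallback, a scheme matcher no longer depends on `r.URL.Scheme` being populated (it is empty for most server requests) and instead infers `http` or `https` from `r.TLS`. A hedged sketch of how a TLS-only route is typically declared; the handler and listen address are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// Matches only requests that arrived over TLS (or whose URL scheme was
	// explicitly set to "https" by an earlier handler such as ProxyHeaders).
	r.HandleFunc("/secure", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(w, "tls only")
	}).Schemes("https")

	// On a plain HTTP listener, requests to /secure fall through to 404:
	// req.TLS is nil, so the inferred scheme is "http".
	log.Fatal(http.ListenAndServe(":8080", r))
}
```
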
@@ -493,8 +512,8 @@ func (r *Route) Subrouter() *Router {
// This also works for host variables:
//
//     r := mux.NewRouter()
//     r.Host("{subdomain}.domain.com").
//         HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
//         Host("{subdomain}.domain.com").
//         Name("article")
//
//     // url.String() will be "http://news.domain.com/articles/technology/42"

@@ -502,6 +521,13 @@ func (r *Route) Subrouter() *Router {
//     "category", "technology",
//     "id", "42")
//
// The scheme of the resulting url will be the first argument that was passed to Schemes:
//
//     // url.String() will be "https://example.com"
//     r := mux.NewRouter()
//     url, err := r.Host("example.com")
//                  .Schemes("https", "http").URL()
//
// All variables defined in the route are required, and their values must
// conform to the corresponding patterns.
func (r *Route) URL(pairs ...string) (*url.URL, error) {

@@ -635,7 +661,7 @@ func (r *Route) GetQueriesRegexp() ([]string, error) {
	if r.regexp.queries == nil {
		return nil, errors.New("mux: route doesn't have queries")
	}
	var queries []string
	queries := make([]string, 0, len(r.regexp.queries))
	for _, query := range r.regexp.queries {
		queries = append(queries, query.regexp.String())
	}

@@ -654,7 +680,7 @@ func (r *Route) GetQueriesTemplates() ([]string, error) {
	if r.regexp.queries == nil {
		return nil, errors.New("mux: route doesn't have queries")
	}
	var queries []string
	queries := make([]string, 0, len(r.regexp.queries))
	for _, query := range r.regexp.queries {
		queries = append(queries, query.template)
	}

@@ -15,5 +15,5 @@ import "net/http"
// can be set by making a route that captures the required variables,
// starting a server and sending the request to that server.
func SetURLVars(r *http.Request, val map[string]string) *http.Request {
	return setVars(r, val)
	return requestWithVars(r, val)
}

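`SetURLVars` now forwards to the renamed `requestWithVars`, but its exported contract is unchanged: it injects route variables into a request under test. A small sketch of that use, with an illustrative handler and variable name:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func articleHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	fmt.Fprintf(w, "article %s", vars["id"])
}

func main() {
	req := httptest.NewRequest(http.MethodGet, "/articles/42", nil)
	// Inject the variables the router would normally extract from the path.
	req = mux.SetURLVars(req, map[string]string{"id": "42"})

	rec := httptest.NewRecorder()
	articleHandler(rec, req)
	fmt.Println(rec.Body.String()) // article 42
}
```
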
@@ -8,8 +8,6 @@

A high-performance 100% compatible drop-in replacement of "encoding/json"

You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)

# Benchmark

![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)

@@ -288,6 +288,9 @@ non_decimal_loop:
				return iter.readFloat64SlowPath()
			}
			value = (value << 3) + (value << 1) + uint64(ind)
			if value > maxFloat64 {
				return iter.readFloat64SlowPath()
			}
		}
	}
	return iter.readFloat64SlowPath()

@@ -9,6 +9,7 @@ var intDigits []int8

const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
const maxFloat64 = 1<<53 - 1

func init() {
	intDigits = make([]int8, 256)

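The new `maxFloat64` constant is 2^53 − 1, the largest range in which every integer is exactly representable as a float64; the fast-path guard added above falls back to the slow path before precision can silently be lost. A standalone illustration of the cutoff:

```go
package main

import "fmt"

func main() {
	const limit = uint64(1) << 53 // jsoniter's maxFloat64 is this minus one

	// Below 2^53 every integer has an exact float64 representation.
	fmt.Println(float64(limit-1) == float64(limit-2)) // false

	// At 2^53 the gap between adjacent float64 values becomes 2,
	// so 2^53+1 rounds back down to 2^53.
	fmt.Println(float64(limit) == float64(limit+1)) // true
}
```
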
@@ -339,7 +340,7 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) {
}

func (iter *Iterator) assertInteger() {
	if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
	if iter.head < iter.tail && iter.buf[iter.head] == '.' {
		iter.ReportError("assertInteger", "can not decode float as int")
	}
}

@@ -65,7 +65,7 @@ func (iter *Iterator) ReadVal(obj interface{}) {
	decoder := iter.cfg.getDecoderFromCache(cacheKey)
	if decoder == nil {
		typ := reflect2.TypeOf(obj)
		if typ.Kind() != reflect.Ptr {
		if typ == nil || typ.Kind() != reflect.Ptr {
			iter.ReportError("ReadVal", "can only unmarshal into pointer")
			return
		}

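The added `typ == nil` test covers a nil destination interface, which previously panicked inside the reflection lookup. A hedged sketch of the case it guards; the exact error text is the library's concern and is not asserted here:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Passing a nil destination: with the nil check above this is reported
	// as a decode error ("can only unmarshal into pointer") instead of panicking.
	err := jsoniter.Unmarshal([]byte(`{"a":1}`), nil)
	fmt.Println(err)
}
```
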
@@ -33,11 +33,19 @@ type jsonRawMessageCodec struct {
}

func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	*((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
	if iter.ReadNil() {
		*((*json.RawMessage)(ptr)) = nil
	} else {
		*((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes()
	}
}

func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
	if *((*json.RawMessage)(ptr)) == nil {
		stream.WriteNil()
	} else {
		stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
	}
}

func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {

@@ -48,11 +56,19 @@ type jsoniterRawMessageCodec struct {
}

func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
	*((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
	if iter.ReadNil() {
		*((*RawMessage)(ptr)) = nil
	} else {
		*((*RawMessage)(ptr)) = iter.SkipAndReturnBytes()
	}
}

func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteRaw(string(*((*RawMessage)(ptr))))
	if *((*RawMessage)(ptr)) == nil {
		stream.WriteNil()
	} else {
		stream.WriteRaw(string(*((*RawMessage)(ptr))))
	}
}

func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {

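Both codecs now write JSON `null` for a nil raw message and map `null` back to nil on decode, which is how `encoding/json` behaves. A hedged round-trip sketch under that assumption:

```go
package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	type payload struct {
		Data json.RawMessage `json:"data"`
	}

	// A nil RawMessage should encode as null rather than an empty token.
	out, _ := jsoniter.Marshal(payload{Data: nil})
	fmt.Println(string(out)) // {"data":null}

	// And null should decode back to a nil RawMessage.
	var p payload
	_ = jsoniter.Unmarshal([]byte(`{"data":null}`), &p)
	fmt.Println(p.Data == nil) // true
}
```
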
@@ -1075,6 +1075,11 @@ type stringModeNumberDecoder struct {
}

func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if iter.WhatIsNext() == NilValue {
		decoder.elemDecoder.Decode(ptr, iter)
		return
	}

	c := iter.nextToken()
	if c != '"' {
		iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))

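With the `NilValue` check, a field tagged `,string` may now be `null` in the input: the literal is handed to the element decoder rather than tripping the quote check. A hedged sketch of the expected effect (field name is illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type record struct {
	// The ",string" option means the number normally arrives quoted: {"n":"42"}.
	N int `json:"n,string"`
}

func main() {
	var r record
	// Previously this failed with `expect "` because null is unquoted;
	// with the nil check above it should decode and leave N at its zero value.
	err := jsoniter.Unmarshal([]byte(`{"n":null}`), &r)
	fmt.Println(r.N, err)
}
```
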
@@ -1,17 +0,0 @@
language: go
sudo: false

go:
  - "1.12.x"
  - "1.13.x"
  - tip

env:
  - GO111MODULE=on

script:
  - go generate ./... && test `git ls-files --modified | wc -l` = 0
  - go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./...

after_success:
  - bash <(curl -s https://codecov.io/bash)

@@ -1,7 +1,7 @@
# Makefile for releasing.
#
# The release is controlled from version.go. The version found there is
# used to tag the git repo, we're not building any artifects so there is nothing
# used to tag the git repo, we're not building any artifacts so there is nothing
# to upload to github.
#
# * Up the version in version.go

@@ -26,7 +26,6 @@ avoiding breaking changes wherever reasonable. We support the last two versions
A not-so-up-to-date-list-that-may-be-actually-current:

* https://github.com/coredns/coredns
* https://cloudflare.com
* https://github.com/abh/geodns
* https://github.com/baidu/bfe
* http://www.statdns.com/

@@ -42,11 +41,9 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/StalkR/dns-reverse-proxy
* https://github.com/tianon/rawdns
* https://mesosphere.github.io/mesos-dns/
* https://pulse.turbobytes.com/
* https://github.com/fcambus/statzone
* https://github.com/benschw/dns-clb-go
* https://github.com/corny/dnscheck for <http://public-dns.info/>
* https://namesmith.io
* https://github.com/miekg/unbound
* https://github.com/miekg/exdns
* https://dnslookup.org

@@ -55,24 +52,31 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/mehrdadrad/mylg
* https://github.com/bamarni/dockness
* https://github.com/fffaraz/microdns
* http://kelda.io
* https://github.com/ipdcode/hades <https://jd.com>
* https://github.com/StackExchange/dnscontrol/
* https://www.dnsperf.com/
* https://dnssectest.net/
* https://dns.apebits.com
* https://github.com/oif/apex
* https://github.com/jedisct1/dnscrypt-proxy
* https://github.com/jedisct1/rpdns
* https://github.com/xor-gate/sshfp
* https://github.com/rs/dnstrace
* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss))
* https://github.com/semihalev/sdns
* https://render.com
* https://github.com/peterzen/goresolver
* https://github.com/folbricht/routedns
* https://domainr.com/
* https://zonedb.org/
* https://router7.org/
* https://github.com/fortio/dnsping
* https://github.com/Luzilla/dnsbl_exporter
* https://github.com/bodgit/tsig
* https://github.com/v2fly/v2ray-core (test only)
* https://kuma.io/
* https://www.misaka.io/services/dns
* https://ping.sx/dig
* https://fleetdeck.io/

Send pull request if you want to be listed here.

@@ -169,6 +173,9 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 7873 - Domain Name System (DNS) Cookies
* 8080 - EdDSA for DNSSEC
* 8499 - DNS Terminology
* 8659 - DNS Certification Authority Authorization (CAA) Resource Record
* 8914 - Extended DNS Errors
* 8976 - Message Digest for DNS Zones (ZONEMD RR)

## Loosely Based Upon

@@ -12,7 +12,7 @@ type MsgAcceptFunc func(dh Header) MsgAcceptAction
//
// * Zero bit isn't zero
//
// * has more than 1 question in the question section
// * does not have exactly 1 question in the question section
//
// * has more than 1 RR in the Answer section
//

@@ -25,6 +25,7 @@ var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
// MsgAcceptAction represents the action to be taken.
type MsgAcceptAction int

// Allowed returned values from a MsgAcceptFunc.
const (
	MsgAccept MsgAcceptAction = iota // Accept the message
	MsgReject // Reject the message with a RcodeFormatError

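`MsgAcceptFunc` is consulted by a `dns.Server` for every incoming header before the full message is parsed, and the constants above are its possible verdicts. A hedged sketch of installing a stricter policy; the listen address and the exact rejection choices are illustrative:

```go
package main

import "github.com/miekg/dns"

// strictAccept rejects anything that is not a plain query with exactly one
// question, along the lines of what the default accept function documents.
func strictAccept(dh dns.Header) dns.MsgAcceptAction {
	const qrBit = 1 << 15 // QR set means this is a response, not a query
	if dh.Bits&qrBit != 0 {
		return dns.MsgIgnore
	}
	if dh.Qdcount != 1 {
		return dns.MsgReject
	}
	return dns.MsgAccept
}

func main() {
	srv := &dns.Server{
		Addr:          ":8053",
		Net:           "udp",
		MsgAcceptFunc: strictAccept,
	}
	_ = srv.ListenAndServe()
}
```
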
@@ -23,6 +23,7 @@ type Conn struct {
	net.Conn // a net.Conn holding the connection
	UDPSize uint16 // minimum receive buffer for UDP messages
	TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
	TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
	tsigRequestMAC string
}

@@ -34,12 +35,13 @@ type Client struct {
	Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more
	// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
	// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
	// Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext)
	// Client.Dialer) or context.Context.Deadline (see ExchangeContext)
	Timeout time.Duration
	DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
	ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
	WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
	TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
	TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
	SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
	group singleflight
}

@@ -80,6 +82,12 @@ func (c *Client) writeTimeout() time.Duration {

// Dial connects to the address on the named network.
func (c *Client) Dial(address string) (conn *Conn, err error) {
	return c.DialContext(context.Background(), address)
}

// DialContext connects to the address on the named network, with a context.Context.
// For TLS over TCP (DoT) the context isn't used yet. This will be enabled when Go 1.18 is released.
func (c *Client) DialContext(ctx context.Context, address string) (conn *Conn, err error) {
	// create a new dialer with the appropriate timeout
	var d net.Dialer
	if c.Dialer == nil {

@@ -99,14 +107,22 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
	if useTLS {
		network = strings.TrimSuffix(network, "-tls")

		// TODO(miekg): Enable after Go 1.18 is released, to be able to support two prev. releases.
		/*
			tlsDialer := tls.Dialer{
				NetDialer: &d,
				Config:    c.TLSConfig,
			}
			conn.Conn, err = tlsDialer.DialContext(ctx, network, address)
		*/
		conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
	} else {
		conn.Conn, err = d.Dial(network, address)
		conn.Conn, err = d.DialContext(ctx, network, address)
	}
	if err != nil {
		return nil, err
	}

	conn.UDPSize = c.UDPSize
	return conn, nil
}

@@ -124,7 +140,6 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
// of 512 bytes
// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
// attribute appropriately

func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
	co, err := c.Dial(address)

@@ -138,24 +153,34 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
// ExchangeWithConn has the same behavior as Exchange, just with a predetermined connection
// that will be used instead of creating a new one.
// Usage pattern with a *dns.Client:
//
//	c := new(dns.Client)
//	// connection management logic goes here
//
//	conn := c.Dial(address)
//	in, rtt, err := c.ExchangeWithConn(message, conn)
//
// This allows users of the library to implement their own connection management,
// as opposed to Exchange, which will always use new connections and incur the added overhead
// that entails when using "tcp" and especially "tcp-tls" clients.
// This allows users of the library to implement their own connection management,
// as opposed to Exchange, which will always use new connections and incur the added overhead
// that entails when using "tcp" and especially "tcp-tls" clients.
//
// When the singleflight is set for this client the context is _not_ forwarded to the (shared) exchange, to
// prevent one cancelation from canceling all outstanding requests.
func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
	return c.exchangeWithConnContext(context.Background(), m, conn)
}

func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
	if !c.SingleInflight {
		return c.exchange(m, conn)
		return c.exchangeContext(ctx, m, conn)
	}

	q := m.Question[0]
	key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
	r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
		return c.exchange(m, conn)
		// When we're doing singleflight we don't want one context cancelation, cancel _all_ outstanding queries.
		// Hence we ignore the context and use Background().
		return c.exchangeContext(context.Background(), m, conn)
	})
	if r != nil && shared {
		r = r.Copy()

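The usage pattern quoted in the comment above can be fleshed out as follows; a hedged sketch of reusing one connection for several queries, with the resolver address and query names as placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	c := &dns.Client{Net: "tcp"} // reuse pays off most for "tcp" and "tcp-tls"

	conn, err := c.Dial("8.8.8.8:53")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	for _, name := range []string{"example.com.", "example.org."} {
		m := new(dns.Msg)
		m.SetQuestion(dns.Fqdn(name), dns.TypeA)

		// Same connection for every exchange instead of dialing each time.
		in, rtt, err := c.ExchangeWithConn(m, conn)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(name, rtt, len(in.Answer))
	}
}
```
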
@@ -164,8 +189,7 @@ func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration
	return r, rtt, err
}

func (c *Client) exchange(m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {

func (c *Client) exchangeContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
	opt := m.IsEdns0()
	// If EDNS0 is used use that for size.
	if opt != nil && opt.UDPSize() >= MinMsgSize {

@@ -176,18 +200,41 @@ func (c *Client) exchange(m *Msg, co *Conn) (r *Msg, rtt time.Duration, err erro
		co.UDPSize = c.UDPSize
	}

	co.TsigSecret = c.TsigSecret
	t := time.Now()
	// write with the appropriate write timeout
	co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout())))
	t := time.Now()
	writeDeadline := t.Add(c.getTimeoutForRequest(c.writeTimeout()))
	readDeadline := t.Add(c.getTimeoutForRequest(c.readTimeout()))
	if deadline, ok := ctx.Deadline(); ok {
		if deadline.Before(writeDeadline) {
			writeDeadline = deadline
		}
		if deadline.Before(readDeadline) {
			readDeadline = deadline
		}
	}
	co.SetWriteDeadline(writeDeadline)
	co.SetReadDeadline(readDeadline)

	co.TsigSecret, co.TsigProvider = c.TsigSecret, c.TsigProvider

	if err = co.WriteMsg(m); err != nil {
		return nil, 0, err
	}

	co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout())))
	r, err = co.ReadMsg()
	if err == nil && r.Id != m.Id {
		err = ErrId
	if _, ok := co.Conn.(net.PacketConn); ok {
		for {
			r, err = co.ReadMsg()
			// Ignore replies with mismatched IDs because they might be
			// responses to earlier queries that timed out.
			if err != nil || r.Id == m.Id {
				break
			}
		}
	} else {
		r, err = co.ReadMsg()
		if err == nil && r.Id != m.Id {
			err = ErrId
		}
	}
	rtt = time.Since(t)
	return r, rtt, err

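Because the context deadline is now folded into the connection's read and write deadlines, a single `context.WithTimeout` bounds the whole exchange. A hedged sketch; the resolver address is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)

	m := new(dns.Msg)
	m.SetQuestion("example.com.", dns.TypeAAAA)

	// The earliest of the context deadline and the client's own timeouts wins.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	in, rtt, err := c.ExchangeContext(ctx, m, "1.1.1.1:53")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rtt, in.Rcode)
}
```
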
@@ -212,11 +259,15 @@ func (co *Conn) ReadMsg() (*Msg, error) {
		return m, err
	}
	if t := m.IsTsig(); t != nil {
		if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
			return m, ErrSecret
		if co.TsigProvider != nil {
			err = tsigVerifyProvider(p, co.TsigProvider, co.tsigRequestMAC, false)
		} else {
			if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
				return m, ErrSecret
			}
			// Need to work on the original message p, as that was used to calculate the tsig.
			err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
		}
		// Need to work on the original message p, as that was used to calculate the tsig.
		err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
	}
	return m, err
}

@@ -294,10 +345,14 @@ func (co *Conn) WriteMsg(m *Msg) (err error) {
	var out []byte
	if t := m.IsTsig(); t != nil {
		mac := ""
		if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
			return ErrSecret
		if co.TsigProvider != nil {
			out, mac, err = tsigGenerateProvider(m, co.TsigProvider, co.tsigRequestMAC, false)
		} else {
			if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
				return ErrSecret
			}
			out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
		}
		out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
		// Set for the next read, although only used in zone transfers
		co.tsigRequestMAC = mac
	} else {

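The branches above keep the map-based `TsigSecret` path working and only switch to a `TsigProvider` when one is set. A hedged sketch of the long-standing map-based form; the key name, secret, and server address are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	const keyName = "axfr-key."                   // must be a canonical FQDN
	const secret = "c2VjcmV0IGtleSBtYXRlcmlhbA==" // base64, as the field docs describe

	c := new(dns.Client)
	c.TsigSecret = map[string]string{keyName: secret}

	m := new(dns.Msg)
	m.SetQuestion("example.com.", dns.TypeSOA)
	// Attach the TSIG record; WriteMsg signs with the matching secret,
	// and ReadMsg verifies the response the same way.
	m.SetTsig(keyName, dns.HmacSHA256, 300, time.Now().Unix())

	in, _, err := c.Exchange(m, "192.0.2.1:53")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(in.Rcode)
}
```
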
@@ -320,11 +375,10 @@ func (co *Conn) Write(p []byte) (int, error) {
		return co.Conn.Write(p)
	}

	l := make([]byte, 2)
	binary.BigEndian.PutUint16(l, uint16(len(p)))

	n, err := (&net.Buffers{l, p}).WriteTo(co.Conn)
	return int(n), err
	msg := make([]byte, 2+len(p))
	binary.BigEndian.PutUint16(msg, uint16(len(p)))
	copy(msg[2:], p)
	return co.Conn.Write(msg)
}

// Return the appropriate timeout for a specific request

@@ -360,7 +414,7 @@ func Dial(network, address string) (conn *Conn, err error) {
func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
	client := Client{Net: "udp"}
	r, _, err = client.ExchangeContext(ctx, m, a)
	// ignorint rtt to leave the original ExchangeContext API unchanged, but
	// ignoring rtt to leave the original ExchangeContext API unchanged, but
	// this function will go away
	return r, err
}

@@ -416,15 +470,11 @@ func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout
// context, if present. If there is both a context deadline and a configured
// timeout on the client, the earliest of the two takes effect.
func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
	var timeout time.Duration
	if deadline, ok := ctx.Deadline(); !ok {
		timeout = 0
	} else {
		timeout = time.Until(deadline)
	conn, err := c.DialContext(ctx, a)
	if err != nil {
		return nil, 0, err
	}
	// not passing the context to the underlying calls, as the API does not support
	// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
	// TODO(tmthrgd,miekg): this is a race condition.
	c.Dialer = &net.Dialer{Timeout: timeout}
	return c.Exchange(m, a)
	defer conn.Close()

	return c.exchangeWithConnContext(ctx, m, conn)
}

@@ -349,10 +349,7 @@ func ReverseAddr(addr string) (arpa string, err error) {
	// Add it, in reverse, to the buffer
	for i := len(ip) - 1; i >= 0; i-- {
		v := ip[i]
		buf = append(buf, hexDigit[v&0xF])
		buf = append(buf, '.')
		buf = append(buf, hexDigit[v>>4])
		buf = append(buf, '.')
		buf = append(buf, hexDigit[v&0xF], '.', hexDigit[v>>4], '.')
	}
	// Append "ip6.arpa." and return (buf already has the final .)
	buf = append(buf, "ip6.arpa."...)

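The rewritten loop appends the same four elements per byte in one call: low nibble, dot, high nibble, dot. A brief usage sketch of the exported helper; the addresses are documentation examples:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// IPv4 addresses map into in-addr.arpa.
	v4, err := dns.ReverseAddr("192.0.2.10")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v4) // 10.2.0.192.in-addr.arpa.

	// IPv6 addresses expand to one label per nibble under ip6.arpa.
	v6, err := dns.ReverseAddr("2001:db8::1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v6)
}
```
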
@@ -1,6 +1,9 @@
package dns

import "strconv"
import (
	"encoding/hex"
	"strconv"
)

const (
	year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.

@@ -111,7 +114,7 @@ func (h *RR_Header) parse(c *zlexer, origin string) *ParseError {

// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
func (rr *RFC3597) ToRFC3597(r RR) error {
	buf := make([]byte, Len(r)*2)
	buf := make([]byte, Len(r))
	headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false)
	if err != nil {
		return err

@@ -126,9 +129,30 @@ func (rr *RFC3597) ToRFC3597(r RR) error {
	}

	_, err = rr.unpack(buf, headerEnd)
	return err
}

// fromRFC3597 converts an unknown RR representation from RFC 3597 to the known RR type.
func (rr *RFC3597) fromRFC3597(r RR) error {
	hdr := r.Header()
	*hdr = rr.Hdr

	// Can't overflow uint16 as the length of Rdata is validated in (*RFC3597).parse.
	// We can only get here when rr was constructed with that method.
	hdr.Rdlength = uint16(hex.DecodedLen(len(rr.Rdata)))

	if noRdata(*hdr) {
		// Dynamic update.
		return nil
	}

	// rr.pack requires an extra allocation and a copy so we just decode Rdata
	// manually, it's simpler anyway.
	msg, err := hex.DecodeString(rr.Rdata)
	if err != nil {
		return err
	}

	return nil
	_, err = r.unpack(msg, 0)
	return err
}

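`fromRFC3597` turns the hex `Rdata` of an unknown-type record back into a concrete RR. The RFC 3597 text form it works from can be produced and parsed with the exported API; the owner name, type code, and rdata below are arbitrary examples:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// RFC 3597 generic syntax: TYPE<code> plus `\#`, an rdata length and hex rdata.
	rr, err := dns.NewRR(`example.org. 3600 IN TYPE65280 \# 4 0a000001`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rr.String())
	fmt.Println(rr.Header().Rrtype) // 65280
}
```
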