TUN-528: Move cloudflared into a separate repo

parent e8c621a648
commit d06fc520c7
.gitignore
@@ -1,6 +1,8 @@
.GOPATH/
bin/
tmp/
guide/public
/.GOPATH
/bin
.idea
.vscode
Gopkg.lock
@@ -0,0 +1,403 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  branch = "master"
  name = "code.cfops.it/go/brotli"
  packages = ["."]
  revision = "18c9f6c67e3dfc12e0ddaca748d2887f97a7ac28"

[[projects]]
  name = "github.com/BurntSushi/toml"
  packages = ["."]
  revision = "b26d9c308763d68093482582cea63d69be07a0f0"
  version = "v0.3.0"

[[projects]]
  branch = "master"
  name = "github.com/beorn7/perks"
  packages = ["quantile"]
  revision = "3a771d992973f24aa725d07868b467d1ddfceafb"

[[projects]]
  name = "github.com/certifi/gocertifi"
  packages = ["."]
  revision = "deb3ae2ef2610fde3330947281941c562861188b"
  version = "2018.01.18"

[[projects]]
  name = "github.com/coredns/coredns"
  packages = [
    "core/dnsserver",
    "coremain",
    "pb",
    "plugin",
    "plugin/cache",
    "plugin/cache/freq",
    "plugin/etcd/msg",
    "plugin/metrics",
    "plugin/metrics/vars",
    "plugin/pkg/cache",
    "plugin/pkg/dnstest",
    "plugin/pkg/dnsutil",
    "plugin/pkg/edns",
    "plugin/pkg/fuzz",
    "plugin/pkg/log",
    "plugin/pkg/nonwriter",
    "plugin/pkg/rcode",
    "plugin/pkg/response",
    "plugin/pkg/trace",
    "plugin/pkg/uniq",
    "plugin/test",
    "request"
  ]
  revision = "f78f30231df90da6184d5f811ecf9c06b0160c2b"
  version = "v1.1.4"

[[projects]]
  name = "github.com/coreos/go-systemd"
  packages = ["daemon"]
  revision = "39ca1b05acc7ad1220e09f133283b8859a8b71ab"
  version = "v17"

[[projects]]
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]
  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
  version = "v1.1.0"

[[projects]]
  branch = "master"
  name = "github.com/equinox-io/equinox"
  packages = [
    ".",
    "internal/go-update",
    "internal/go-update/internal/binarydist",
    "internal/go-update/internal/osext",
    "internal/osext",
    "proto"
  ]
  revision = "f24972fa72facf59d05c91c848b65eac38815915"

[[projects]]
  branch = "master"
  name = "github.com/facebookgo/grace"
  packages = ["gracenet"]
  revision = "75cf19382434e82df4dd84953f566b8ad23d6e9e"

[[projects]]
  branch = "master"
  name = "github.com/flynn/go-shlex"
  packages = ["."]
  revision = "3f9db97f856818214da2e1057f8ad84803971cff"

[[projects]]
  branch = "master"
  name = "github.com/getsentry/raven-go"
  packages = ["."]
  revision = "ed7bcb39ff10f39ab08e317ce16df282845852fa"

[[projects]]
  branch = "master"
  name = "github.com/golang-collections/collections"
  packages = ["queue"]
  revision = "604e922904d35e97f98a774db7881f049cd8d970"

[[projects]]
  name = "github.com/golang/protobuf"
  packages = [
    "proto",
    "ptypes",
    "ptypes/any",
    "ptypes/duration",
    "ptypes/timestamp"
  ]
  revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
  version = "v1.1.0"

[[projects]]
  name = "github.com/google/uuid"
  packages = ["."]
  revision = "064e2069ce9c359c118179501254f67d7d37ba24"
  version = "0.2"

[[projects]]
  name = "github.com/gorilla/websocket"
  packages = ["."]
  revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
  version = "v1.2.0"

[[projects]]
  branch = "master"
  name = "github.com/grpc-ecosystem/grpc-opentracing"
  packages = ["go/otgrpc"]
  revision = "8e809c8a86450a29b90dcc9efbf062d0fe6d9746"

[[projects]]
  name = "github.com/mattn/go-colorable"
  packages = ["."]
  revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072"
  version = "v0.0.9"

[[projects]]
  name = "github.com/mattn/go-isatty"
  packages = ["."]
  revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
  version = "v0.0.3"

[[projects]]
  name = "github.com/matttproud/golang_protobuf_extensions"
  packages = ["pbutil"]
  revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
  version = "v1.0.1"

[[projects]]
  name = "github.com/mholt/caddy"
  packages = [
    ".",
    "caddyfile",
    "telemetry"
  ]
  revision = "1f7b5abc80679fb71ee0e04ed98cbe284b1fc181"
  version = "v0.11.0"

[[projects]]
  name = "github.com/miekg/dns"
  packages = ["."]
  revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1"
  version = "v1.0.8"

[[projects]]
  branch = "master"
  name = "github.com/mitchellh/go-homedir"
  packages = ["."]
  revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66"

[[projects]]
  name = "github.com/opentracing/opentracing-go"
  packages = [
    ".",
    "ext",
    "log"
  ]
  revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
  version = "v1.0.2"

[[projects]]
  name = "github.com/pkg/errors"
  packages = ["."]
  revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
  version = "v0.8.0"

[[projects]]
  name = "github.com/pmezard/go-difflib"
  packages = ["difflib"]
  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
  version = "v1.0.0"

[[projects]]
  name = "github.com/prometheus/client_golang"
  packages = [
    "prometheus",
    "prometheus/promhttp"
  ]
  revision = "967789050ba94deca04a5e84cce8ad472ce313c1"
  version = "v0.9.0-pre1"

[[projects]]
  branch = "master"
  name = "github.com/prometheus/client_model"
  packages = ["go"]
  revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"

[[projects]]
  branch = "master"
  name = "github.com/prometheus/common"
  packages = [
    "expfmt",
    "internal/bitbucket.org/ww/goautoneg",
    "model"
  ]
  revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"

[[projects]]
  branch = "master"
  name = "github.com/prometheus/procfs"
  packages = [
    ".",
    "internal/util",
    "nfs",
    "xfs"
  ]
  revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a"

[[projects]]
  name = "github.com/rifflock/lfshook"
  packages = ["."]
  revision = "bf539943797a1f34c1f502d07de419b5238ae6c6"
  version = "v2.3"

[[projects]]
  name = "github.com/sirupsen/logrus"
  packages = ["."]
  revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
  version = "v1.0.5"

[[projects]]
  name = "github.com/stretchr/testify"
  packages = ["assert"]
  revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
  version = "v1.2.2"

[[projects]]
  branch = "master"
  name = "golang.org/x/crypto"
  packages = [
    "ed25519",
    "ed25519/internal/edwards25519",
    "ssh/terminal"
  ]
  revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602"

[[projects]]
  branch = "master"
  name = "golang.org/x/net"
  packages = [
    "bpf",
    "context",
    "http/httpguts",
    "http2",
    "http2/hpack",
    "idna",
    "internal/iana",
    "internal/socket",
    "internal/timeseries",
    "ipv4",
    "ipv6",
    "trace",
    "websocket"
  ]
  revision = "32a936f46389aa10549d60bd7833e54b01685d09"

[[projects]]
  branch = "master"
  name = "golang.org/x/sync"
  packages = ["errgroup"]
  revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = [
    "unix",
    "windows",
    "windows/registry",
    "windows/svc",
    "windows/svc/eventlog",
    "windows/svc/mgr"
  ]
  revision = "ce36f3865eeb42541ce3f87f32f8462c5687befa"

[[projects]]
  name = "golang.org/x/text"
  packages = [
    "collate",
    "collate/build",
    "internal/colltab",
    "internal/gen",
    "internal/tag",
    "internal/triegen",
    "internal/ucd",
    "language",
    "secure/bidirule",
    "transform",
    "unicode/bidi",
    "unicode/cldr",
    "unicode/norm",
    "unicode/rangetable"
  ]
  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
  version = "v0.3.0"

[[projects]]
  branch = "master"
  name = "google.golang.org/genproto"
  packages = ["googleapis/rpc/status"]
  revision = "ff3583edef7de132f219f0efc00e097cabcc0ec0"

[[projects]]
  name = "google.golang.org/grpc"
  packages = [
    ".",
    "balancer",
    "balancer/base",
    "balancer/roundrobin",
    "codes",
    "connectivity",
    "credentials",
    "encoding",
    "encoding/proto",
    "grpclog",
    "internal",
    "internal/backoff",
    "internal/channelz",
    "internal/grpcrand",
    "keepalive",
    "metadata",
    "naming",
    "peer",
    "resolver",
    "resolver/dns",
    "resolver/passthrough",
    "stats",
    "status",
    "tap",
    "transport"
  ]
  revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
  version = "v1.13.0"

[[projects]]
  branch = "altsrc-parse-durations"
  name = "gopkg.in/urfave/cli.v2"
  packages = [
    ".",
    "altsrc"
  ]
  revision = "d604b6ffeee878fbf084fd2761466b6649989cee"
  source = "https://github.com/cbranch/cli"

[[projects]]
  name = "gopkg.in/yaml.v2"
  packages = ["."]
  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
  version = "v2.2.1"

[[projects]]
  name = "zombiezen.com/go/capnproto2"
  packages = [
    ".",
    "encoding/text",
    "internal/fulfiller",
    "internal/nodemap",
    "internal/packed",
    "internal/queue",
    "internal/schema",
    "internal/strquote",
    "pogs",
    "rpc",
    "rpc/internal/refcount",
    "schemas",
    "server",
    "std/capnp/rpc"
  ]
  revision = "7cfd211c19c7f5783c695f3654efa46f0df259c3"
  source = "https://github.com/zombiezen/go-capnproto2"
  version = "v2.17.1"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "42fdf43f93aac410675bb8134097b51c90c110dc4c77595b8d2fb7c7876bd3d2"
  solver-name = "gps-cdcl"
  solver-version = 1
Gopkg.toml
@@ -0,0 +1,45 @@
[[constraint]]
  name = "github.com/facebookgo/grace"
  branch = "master"

[[constraint]]
  name = "github.com/getsentry/raven-go"
  branch = "master"

[[constraint]]
  name = "github.com/pkg/errors"
  version = "0.8.0"

[[constraint]]
  name = "github.com/prometheus/client_golang"
  version = "0.9.0-pre1"

[[constraint]]
  name = "github.com/sirupsen/logrus"
  version = "1.0.3"

[[constraint]]
  name = "github.com/stretchr/testify"
  version = "1.2.1"

[[constraint]]
  name = "golang.org/x/net"
  branch = "master"

[[constraint]]
  name = "golang.org/x/sync"
  branch = "master"

[[constraint]]
  name = "gopkg.in/urfave/cli.v2"
  source = "https://github.com/cbranch/cli"
  branch = "altsrc-parse-durations"

[[constraint]]
  name = "zombiezen.com/go/capnproto2"
  source = "https://github.com/zombiezen/go-capnproto2"

[[constraint]]
  name = "github.com/gorilla/websocket"
  version = "1.2.0"
@@ -0,0 +1,320 @@
package main

import (
    "crypto/tls"
    "crypto/x509"
    "encoding/hex"
    "fmt"
    "io/ioutil"
    "math/rand"
    "net"
    "net/http"
    "os"
    "path/filepath"
    "runtime"
    "strings"
    "time"

    "github.com/cloudflare/cloudflared/origin"
    "github.com/cloudflare/cloudflared/tlsconfig"
    tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
    "github.com/cloudflare/cloudflared/validation"

    "github.com/sirupsen/logrus"
    "gopkg.in/urfave/cli.v2"
    "gopkg.in/urfave/cli.v2/altsrc"

    "github.com/mitchellh/go-homedir"
    "github.com/pkg/errors"
)

var (
    defaultConfigFiles = []string{"config.yml", "config.yaml"}

    // Launchd doesn't set root env variables, so there is a default.
    // The Windows default config dir was ~/cloudflare-warp in the documentation; keep it compatible.
    defaultConfigDirs = []string{"~/.cloudflared", "~/.cloudflare-warp", "~/cloudflare-warp", "/usr/local/etc/cloudflared", "/etc/cloudflared"}
)

const defaultCredentialFile = "cert.pem"
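Note: the search order above means a YAML file such as ~/.cloudflared/config.yml is picked up automatically. The keys are simply the CLI flag names, resolved through cli.v2's altsrc YAML source (see findInputSourceContext below). A minimal illustrative config; the values are hypothetical, not defaults from this commit:

hostname: tunnel.example.com
url: http://localhost:8080
loglevel: info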
func fileExists(path string) (bool, error) {
    f, err := os.Open(path)
    if err != nil {
        if os.IsNotExist(err) {
            // ignore missing files
            return false, nil
        }
        return false, err
    }
    f.Close()
    return true, nil
}

// returns the first path that contains a cert.pem file. If none of the defaultConfigDirs
// (differs by OS for legacy reasons) contains a cert.pem file, return empty string
func findDefaultOriginCertPath() string {
    for _, defaultConfigDir := range defaultConfigDirs {
        originCertPath, _ := homedir.Expand(filepath.Join(defaultConfigDir, defaultCredentialFile))
        if ok, _ := fileExists(originCertPath); ok {
            return originCertPath
        }
    }
    return ""
}

// returns the first path that contains a config file. If no combination of
// defaultConfigDirs (differs by OS for legacy reasons) and defaultConfigFiles
// contains a config file, return empty string
func findDefaultConfigPath() string {
    for _, configDir := range defaultConfigDirs {
        for _, configFile := range defaultConfigFiles {
            dirPath, err := homedir.Expand(configDir)
            if err != nil {
                continue
            }
            path := filepath.Join(dirPath, configFile)
            if ok, _ := fileExists(path); ok {
                return path
            }
        }
    }
    return ""
}

func findInputSourceContext(context *cli.Context) (altsrc.InputSourceContext, error) {
    if context.String("config") != "" {
        return altsrc.NewYamlSourceFromFile(context.String("config"))
    }
    return nil, nil
}

func generateRandomClientID() string {
    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    id := make([]byte, 32)
    r.Read(id)
    return hex.EncodeToString(id)
}
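generateRandomClientID seeds math/rand with the wall clock, which is adequate here because the client ID is an identifier, not a secret. For contrast, a hypothetical variant using the crypto/rand CSPRNG (not what this commit does) would look like:

package main

import (
    "crypto/rand"
    "encoding/hex"
    "fmt"
)

// generateSecureClientID is a hypothetical variant of generateRandomClientID
// that draws from the OS CSPRNG instead of a time-seeded math/rand.
func generateSecureClientID() (string, error) {
    id := make([]byte, 32)
    if _, err := rand.Read(id); err != nil {
        return "", err
    }
    return hex.EncodeToString(id), nil
}

func main() {
    id, err := generateSecureClientID()
    if err != nil {
        panic(err)
    }
    fmt.Println(id) // 64 hex characters
}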
func enoughOptionsSet(c *cli.Context) bool {
    // For cloudflared to work, the user needs to at least provide a hostname,
    // or run as a standalone DNS proxy.
    // When using sudo, use the -E flag to preserve env vars.
    if c.NumFlags() == 0 && c.NArg() == 0 && os.Getenv("TUNNEL_HOSTNAME") == "" && os.Getenv("TUNNEL_DNS") == "" {
        if isRunningFromTerminal() {
            logger.Errorf("No arguments were provided. You need to at least specify the hostname for this tunnel. See %s", quickStartUrl)
            logger.Infof("If you want to run Argo Tunnel client as a stand alone DNS proxy, run with --proxy-dns option or set TUNNEL_DNS environment variable.")
        } else {
            logger.Errorf("You need to specify all the options in a configuration file, or use environment variables. See %s and %s", serviceUrl, argumentsUrl)
            logger.Infof("If you want to run Argo Tunnel client as a stand alone DNS proxy, specify proxy-dns option in the configuration file, or set TUNNEL_DNS environment variable.")
        }
        cli.ShowAppHelp(c)
        return false
    }
    return true
}

func handleDeprecatedOptions(c *cli.Context) error {
    // Fail if the user provided an old authentication method
    if c.IsSet("api-key") || c.IsSet("api-email") || c.IsSet("api-ca-key") {
        logger.Error("You don't need to give us your api-key anymore. Please use the new login method. Just run cloudflared login")
        return fmt.Errorf("Client provided deprecated options")
    }
    return nil
}

// validate url. It can come either from --url or from the positional argument
func validateUrl(c *cli.Context) (string, error) {
    var url = c.String("url")
    if c.NArg() > 0 {
        if c.IsSet("url") {
            return "", errors.New("Specified origin urls using both --url and argument. Decide which one you want, I can only support one.")
        }
        url = c.Args().Get(0)
    }
    validUrl, err := validation.ValidateUrl(url)
    return validUrl, err
}

func logClientOptions(c *cli.Context) {
    flags := make(map[string]interface{})
    for _, flag := range c.LocalFlagNames() {
        flags[flag] = c.Generic(flag)
    }
    if len(flags) > 0 {
        logger.Infof("Flags %v", flags)
    }

    envs := make(map[string]string)
    // Find env variables for Argo Tunnel
    for _, env := range os.Environ() {
        // All Argo Tunnel env variables start with TUNNEL_
        if strings.Contains(env, "TUNNEL_") {
            vars := strings.Split(env, "=")
            if len(vars) == 2 {
                envs[vars[0]] = vars[1]
            }
        }
    }
    if len(envs) > 0 {
        logger.Infof("Environmental variables %v", envs)
    }
}

func dnsProxyStandAlone(c *cli.Context) bool {
    return c.IsSet("proxy-dns") && (!c.IsSet("hostname") && !c.IsSet("tag") && !c.IsSet("hello-world"))
}

func getOriginCert(c *cli.Context) ([]byte, error) {
    if c.String("origincert") == "" {
        logger.Warnf("Cannot determine default origin certificate path. No file %s in %v", defaultCredentialFile, defaultConfigDirs)
        if isRunningFromTerminal() {
            logger.Errorf("You need to specify the origin certificate path with --origincert option, or set TUNNEL_ORIGIN_CERT environment variable. See %s for more information.", argumentsUrl)
            return nil, fmt.Errorf("Client didn't specify origincert path when running from terminal")
        } else {
            logger.Errorf("You need to specify the origin certificate path by specifying the origincert option in the configuration file, or set TUNNEL_ORIGIN_CERT environment variable. See %s for more information.", serviceUrl)
            return nil, fmt.Errorf("Client didn't specify origincert path")
        }
    }
    // Check that the user has acquired a certificate using the login command
    originCertPath, err := homedir.Expand(c.String("origincert"))
    if err != nil {
        logger.WithError(err).Errorf("Cannot resolve path %s", c.String("origincert"))
        return nil, fmt.Errorf("Cannot resolve path %s", c.String("origincert"))
    }
    ok, err := fileExists(originCertPath)
    if err != nil {
        logger.Errorf("Cannot check if origin cert exists at path %s", c.String("origincert"))
        return nil, fmt.Errorf("Cannot check if origin cert exists at path %s", c.String("origincert"))
    }
    if !ok {
        logger.Errorf(`Cannot find a valid certificate for your origin at the path:

    %s

If the path above is wrong, specify the path with the -origincert option.
If you don't have a certificate signed by Cloudflare, run the command:

    %s login
`, originCertPath, os.Args[0])
        return nil, fmt.Errorf("Cannot find a valid certificate at the path %s", originCertPath)
    }
    // Easier to send the certificate as []byte via RPC than decoding it at this point
    originCert, err := ioutil.ReadFile(originCertPath)
    if err != nil {
        logger.WithError(err).Errorf("Cannot read %s to load origin certificate", originCertPath)
        return nil, fmt.Errorf("Cannot read %s to load origin certificate", originCertPath)
    }
    return originCert, nil
}

func prepareTunnelConfig(c *cli.Context, buildInfo *origin.BuildInfo, logger, protoLogger *logrus.Logger) (*origin.TunnelConfig, error) {
    hostname, err := validation.ValidateHostname(c.String("hostname"))
    if err != nil {
        logger.WithError(err).Error("Invalid hostname")
        return nil, errors.Wrap(err, "Invalid hostname")
    }
    clientID := c.String("id")
    if !c.IsSet("id") {
        clientID = generateRandomClientID()
    }

    tags, err := NewTagSliceFromCLI(c.StringSlice("tag"))
    if err != nil {
        logger.WithError(err).Error("Tag parse failure")
        return nil, errors.Wrap(err, "Tag parse failure")
    }

    tags = append(tags, tunnelpogs.Tag{Name: "ID", Value: clientID})

    url, err := validateUrl(c)
    if err != nil {
        logger.WithError(err).Error("Error validating url")
        return nil, errors.Wrap(err, "Error validating url")
    }
    logger.Infof("Proxying tunnel requests to %s", url)

    originCert, err := getOriginCert(c)
    if err != nil {
        return nil, errors.Wrap(err, "Error getting origin cert")
    }

    originCertPool, err := loadCertPool(c, logger)
    if err != nil {
        logger.WithError(err).Error("Error loading cert pool")
        return nil, errors.Wrap(err, "Error loading cert pool")
    }

    tunnelMetrics := origin.NewTunnelMetrics()
    httpTransport := &http.Transport{
        Proxy: http.ProxyFromEnvironment,
        DialContext: (&net.Dialer{
            Timeout:   c.Duration("proxy-connect-timeout"),
            KeepAlive: c.Duration("proxy-tcp-keepalive"),
            DualStack: !c.Bool("proxy-no-happy-eyeballs"),
        }).DialContext,
        MaxIdleConns:          c.Int("proxy-keepalive-connections"),
        IdleConnTimeout:       c.Duration("proxy-keepalive-timeout"),
        TLSHandshakeTimeout:   c.Duration("proxy-tls-timeout"),
        ExpectContinueTimeout: 1 * time.Second,
        TLSClientConfig:       &tls.Config{RootCAs: originCertPool, InsecureSkipVerify: c.IsSet("no-tls-verify")},
    }

    if !c.IsSet("hello-world") && c.IsSet("origin-server-name") {
        httpTransport.TLSClientConfig.ServerName = c.String("origin-server-name")
    }

    return &origin.TunnelConfig{
        EdgeAddrs:          c.StringSlice("edge"),
        OriginUrl:          url,
        Hostname:           hostname,
        OriginCert:         originCert,
        TlsConfig:          tlsconfig.CreateTunnelConfig(c, c.StringSlice("edge")),
        ClientTlsConfig:    httpTransport.TLSClientConfig,
        Retries:            c.Uint("retries"),
        HeartbeatInterval:  c.Duration("heartbeat-interval"),
        MaxHeartbeats:      c.Uint64("heartbeat-count"),
        ClientID:           clientID,
        BuildInfo:          buildInfo,
        ReportedVersion:    Version,
        LBPool:             c.String("lb-pool"),
        Tags:               tags,
        HAConnections:      c.Int("ha-connections"),
        HTTPTransport:      httpTransport,
        Metrics:            tunnelMetrics,
        MetricsUpdateFreq:  c.Duration("metrics-update-freq"),
        ProtocolLogger:     protoLogger,
        Logger:             logger,
        IsAutoupdated:      c.Bool("is-autoupdated"),
        GracePeriod:        c.Duration("grace-period"),
        RunFromTerminal:    isRunningFromTerminal(),
        NoChunkedEncoding:  c.Bool("no-chunked-encoding"),
        CompressionQuality: c.Uint64("compression-quality"),
    }, nil
}
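All of the transport knobs above are ordinary CLI flags, so they can equally be set in the YAML config file; the Gopkg.toml pin on the altsrc-parse-durations fork of cli.v2 exists precisely so duration values parse from YAML. An illustrative snippet (values are examples, not this commit's defaults):

proxy-connect-timeout: 30s
proxy-tcp-keepalive: 30s
proxy-keepalive-connections: 100
proxy-keepalive-timeout: 90s
proxy-tls-timeout: 10s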
func loadCertPool(c *cli.Context, logger *logrus.Logger) (*x509.CertPool, error) {
    const originCAPoolFlag = "origin-ca-pool"
    originCAPoolFilename := c.String(originCAPoolFlag)
    var originCustomCAPool []byte

    if originCAPoolFilename != "" {
        var err error
        originCustomCAPool, err = ioutil.ReadFile(originCAPoolFilename)
        if err != nil {
            return nil, errors.Wrap(err, fmt.Sprintf("unable to read the file %s for --%s", originCAPoolFilename, originCAPoolFlag))
        }
    }

    originCertPool, err := tlsconfig.LoadOriginCertPool(originCustomCAPool)
    if err != nil {
        return nil, errors.Wrap(err, "error loading the certificate pool")
    }

    // Windows users should be notified that they can use the flag
    if runtime.GOOS == "windows" && originCAPoolFilename == "" {
        logger.Infof("cloudflared does not support loading the system root certificate pool on Windows. Please use the --%s to specify it", originCAPoolFlag)
    }

    return originCertPool, nil
}
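tlsconfig.LoadOriginCertPool is defined outside this hunk. A minimal sketch of what such a helper plausibly does, assuming only standard-library behavior (start from the system pool, append any custom PEM) — the real cloudflared implementation may differ:

package tlsconfig

import (
    "crypto/x509"
    "errors"
)

// Hypothetical sketch of the helper called from loadCertPool above.
func LoadOriginCertPool(customCAPoolPEM []byte) (*x509.CertPool, error) {
    // SystemCertPool returns an error on Windows in Go releases of this era,
    // which is why the caller logs a hint for Windows users.
    pool, err := x509.SystemCertPool()
    if err != nil {
        pool = x509.NewCertPool() // fall back to an empty pool
    }
    if len(customCAPoolPEM) > 0 {
        if !pool.AppendCertsFromPEM(customCAPoolPEM) {
            return nil, errors.New("could not parse custom origin CA PEM")
        }
    }
    return pool, nil
}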
@@ -0,0 +1,13 @@
// +build !windows,!darwin,!linux

package main

import (
    "os"

    cli "gopkg.in/urfave/cli.v2"
)

func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
    app.Run(os.Args)
}
@@ -0,0 +1,21 @@
package main

import (
    "fmt"

    "gopkg.in/urfave/cli.v2"

    "github.com/cloudflare/cloudflared/hello"
)

func helloWorld(c *cli.Context) error {
    address := fmt.Sprintf(":%d", c.Int("port"))
    listener, err := hello.CreateTLSListener(address)
    if err != nil {
        return err
    }
    defer listener.Close()
    err = hello.StartHelloWorldServer(logger, listener, nil)
    return err
}
@@ -0,0 +1,35 @@
package main

import (
    "testing"
)

func TestCreateListenerHostAndPortSuccess(t *testing.T) {
    listener, err := createListener("localhost:1234")
    if err != nil {
        t.Fatal(err)
    }
    if listener.Addr().String() == "" {
        t.Fatal("Fail to find available port")
    }
}

func TestCreateListenerOnlyHostSuccess(t *testing.T) {
    listener, err := createListener("localhost:")
    if err != nil {
        t.Fatal(err)
    }
    if listener.Addr().String() == "" {
        t.Fatal("Fail to find available port")
    }
}

func TestCreateListenerOnlyPortSuccess(t *testing.T) {
    listener, err := createListener(":8888")
    if err != nil {
        t.Fatal(err)
    }
    if listener.Addr().String() == "" {
        t.Fatal("Fail to find available port")
    }
}
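createListener itself is not part of this hunk. Since the tests accept host:port, host-only, and port-only addresses, a plausible minimal implementation simply wraps net.Listen; a sketch under that assumption:

package main

import "net"

// Hypothetical sketch of the function exercised by the tests above;
// the actual implementation in the repo may differ.
func createListener(address string) (net.Listener, error) {
    // net.Listen accepts "host:port", "host:" (system-chosen port) and ":port".
    return net.Listen("tcp", address)
}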
@@ -0,0 +1,292 @@
// +build linux

package main

import (
    "fmt"
    "os"
    "path/filepath"

    cli "gopkg.in/urfave/cli.v2"
)

func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
    app.Commands = append(app.Commands, &cli.Command{
        Name:  "service",
        Usage: "Manages the Argo Tunnel system service",
        Subcommands: []*cli.Command{
            &cli.Command{
                Name:   "install",
                Usage:  "Install Argo Tunnel as a system service",
                Action: installLinuxService,
            },
            &cli.Command{
                Name:   "uninstall",
                Usage:  "Uninstall the Argo Tunnel service",
                Action: uninstallLinuxService,
            },
        },
    })
    app.Run(os.Args)
}

const serviceConfigDir = "/etc/cloudflared"

var systemdTemplates = []ServiceTemplate{
    {
        Path: "/etc/systemd/system/cloudflared.service",
        Content: `[Unit]
Description=Argo Tunnel
After=network.target

[Service]
TimeoutStartSec=0
Type=notify
ExecStart={{ .Path }} --config /etc/cloudflared/config.yml --origincert /etc/cloudflared/cert.pem --no-autoupdate
Restart=on-failure
RestartSec=5s

[Install]
WantedBy=multi-user.target
`,
    },
    {
        Path: "/etc/systemd/system/cloudflared-update.service",
        Content: `[Unit]
Description=Update Argo Tunnel
After=network.target

[Service]
ExecStart=/bin/bash -c '{{ .Path }} update; code=$?; if [ $code -eq 64 ]; then systemctl restart cloudflared; exit 0; fi; exit $code'
`,
    },
    {
        Path: "/etc/systemd/system/cloudflared-update.timer",
        Content: `[Unit]
Description=Update Argo Tunnel

[Timer]
OnUnitActiveSec=1d

[Install]
WantedBy=timers.target
`,
    },
}

var sysvTemplate = ServiceTemplate{
    Path:     "/etc/init.d/cloudflared",
    FileMode: 0755,
    Content: `# For RedHat and cousins:
# chkconfig: 2345 99 01
# description: Argo Tunnel agent
# processname: {{.Path}}
### BEGIN INIT INFO
# Provides:          {{.Path}}
# Required-Start:
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Argo Tunnel
# Description:       Argo Tunnel agent
### END INIT INFO
name=$(basename $(readlink -f $0))
cmd="{{.Path}} --config /etc/cloudflared/config.yml --origincert /etc/cloudflared/cert.pem --pidfile /var/run/$name.pid --autoupdate-freq 24h0m0s"
pid_file="/var/run/$name.pid"
stdout_log="/var/log/$name.log"
stderr_log="/var/log/$name.err"
[ -e /etc/sysconfig/$name ] && . /etc/sysconfig/$name
get_pid() {
    cat "$pid_file"
}
is_running() {
    [ -f "$pid_file" ] && ps $(get_pid) > /dev/null 2>&1
}
case "$1" in
    start)
        if is_running; then
            echo "Already started"
        else
            echo "Starting $name"
            $cmd >> "$stdout_log" 2>> "$stderr_log" &
            echo $! > "$pid_file"
            if ! is_running; then
                echo "Unable to start, see $stdout_log and $stderr_log"
                exit 1
            fi
        fi
        ;;
    stop)
        if is_running; then
            echo -n "Stopping $name.."
            kill $(get_pid)
            for i in {1..10}
            do
                if ! is_running; then
                    break
                fi
                echo -n "."
                sleep 1
            done
            echo
            if is_running; then
                echo "Not stopped; may still be shutting down or shutdown may have failed"
                exit 1
            else
                echo "Stopped"
                if [ -f "$pid_file" ]; then
                    rm "$pid_file"
                fi
            fi
        else
            echo "Not running"
        fi
        ;;
    restart)
        $0 stop
        if is_running; then
            echo "Unable to stop, will not attempt to start"
            exit 1
        fi
        $0 start
        ;;
    status)
        if is_running; then
            echo "Running"
        else
            echo "Stopped"
            exit 1
        fi
        ;;
    *)
        echo "Usage: $0 {start|stop|restart|status}"
        exit 1
        ;;
esac
exit 0
`,
}

func isSystemd() bool {
    if _, err := os.Stat("/run/systemd/system"); err == nil {
        return true
    }
    return false
}

func installLinuxService(c *cli.Context) error {
    etPath, err := os.Executable()
    if err != nil {
        return fmt.Errorf("error determining executable path: %v", err)
    }
    templateArgs := ServiceTemplateArgs{Path: etPath}

    defaultConfigDir := filepath.Dir(c.String("config"))
    defaultConfigFile := filepath.Base(c.String("config"))
    if err = copyCredentials(serviceConfigDir, defaultConfigDir, defaultConfigFile, defaultCredentialFile); err != nil {
        logger.WithError(err).Infof("Failed to copy user configuration. Before running the service, ensure that %s contains two files, %s and %s",
            serviceConfigDir, defaultCredentialFile, defaultConfigFiles[0])
        return err
    }

    switch {
    case isSystemd():
        logger.Infof("Using Systemd")
        return installSystemd(&templateArgs)
    default:
        logger.Infof("Using Sysv")
        return installSysv(&templateArgs)
    }
}

func installSystemd(templateArgs *ServiceTemplateArgs) error {
    for _, serviceTemplate := range systemdTemplates {
        err := serviceTemplate.Generate(templateArgs)
        if err != nil {
            logger.WithError(err).Infof("error generating service template")
            return err
        }
    }
    if err := runCommand("systemctl", "enable", "cloudflared.service"); err != nil {
        logger.WithError(err).Infof("systemctl enable cloudflared.service error")
        return err
    }
    if err := runCommand("systemctl", "start", "cloudflared-update.timer"); err != nil {
        logger.WithError(err).Infof("systemctl start cloudflared-update.timer error")
        return err
    }
    logger.Infof("systemctl daemon-reload")
    return runCommand("systemctl", "daemon-reload")
}

func installSysv(templateArgs *ServiceTemplateArgs) error {
    confPath, err := sysvTemplate.ResolvePath()
    if err != nil {
        logger.WithError(err).Infof("error resolving system path")
        return err
    }
    if err := sysvTemplate.Generate(templateArgs); err != nil {
        logger.WithError(err).Infof("error generating system template")
        return err
    }
    for _, i := range [...]string{"2", "3", "4", "5"} {
        if err := os.Symlink(confPath, "/etc/rc"+i+".d/S50et"); err != nil {
            continue
        }
    }
    for _, i := range [...]string{"0", "1", "6"} {
        if err := os.Symlink(confPath, "/etc/rc"+i+".d/K02et"); err != nil {
            continue
        }
    }
    return nil
}

func uninstallLinuxService(c *cli.Context) error {
    switch {
    case isSystemd():
        logger.Infof("Using Systemd")
        return uninstallSystemd()
    default:
        logger.Infof("Using Sysv")
        return uninstallSysv()
    }
}

func uninstallSystemd() error {
    if err := runCommand("systemctl", "disable", "cloudflared.service"); err != nil {
        logger.WithError(err).Infof("systemctl disable cloudflared.service error")
        return err
    }
    if err := runCommand("systemctl", "stop", "cloudflared-update.timer"); err != nil {
        logger.WithError(err).Infof("systemctl stop cloudflared-update.timer error")
        return err
    }
    for _, serviceTemplate := range systemdTemplates {
        if err := serviceTemplate.Remove(); err != nil {
            logger.WithError(err).Infof("error removing service template")
            return err
        }
    }
    logger.Infof("Successfully uninstalled cloudflared service")
    return nil
}

func uninstallSysv() error {
    if err := sysvTemplate.Remove(); err != nil {
        logger.WithError(err).Infof("error removing service template")
        return err
    }
    for _, i := range [...]string{"2", "3", "4", "5"} {
        if err := os.Remove("/etc/rc" + i + ".d/S50et"); err != nil {
            continue
        }
    }
    for _, i := range [...]string{"0", "1", "6"} {
        if err := os.Remove("/etc/rc" + i + ".d/K02et"); err != nil {
            continue
        }
    }
    logger.Infof("Successfully uninstalled cloudflared service")
    return nil
}
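ServiceTemplate, ServiceTemplateArgs, runCommand and copyCredentials are defined elsewhere in the commit, not in this hunk. A sketch of how a Generate method like the one used above could plausibly work, assuming it renders Content with text/template and writes the result to Path (field names taken from the usage above; defaults are assumptions):

package main

import (
    "bytes"
    "io/ioutil"
    "os"
    "text/template"
)

// Hypothetical sketch; the real type in this commit may carry more fields.
type ServiceTemplate struct {
    Path     string
    Content  string
    FileMode os.FileMode
}

type ServiceTemplateArgs struct {
    Path string // path of the cloudflared executable, substituted for {{ .Path }}
}

func (st *ServiceTemplate) Generate(args *ServiceTemplateArgs) error {
    tmpl, err := template.New(st.Path).Parse(st.Content)
    if err != nil {
        return err
    }
    var buf bytes.Buffer
    if err := tmpl.Execute(&buf, args); err != nil {
        return err
    }
    mode := st.FileMode
    if mode == 0 {
        mode = 0644 // assumed default for unit files
    }
    return ioutil.WriteFile(st.Path, buf.Bytes(), mode)
}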
@@ -0,0 +1,68 @@
package main

import (
    "fmt"
    "os"

    "github.com/cloudflare/cloudflared/log"

    "github.com/rifflock/lfshook"
    "github.com/sirupsen/logrus"
    "gopkg.in/urfave/cli.v2"

    "github.com/mitchellh/go-homedir"
    "github.com/pkg/errors"
)

var logger = log.CreateLogger()

func configMainLogger(c *cli.Context) error {
    logLevel, err := logrus.ParseLevel(c.String("loglevel"))
    if err != nil {
        logger.WithError(err).Error("Unknown logging level specified")
        return errors.Wrap(err, "Unknown logging level specified")
    }
    logger.SetLevel(logLevel)
    return nil
}

func configProtoLogger(c *cli.Context) (*logrus.Logger, error) {
    protoLogLevel, err := logrus.ParseLevel(c.String("proto-loglevel"))
    if err != nil {
        logger.WithError(err).Fatal("Unknown protocol logging level specified")
        return nil, errors.Wrap(err, "Unknown protocol logging level specified")
    }
    protoLogger := logrus.New()
    protoLogger.Level = protoLogLevel
    return protoLogger, nil
}

func initLogFile(c *cli.Context, loggers ...*logrus.Logger) error {
    filePath, err := homedir.Expand(c.String("logfile"))
    if err != nil {
        return errors.Wrap(err, "Cannot resolve logfile path")
    }

    fileMode := os.O_WRONLY | os.O_APPEND | os.O_CREATE | os.O_TRUNC
    // do not truncate log file if the client has been autoupdated
    if c.Bool("is-autoupdated") {
        fileMode = os.O_WRONLY | os.O_APPEND | os.O_CREATE
    }
    f, err := os.OpenFile(filePath, fileMode, 0664)
    if err != nil {
        return errors.Wrap(err, fmt.Sprintf("Cannot open file %s", filePath))
    }
    defer f.Close()
    pathMap := lfshook.PathMap{
        logrus.InfoLevel:  filePath,
        logrus.ErrorLevel: filePath,
        logrus.FatalLevel: filePath,
        logrus.PanicLevel: filePath,
    }

    for _, l := range loggers {
        l.Hooks.Add(lfshook.NewHook(pathMap, &logrus.JSONFormatter{}))
    }

    return nil
}
@@ -0,0 +1,194 @@
package main

import (
    "crypto/rand"
    "encoding/base32"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "os"
    "os/exec"
    "path/filepath"
    "runtime"
    "syscall"
    "time"

    homedir "github.com/mitchellh/go-homedir"
    cli "gopkg.in/urfave/cli.v2"
)

const baseLoginURL = "https://dash.cloudflare.com/warp"
const baseCertStoreURL = "https://login.cloudflarewarp.com"
const clientTimeout = time.Minute * 20

func login(c *cli.Context) error {
    configPath, err := homedir.Expand(defaultConfigDirs[0])
    if err != nil {
        return err
    }
    ok, err := fileExists(configPath)
    if !ok && err == nil {
        // create config directory if it doesn't already exist
        err = os.Mkdir(configPath, 0700)
    }
    if err != nil {
        return err
    }
    path := filepath.Join(configPath, defaultCredentialFile)
    fileInfo, err := os.Stat(path)
    if err == nil && fileInfo.Size() > 0 {
        fmt.Fprintf(os.Stderr, `You have an existing certificate at %s which login would overwrite.
If this is intentional, please move or delete that file then run this command again.
`, path)
        return nil
    }
    if err != nil && err.(*os.PathError).Err != syscall.ENOENT {
        return err
    }

    // for local debugging
    baseURL := baseCertStoreURL
    if c.IsSet("url") {
        baseURL = c.String("url")
    }
    // Generate a random post URL
    certURL := baseURL + generateRandomPath()
    loginURL, err := url.Parse(baseLoginURL)
    if err != nil {
        // shouldn't happen, URL is hardcoded
        return err
    }
    loginURL.RawQuery = "callback=" + url.QueryEscape(certURL)

    err = open(loginURL.String())
    if err != nil {
        fmt.Fprintf(os.Stderr, `Please open the following URL and log in with your Cloudflare account:

%s

Leave cloudflared running to install the certificate automatically.
`, loginURL.String())
    } else {
        fmt.Fprintf(os.Stderr, `A browser window should have opened at the following URL:

%s

If the browser failed to open, open it yourself and visit the URL above.

`, loginURL.String())
    }

    if download(certURL, path) {
        fmt.Fprintf(os.Stderr, `You have successfully logged in.
If you wish to copy your credentials to a server, they have been saved to:
%s
`, path)
    } else {
        fmt.Fprintf(os.Stderr, `Failed to write the certificate due to the following error:
%v

Your browser will download the certificate instead. You will have to manually
copy it to the following path:

%s

`, err, path)
    }
    return nil
}

// generateRandomPath generates a random URL to associate with the certificate.
func generateRandomPath() string {
    randomBytes := make([]byte, 40)
    _, err := rand.Read(randomBytes)
    if err != nil {
        panic(err)
    }
    return "/" + base32.StdEncoding.EncodeToString(randomBytes)
}

// open opens the specified URL in the default browser of the user.
func open(url string) error {
    var cmd string
    var args []string

    switch runtime.GOOS {
    case "windows":
        cmd = "cmd"
        args = []string{"/c", "start"}
    case "darwin":
        cmd = "open"
    default: // "linux", "freebsd", "openbsd", "netbsd"
        cmd = "xdg-open"
    }
    args = append(args, url)
    return exec.Command(cmd, args...).Start()
}

func download(certURL, filePath string) bool {
    client := &http.Client{Timeout: clientTimeout}
    // attempt a (long-running) certificate get
    for i := 0; i < 20; i++ {
        ok, err := tryDownload(client, certURL, filePath)
        if ok {
            putSuccess(client, certURL)
            return true
        }
        if err != nil {
            logger.WithError(err).Error("Error fetching certificate")
            return false
        }
    }
    return false
}

func tryDownload(client *http.Client, certURL, filePath string) (ok bool, err error) {
    resp, err := client.Get(certURL)
    if err != nil {
        return false, err
    }
    defer resp.Body.Close()
    if resp.StatusCode == 404 {
        return false, nil
    }
    if resp.StatusCode != 200 {
        return false, fmt.Errorf("Unexpected HTTP error code %d", resp.StatusCode)
    }
    if resp.Header.Get("Content-Type") != "application/x-pem-file" {
        return false, fmt.Errorf("Unexpected content type %s", resp.Header.Get("Content-Type"))
    }
    // write response
    file, err := os.Create(filePath)
    if err != nil {
        return false, err
    }
    defer file.Close()
    written, err := io.Copy(file, resp.Body)
    switch {
    case err != nil:
        return false, err
    case resp.ContentLength != written && resp.ContentLength != -1:
        return false, fmt.Errorf("Short read (%d bytes) from server while writing certificate", written)
    default:
        return true, nil
    }
}

func putSuccess(client *http.Client, certURL string) {
    // indicate success to the relay server
    req, err := http.NewRequest("PUT", certURL+"/ok", nil)
    if err != nil {
        logger.WithError(err).Error("HTTP request error")
        return
    }
    resp, err := client.Do(req)
    if err != nil {
        logger.WithError(err).Error("HTTP error")
        return
    }
    resp.Body.Close()
    if resp.StatusCode != 200 {
        logger.Errorf("Unexpected HTTP error code %d", resp.StatusCode)
    }
}
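tryDownload's contract (404 means the certificate is not ready yet; 200 with Content-Type application/x-pem-file means success) is straightforward to exercise with net/http/httptest. A hypothetical test sketch, not part of this commit (t.TempDir requires a newer Go toolchain than the one this commit targeted):

package main

import (
    "io/ioutil"
    "net/http"
    "net/http/httptest"
    "path/filepath"
    "testing"
)

// Hypothetical test exercising tryDownload's status/content-type contract.
func TestTryDownloadWritesCertificate(t *testing.T) {
    pem := []byte("-----BEGIN CERTIFICATE-----\nfake\n-----END CERTIFICATE-----\n")
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Content-Type", "application/x-pem-file")
        w.Write(pem)
    }))
    defer srv.Close()

    dest := filepath.Join(t.TempDir(), "cert.pem")
    ok, err := tryDownload(srv.Client(), srv.URL, dest)
    if err != nil || !ok {
        t.Fatalf("expected success, got ok=%v err=%v", ok, err)
    }
    got, _ := ioutil.ReadFile(dest)
    if string(got) != string(pem) {
        t.Fatal("downloaded certificate does not match what the server sent")
    }
}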
@@ -0,0 +1,190 @@
// +build darwin

package main

import (
    "fmt"
    "os"

    "gopkg.in/urfave/cli.v2"

    "github.com/pkg/errors"
)

const (
    launchdIdentifier = "com.cloudflare.cloudflared"
)

func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
    app.Commands = append(app.Commands, &cli.Command{
        Name:  "service",
        Usage: "Manages the Argo Tunnel launch agent",
        Subcommands: []*cli.Command{
            {
                Name:   "install",
                Usage:  "Install Argo Tunnel as a user launch agent",
                Action: installLaunchd,
            },
            {
                Name:   "uninstall",
                Usage:  "Uninstall the Argo Tunnel launch agent",
                Action: uninstallLaunchd,
            },
        },
    })
    app.Run(os.Args)
}

func newLaunchdTemplate(installPath, stdoutPath, stderrPath string) *ServiceTemplate {
    return &ServiceTemplate{
        Path: installPath,
        Content: fmt.Sprintf(`<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
  <dict>
    <key>Label</key>
    <string>%s</string>
    <key>ProgramArguments</key>
    <array>
      <string>{{ .Path }}</string>
    </array>
    <key>RunAtLoad</key>
    <true/>
    <key>StandardOutPath</key>
    <string>%s</string>
    <key>StandardErrorPath</key>
    <string>%s</string>
    <key>KeepAlive</key>
    <dict>
      <key>SuccessfulExit</key>
      <false/>
    </dict>
    <key>ThrottleInterval</key>
    <integer>20</integer>
  </dict>
</plist>`, launchdIdentifier, stdoutPath, stderrPath),
    }
}

func isRootUser() bool {
    return os.Geteuid() == 0
}

func installPath() (string, error) {
    // User is root, use /Library/LaunchDaemons instead of home directory
    if isRootUser() {
        return fmt.Sprintf("/Library/LaunchDaemons/%s.plist", launchdIdentifier), nil
    }
    userHomeDir, err := userHomeDir()
    if err != nil {
        return "", err
    }
    return fmt.Sprintf("%s/Library/LaunchAgents/%s.plist", userHomeDir, launchdIdentifier), nil
}

func stdoutPath() (string, error) {
    if isRootUser() {
        return fmt.Sprintf("/Library/Logs/%s.out.log", launchdIdentifier), nil
    }
    userHomeDir, err := userHomeDir()
    if err != nil {
        return "", err
    }
    return fmt.Sprintf("%s/Library/Logs/%s.out.log", userHomeDir, launchdIdentifier), nil
}

func stderrPath() (string, error) {
    if isRootUser() {
        return fmt.Sprintf("/Library/Logs/%s.err.log", launchdIdentifier), nil
    }
    userHomeDir, err := userHomeDir()
    if err != nil {
        return "", err
    }
    return fmt.Sprintf("%s/Library/Logs/%s.err.log", userHomeDir, launchdIdentifier), nil
}

func installLaunchd(c *cli.Context) error {
    if isRootUser() {
        logger.Infof("Installing Argo Tunnel client as a system launch daemon. " +
            "Argo Tunnel client will run at boot")
    } else {
        logger.Infof("Installing Argo Tunnel client as a user launch agent. " +
            "Note that Argo Tunnel client will only run when the user is logged in. " +
            "If you want to run Argo Tunnel client at boot, install with root permission. " +
            "For more information, visit https://developers.cloudflare.com/argo-tunnel/reference/service/")
    }
    etPath, err := os.Executable()
    if err != nil {
        logger.WithError(err).Errorf("Error determining executable path")
        return fmt.Errorf("Error determining executable path: %v", err)
    }
    installPath, err := installPath()
    if err != nil {
        return errors.Wrap(err, "Error determining install path")
    }
    stdoutPath, err := stdoutPath()
    if err != nil {
        return errors.Wrap(err, "error determining stdout path")
    }
    stderrPath, err := stderrPath()
    if err != nil {
        return errors.Wrap(err, "error determining stderr path")
    }
    launchdTemplate := newLaunchdTemplate(installPath, stdoutPath, stderrPath)
    templateArgs := ServiceTemplateArgs{Path: etPath}
    err = launchdTemplate.Generate(&templateArgs)
    if err != nil {
        logger.WithError(err).Errorf("error generating launchd template")
        return err
    }
    plistPath, err := launchdTemplate.ResolvePath()
    if err != nil {
        logger.WithError(err).Infof("error resolving launchd template path")
        return err
    }

    logger.Infof("Outputs are logged to %s and %s", stderrPath, stdoutPath)
    return runCommand("launchctl", "load", plistPath)
}

func uninstallLaunchd(c *cli.Context) error {
    if isRootUser() {
        logger.Infof("Uninstalling Argo Tunnel as a system launch daemon")
    } else {
        logger.Infof("Uninstalling Argo Tunnel as a user launch agent")
    }
    installPath, err := installPath()
    if err != nil {
        return errors.Wrap(err, "error determining install path")
    }
    stdoutPath, err := stdoutPath()
    if err != nil {
        return errors.Wrap(err, "error determining stdout path")
    }
    stderrPath, err := stderrPath()
    if err != nil {
        return errors.Wrap(err, "error determining stderr path")
    }
    launchdTemplate := newLaunchdTemplate(installPath, stdoutPath, stderrPath)
    plistPath, err := launchdTemplate.ResolvePath()
    if err != nil {
        logger.WithError(err).Infof("error resolving launchd template path")
        return err
    }
    err = runCommand("launchctl", "unload", plistPath)
    if err != nil {
        logger.WithError(err).Infof("error unloading")
        return err
    }

    logger.Infof("Outputs are logged to %s and %s", stderrPath, stdoutPath)
    return launchdTemplate.Remove()
}
@ -0,0 +1,564 @@
package main

import (
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/cloudflare/cloudflared/hello"
	"github.com/cloudflare/cloudflared/metrics"
	"github.com/cloudflare/cloudflared/origin"
	"github.com/cloudflare/cloudflared/tunneldns"

	"github.com/getsentry/raven-go"
	"github.com/mitchellh/go-homedir"
	"gopkg.in/urfave/cli.v2"
	"gopkg.in/urfave/cli.v2/altsrc"

	"github.com/coreos/go-systemd/daemon"
	"github.com/facebookgo/grace/gracenet"
	"github.com/pkg/errors"
)

const (
	sentryDSN       = "https://56a9c9fa5c364ab28f34b14f35ea0f1b:3e8827f6f9f740738eb11138f7bebb68@sentry.io/189878"
	developerPortal = "https://developers.cloudflare.com/argo-tunnel"
	quickStartUrl   = developerPortal + "/quickstart/quickstart/"
	serviceUrl      = developerPortal + "/reference/service/"
	argumentsUrl    = developerPortal + "/reference/arguments/"
	licenseUrl      = developerPortal + "/licence/"
)

var (
	Version   = "DEV"
	BuildTime = "unknown"
)

func main() {
	metrics.RegisterBuildInfo(BuildTime, Version)
	raven.SetDSN(sentryDSN)
	raven.SetRelease(Version)

	// Force shutdown channel used by the app. When closed, app must terminate.
	// Windows service manager closes this channel when it receives shutdown command.
	shutdownC := make(chan struct{})
	// Graceful shutdown channel used by the app. When closed, app must terminate.
	// Windows service manager closes this channel when it receives stop command.
	graceShutdownC := make(chan struct{})

	app := &cli.App{}
	app.Name = "cloudflared"
	app.Copyright = fmt.Sprintf(`(c) %d Cloudflare Inc.
   Use is subject to the license agreement at %s`, time.Now().Year(), licenseUrl)
	app.Usage = "Cloudflare reverse tunnelling proxy agent"
	app.ArgsUsage = "origin-url"
	app.Version = fmt.Sprintf("%s (built %s)", Version, BuildTime)
	app.Description = `A reverse tunnel proxy agent that connects to Cloudflare's infrastructure.
   Upon connecting, you are assigned a unique subdomain on cftunnel.com.
   You need to specify a hostname on a zone you control.
   A DNS record will be created to CNAME your hostname to the unique subdomain on cftunnel.com.

   Requests made to Cloudflare's servers for your hostname will be proxied
   through the tunnel to your local webserver.`
	app.Flags = []cli.Flag{
		&cli.StringFlag{
			Name:  "config",
			Usage: "Specifies a config file in YAML format.",
			Value: findDefaultConfigPath(),
		},
		altsrc.NewDurationFlag(&cli.DurationFlag{
			Name:  "autoupdate-freq",
			Usage: "Autoupdate frequency. Default is 24h.",
			Value: time.Hour * 24,
		}),
		altsrc.NewBoolFlag(&cli.BoolFlag{
			Name:  "no-autoupdate",
			Usage: "Disable periodic check for updates, restarting the server with the new version.",
			Value: false,
		}),
		altsrc.NewBoolFlag(&cli.BoolFlag{
			Name:   "is-autoupdated",
			Usage:  "Signal the new process that Argo Tunnel client has been autoupdated",
			Value:  false,
			Hidden: true,
		}),
		altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
			Name:    "edge",
			Usage:   "Address of the Cloudflare tunnel server.",
			EnvVars: []string{"TUNNEL_EDGE"},
			Hidden:  true,
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "cacert",
			Usage:   "Certificate Authority authenticating the Cloudflare tunnel connection.",
			EnvVars: []string{"TUNNEL_CACERT"},
			Hidden:  true,
		}),
		altsrc.NewBoolFlag(&cli.BoolFlag{
			Name:    "no-tls-verify",
			Usage:   "Disables TLS verification of the certificate presented by your origin. Will allow any certificate from the origin to be accepted. Note: The connection from your machine to Cloudflare's Edge is still encrypted.",
			EnvVars: []string{"NO_TLS_VERIFY"},
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "origincert",
			Usage:   "Path to the certificate generated for your origin when you run cloudflared login.",
			EnvVars: []string{"TUNNEL_ORIGIN_CERT"},
			Value:   findDefaultOriginCertPath(),
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "origin-ca-pool",
			Usage:   "Path to the CA for the certificate of your origin. This option should be used only if your certificate is not signed by Cloudflare.",
			EnvVars: []string{"TUNNEL_ORIGIN_CA_POOL"},
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "url",
			Value:   "https://localhost:8080",
			Usage:   "Connect to the local webserver at `URL`.",
			EnvVars: []string{"TUNNEL_URL"},
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "hostname",
			Usage:   "Set a hostname on a Cloudflare zone to route traffic through this tunnel.",
			EnvVars: []string{"TUNNEL_HOSTNAME"},
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "origin-server-name",
			Usage:   "Hostname on the origin server certificate.",
			EnvVars: []string{"TUNNEL_ORIGIN_SERVER_NAME"},
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "id",
			Usage:   "A unique identifier used to tie connections to this tunnel instance.",
			EnvVars: []string{"TUNNEL_ID"},
			Hidden:  true,
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "lb-pool",
			Usage:   "The name of a (new/existing) load balancing pool to add this origin to.",
			EnvVars: []string{"TUNNEL_LB_POOL"},
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "api-key",
			Usage:   "This parameter has been deprecated since version 2017.10.1.",
			EnvVars: []string{"TUNNEL_API_KEY"},
			Hidden:  true,
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "api-email",
			Usage:   "This parameter has been deprecated since version 2017.10.1.",
			EnvVars: []string{"TUNNEL_API_EMAIL"},
			Hidden:  true,
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "api-ca-key",
			Usage:   "This parameter has been deprecated since version 2017.10.1.",
			EnvVars: []string{"TUNNEL_API_CA_KEY"},
			Hidden:  true,
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "metrics",
			Value:   "localhost:",
			Usage:   "Listen address for metrics reporting.",
			EnvVars: []string{"TUNNEL_METRICS"},
		}),
		altsrc.NewDurationFlag(&cli.DurationFlag{
			Name:    "metrics-update-freq",
			Usage:   "Frequency to update tunnel metrics",
			Value:   time.Second * 5,
			EnvVars: []string{"TUNNEL_METRICS_UPDATE_FREQ"},
		}),
		altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
			Name:    "tag",
			Usage:   "Custom tags used to identify this tunnel, in format `KEY=VALUE`. Multiple tags may be specified",
			EnvVars: []string{"TUNNEL_TAG"},
		}),
		altsrc.NewDurationFlag(&cli.DurationFlag{
			Name:   "heartbeat-interval",
			Usage:  "Minimum idle time before sending a heartbeat.",
			Value:  time.Second * 5,
			Hidden: true,
		}),
		altsrc.NewUint64Flag(&cli.Uint64Flag{
			Name:   "heartbeat-count",
			Usage:  "Minimum number of unacked heartbeats to send before closing the connection.",
			Value:  5,
			Hidden: true,
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "loglevel",
			Value:   "info",
			Usage:   "Application logging level {panic, fatal, error, warn, info, debug}",
			EnvVars: []string{"TUNNEL_LOGLEVEL"},
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "proto-loglevel",
			Value:   "warn",
			Usage:   "Protocol logging level {panic, fatal, error, warn, info, debug}",
			EnvVars: []string{"TUNNEL_PROTO_LOGLEVEL"},
		}),
		altsrc.NewUintFlag(&cli.UintFlag{
			Name:    "retries",
			Value:   5,
			Usage:   "Maximum number of retries for connection/protocol errors.",
			EnvVars: []string{"TUNNEL_RETRIES"},
		}),
		altsrc.NewBoolFlag(&cli.BoolFlag{
			Name:    "hello-world",
			Value:   false,
			Usage:   "Run Hello World Server",
			EnvVars: []string{"TUNNEL_HELLO_WORLD"},
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "pidfile",
			Usage:   "Write the application's PID to this file after first successful connection.",
			EnvVars: []string{"TUNNEL_PIDFILE"},
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "logfile",
			Usage:   "Save application log to this file for reporting issues.",
			EnvVars: []string{"TUNNEL_LOGFILE"},
		}),
		altsrc.NewIntFlag(&cli.IntFlag{
			Name:   "ha-connections",
			Value:  4,
			Hidden: true,
		}),
		altsrc.NewDurationFlag(&cli.DurationFlag{
			Name:  "proxy-connect-timeout",
			Usage: "HTTP proxy timeout for establishing a new connection",
			Value: time.Second * 30,
		}),
		altsrc.NewDurationFlag(&cli.DurationFlag{
			Name:  "proxy-tls-timeout",
			Usage: "HTTP proxy timeout for completing a TLS handshake",
			Value: time.Second * 10,
		}),
		altsrc.NewDurationFlag(&cli.DurationFlag{
			Name:  "proxy-tcp-keepalive",
			Usage: "HTTP proxy TCP keepalive duration",
			Value: time.Second * 30,
		}),
		altsrc.NewBoolFlag(&cli.BoolFlag{
			Name:  "proxy-no-happy-eyeballs",
			Usage: "HTTP proxy should disable \"happy eyeballs\" for IPv4/v6 fallback",
		}),
		altsrc.NewIntFlag(&cli.IntFlag{
			Name:  "proxy-keepalive-connections",
			Usage: "HTTP proxy maximum keepalive connection pool size",
			Value: 100,
		}),
		altsrc.NewDurationFlag(&cli.DurationFlag{
			Name:  "proxy-keepalive-timeout",
			Usage: "HTTP proxy timeout for closing an idle connection",
			Value: time.Second * 90,
		}),
		altsrc.NewBoolFlag(&cli.BoolFlag{
			Name:    "proxy-dns",
			Usage:   "Run a DNS over HTTPS proxy server.",
			EnvVars: []string{"TUNNEL_DNS"},
		}),
		altsrc.NewIntFlag(&cli.IntFlag{
			Name:    "proxy-dns-port",
			Value:   53,
			Usage:   "Listen on given port for the DNS over HTTPS proxy server.",
			EnvVars: []string{"TUNNEL_DNS_PORT"},
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "proxy-dns-address",
			Usage:   "Listen address for the DNS over HTTPS proxy server.",
			Value:   "localhost",
			EnvVars: []string{"TUNNEL_DNS_ADDRESS"},
		}),
		altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
			Name:    "proxy-dns-upstream",
			Usage:   "Upstream endpoint URL; you can specify multiple endpoints for redundancy.",
			Value:   cli.NewStringSlice("https://1.1.1.1/dns-query", "https://1.0.0.1/dns-query"),
			EnvVars: []string{"TUNNEL_DNS_UPSTREAM"},
		}),
		altsrc.NewDurationFlag(&cli.DurationFlag{
			Name:    "grace-period",
			Usage:   "Duration to accept new requests after cloudflared receives first SIGINT/SIGTERM. A second SIGINT/SIGTERM will force cloudflared to shut down immediately.",
			Value:   time.Second * 30,
			EnvVars: []string{"TUNNEL_GRACE_PERIOD"},
			Hidden:  true,
		}),
		altsrc.NewUintFlag(&cli.UintFlag{
			Name:    "compression-quality",
			Value:   0,
			Usage:   "Use cross-stream compression instead of HTTP compression. 0-off, 1-low, 2-medium, >=3-high",
			EnvVars: []string{"TUNNEL_COMPRESSION_LEVEL"},
			Hidden:  true,
		}),
		altsrc.NewBoolFlag(&cli.BoolFlag{
			Name:    "no-chunked-encoding",
			Usage:   "Disables chunked transfer encoding; useful if you are running a WSGI server.",
			EnvVars: []string{"TUNNEL_NO_CHUNKED_ENCODING"},
		}),
	}
	app.Action = func(c *cli.Context) (err error) {
		tags := make(map[string]string)
		tags["hostname"] = c.String("hostname")
		raven.SetTagsContext(tags)
		raven.CapturePanic(func() { err = startServer(c, shutdownC, graceShutdownC) }, nil)
		if err != nil {
			raven.CaptureError(err, nil)
		}
		return err
	}
	app.Before = func(context *cli.Context) error {
		if context.String("config") == "" {
			logger.Warnf("Cannot determine default configuration path. No file %v in %v", defaultConfigFiles, defaultConfigDirs)
		}
		inputSource, err := findInputSourceContext(context)
		if err != nil {
			logger.WithError(err).Infof("Cannot load configuration from %s", context.String("config"))
			return err
		} else if inputSource != nil {
			err := altsrc.ApplyInputSourceValues(context, inputSource, app.Flags)
			if err != nil {
				logger.WithError(err).Infof("Cannot apply configuration from %s", context.String("config"))
				return err
			}
			logger.Infof("Applied configuration from %s", context.String("config"))
		}
		return nil
	}
	app.Commands = []*cli.Command{
		{
			Name:      "update",
			Action:    update,
			Usage:     "Update the agent if a new version exists",
			ArgsUsage: " ",
			Description: `Looks for a new version on the official download server.
If a new version exists, updates the agent binary and quits.
Otherwise, does nothing.

To determine if an update happened in a script, check for error code 64.`,
		},
		{
			Name:      "login",
			Action:    login,
			Usage:     "Generate a configuration file with your login details",
			ArgsUsage: " ",
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name:   "url",
					Hidden: true,
				},
			},
		},
		{
			Name:   "hello",
			Action: helloWorld,
			Usage:  "Run a simple \"Hello World\" server for testing Argo Tunnel.",
			Flags: []cli.Flag{
				&cli.IntFlag{
					Name:  "port",
					Usage: "Listen on the selected port.",
					Value: 8080,
				},
			},
			ArgsUsage: " ", // can't be the empty string or we get the default output
		},
		{
			Name:   "proxy-dns",
			Action: tunneldns.Run,
			Usage:  "Run a DNS over HTTPS proxy server.",
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name:    "metrics",
					Value:   "localhost:",
					Usage:   "Listen address for metrics reporting.",
					EnvVars: []string{"TUNNEL_METRICS"},
				},
				&cli.StringFlag{
					Name:    "address",
					Usage:   "Listen address for the DNS over HTTPS proxy server.",
					Value:   "localhost",
					EnvVars: []string{"TUNNEL_DNS_ADDRESS"},
				},
				&cli.IntFlag{
					Name:    "port",
					Usage:   "Listen on given port for the DNS over HTTPS proxy server.",
					Value:   53,
					EnvVars: []string{"TUNNEL_DNS_PORT"},
				},
				&cli.StringSliceFlag{
					Name:    "upstream",
					Usage:   "Upstream endpoint URL; you can specify multiple endpoints for redundancy.",
					Value:   cli.NewStringSlice("https://1.1.1.1/dns-query", "https://1.0.0.1/dns-query"),
					EnvVars: []string{"TUNNEL_DNS_UPSTREAM"},
				},
			},
			ArgsUsage: " ", // can't be the empty string or we get the default output
		},
	}
	runApp(app, shutdownC, graceShutdownC)
}

func startServer(c *cli.Context, shutdownC, graceShutdownC chan struct{}) error {
	var wg sync.WaitGroup
	listeners := gracenet.Net{}
	errC := make(chan error)
	connectedSignal := make(chan struct{})
	dnsReadySignal := make(chan struct{})

	// check whether client provides enough flags or env variables. If not, print help.
	if ok := enoughOptionsSet(c); !ok {
		return nil
	}

	if err := configMainLogger(c); err != nil {
		return errors.Wrap(err, "Error configuring logger")
	}

	protoLogger, err := configProtoLogger(c)
	if err != nil {
		return errors.Wrap(err, "Error configuring protocol logger")
	}
	if c.String("logfile") != "" {
		if err := initLogFile(c, logger, protoLogger); err != nil {
			logger.Error(err)
		}
	}

	if err := handleDeprecatedOptions(c); err != nil {
		return err
	}

	buildInfo := origin.GetBuildInfo()
	logger.Infof("Build info: %+v", *buildInfo)
	logger.Infof("Version %s", Version)
	logClientOptions(c)

	if c.IsSet("proxy-dns") {
		wg.Add(1)
		go func() {
			defer wg.Done()
			errC <- runDNSProxyServer(c, dnsReadySignal, shutdownC)
		}()
	} else {
		close(dnsReadySignal)
	}

	// Wait for proxy-dns to come up (if used)
	<-dnsReadySignal

	// update needs to be after DNS proxy is up to resolve equinox server address
	if isAutoupdateEnabled(c) {
		logger.Infof("Autoupdate frequency is set to %v", c.Duration("autoupdate-freq"))
		wg.Add(1)
		go func() {
			defer wg.Done()
			errC <- autoupdate(c.Duration("autoupdate-freq"), &listeners, shutdownC)
		}()
	}

	metricsListener, err := listeners.Listen("tcp", c.String("metrics"))
	if err != nil {
		logger.WithError(err).Error("Error opening metrics server listener")
		return errors.Wrap(err, "Error opening metrics server listener")
	}
	defer metricsListener.Close()
	wg.Add(1)
	go func() {
		defer wg.Done()
		errC <- metrics.ServeMetrics(metricsListener, shutdownC, logger)
	}()

	go notifySystemd(connectedSignal)
	if c.IsSet("pidfile") {
		go writePidFile(connectedSignal, c.String("pidfile"))
	}

	// Serve DNS proxy stand-alone if no hostname or tag or app is going to run
	if dnsProxyStandAlone(c) {
		close(connectedSignal)
		// no grace period, handle SIGINT/SIGTERM immediately
		return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, 0)
	}

	if c.IsSet("hello-world") {
		helloListener, err := hello.CreateTLSListener("127.0.0.1:")
		if err != nil {
			logger.WithError(err).Error("Cannot start Hello World Server")
			return errors.Wrap(err, "Cannot start Hello World Server")
		}
		defer helloListener.Close()
		wg.Add(1)
		go func() {
			defer wg.Done()
			hello.StartHelloWorldServer(logger, helloListener, shutdownC)
		}()
		c.Set("url", "https://"+helloListener.Addr().String())
	}

	tunnelConfig, err := prepareTunnelConfig(c, buildInfo, logger, protoLogger)
	if err != nil {
		return err
	}

	wg.Add(1)
	go func() {
		defer wg.Done()
		errC <- origin.StartTunnelDaemon(tunnelConfig, graceShutdownC, connectedSignal)
	}()

	return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, c.Duration("grace-period"))
}

func waitToShutdown(wg *sync.WaitGroup,
	errC chan error,
	shutdownC, graceShutdownC chan struct{},
	gracePeriod time.Duration,
) error {
	var err error
	if gracePeriod > 0 {
		err = waitForSignalWithGraceShutdown(errC, shutdownC, graceShutdownC, gracePeriod)
	} else {
		err = waitForSignal(errC, shutdownC)
		close(graceShutdownC)
	}

	if err != nil {
		logger.WithError(err).Error("Quitting due to error")
	} else {
		logger.Info("Quitting...")
	}
	// Wait for clean exit, discarding all errors
	go func() {
		for range errC {
		}
	}()
	wg.Wait()
	return err
}

func notifySystemd(waitForSignal chan struct{}) {
	<-waitForSignal
	daemon.SdNotify(false, "READY=1")
}

func writePidFile(waitForSignal chan struct{}, pidFile string) {
	<-waitForSignal
	file, err := os.Create(pidFile)
	if err != nil {
		logger.WithError(err).Errorf("Unable to write pid to %s", pidFile)
		return
	}
	defer file.Close()
	fmt.Fprintf(file, "%d", os.Getpid())
}

func userHomeDir() (string, error) {
	// This returns the home dir of the executing user using an OS-specific method
	// for discovering the home dir. It's not recommended to call this function
	// when the user has root permission, as $HOME depends on what options the user
	// used with sudo.
	homeDir, err := homedir.Dir()
	if err != nil {
		logger.WithError(err).Error("Cannot determine home directory for the user")
		return "", errors.Wrap(err, "Cannot determine home directory for the user")
	}
	return homeDir, nil
}

@@ -0,0 +1,33 @@
package main

import (
	"github.com/cloudflare/cloudflared/tunneldns"

	"gopkg.in/urfave/cli.v2"

	"github.com/pkg/errors"
)

func runDNSProxyServer(c *cli.Context, dnsReadySignal, shutdownC chan struct{}) error {
	port := c.Int("proxy-dns-port")
	if port <= 0 || port > 65535 {
		logger.Errorf("The 'proxy-dns-port' must be a valid port number in <1, 65535> range.")
		return errors.New("The 'proxy-dns-port' must be a valid port number in <1, 65535> range.")
	}
	listener, err := tunneldns.CreateListener(c.String("proxy-dns-address"), uint16(port), c.StringSlice("proxy-dns-upstream"))
	if err != nil {
		close(dnsReadySignal)
		listener.Stop()
		logger.WithError(err).Error("Cannot create the DNS over HTTPS proxy server")
		return errors.Wrap(err, "Cannot create the DNS over HTTPS proxy server")
	}

	err = listener.Start(dnsReadySignal)
	if err != nil {
		logger.WithError(err).Error("Cannot start the DNS over HTTPS proxy server")
		return errors.Wrap(err, "Cannot start the DNS over HTTPS proxy server")
	}
	<-shutdownC
	listener.Stop()
	return nil
}
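
// A minimal usage sketch of driving the tunneldns listener directly. It
// assumes the CreateListener/Start/Stop signatures visible in
// runDNSProxyServer above, and assumes Start signals on readyC once the
// server is accepting queries (as the <-dnsReadySignal wait in main.go
// suggests). The helper name, address, port, and upstream are illustrative.
func exampleStandaloneDNSProxy(shutdownC chan struct{}) error {
	readyC := make(chan struct{})
	listener, err := tunneldns.CreateListener("localhost", 53, []string{"https://1.1.1.1/dns-query"})
	if err != nil {
		return err
	}
	if err := listener.Start(readyC); err != nil {
		return err
	}
	<-readyC    // proxy is ready to serve
	<-shutdownC // run until asked to stop
	listener.Stop()
	return nil
}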
@@ -0,0 +1,192 @@
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"text/template"

	"github.com/mitchellh/go-homedir"
)

type ServiceTemplate struct {
	Path     string
	Content  string
	FileMode os.FileMode
}

type ServiceTemplateArgs struct {
	Path string
}

func (st *ServiceTemplate) ResolvePath() (string, error) {
	resolvedPath, err := homedir.Expand(st.Path)
	if err != nil {
		return "", fmt.Errorf("error resolving path %s: %v", st.Path, err)
	}
	return resolvedPath, nil
}

func (st *ServiceTemplate) Generate(args *ServiceTemplateArgs) error {
	tmpl, err := template.New(st.Path).Parse(st.Content)
	if err != nil {
		return fmt.Errorf("error generating %s template: %v", st.Path, err)
	}
	resolvedPath, err := st.ResolvePath()
	if err != nil {
		return err
	}
	var buffer bytes.Buffer
	err = tmpl.Execute(&buffer, args)
	if err != nil {
		return fmt.Errorf("error generating %s: %v", st.Path, err)
	}
	fileMode := os.FileMode(0644)
	if st.FileMode != 0 {
		fileMode = st.FileMode
	}
	err = ioutil.WriteFile(resolvedPath, buffer.Bytes(), fileMode)
	if err != nil {
		return fmt.Errorf("error writing %s: %v", resolvedPath, err)
	}
	return nil
}

func (st *ServiceTemplate) Remove() error {
	resolvedPath, err := st.ResolvePath()
	if err != nil {
		return err
	}
	err = os.Remove(resolvedPath)
	if err != nil {
		return fmt.Errorf("error deleting %s: %v", resolvedPath, err)
	}
	return nil
}
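
// A minimal sketch of how ServiceTemplate is used, based only on the methods
// above; the helper name, path, and template content are illustrative
// placeholders, not the template shipped with cloudflared.
func exampleServiceTemplate() error {
	st := &ServiceTemplate{
		// "~" is expanded by ResolvePath via homedir.Expand.
		Path:    "~/Library/LaunchAgents/com.example.agent.plist",
		Content: "<plist><string>{{ .Path }}</string></plist>", // text/template syntax
	}
	// Generate renders Content with the args and writes the result to Path
	// (mode 0644 unless FileMode is set).
	if err := st.Generate(&ServiceTemplateArgs{Path: "/usr/local/bin/cloudflared"}); err != nil {
		return err
	}
	// Remove deletes the rendered file again, e.g. on uninstall.
	return st.Remove()
}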

func runCommand(command string, args ...string) error {
	cmd := exec.Command(command, args...)
	stderr, err := cmd.StderrPipe()
	if err != nil {
		logger.WithError(err).Infof("error getting stderr pipe")
		return fmt.Errorf("error getting stderr pipe: %v", err)
	}
	err = cmd.Start()
	if err != nil {
		logger.WithError(err).Infof("error starting %s", command)
		return fmt.Errorf("error starting %s: %v", command, err)
	}
	commandErr, _ := ioutil.ReadAll(stderr)
	if len(commandErr) > 0 {
		logger.Errorf("%s: %s", command, commandErr)
	}
	err = cmd.Wait()
	if err != nil {
		logger.WithError(err).Infof("%s returned error", command)
		return fmt.Errorf("%s returned with error: %v", command, err)
	}
	return nil
}

func ensureConfigDirExists(configDir string) error {
	ok, err := fileExists(configDir)
	if !ok && err == nil {
		err = os.Mkdir(configDir, 0700)
	}
	return err
}

// openFile opens the file at path. If create is set and the file already
// exists with non-zero size, it returns (nil, true, nil) without opening it.
func openFile(path string, create bool) (file *os.File, exists bool, err error) {
	expandedPath, err := homedir.Expand(path)
	if err != nil {
		return nil, false, err
	}
	if create {
		fileInfo, err := os.Stat(expandedPath)
		if err == nil && fileInfo.Size() > 0 {
			return nil, true, nil
		}
		file, err = os.OpenFile(expandedPath, os.O_RDWR|os.O_CREATE, 0600)
	} else {
		file, err = os.Open(expandedPath)
	}
	return file, false, err
}
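
// A small sketch of openFile's three-way contract, with an illustrative path:
// (nil, true, nil) means the file already has content and should be left
// alone; otherwise the caller owns (and must close) the returned *os.File.
func exampleOpenFile() error {
	destFile, exists, err := openFile("~/.cloudflared/config.yml", true)
	if err != nil {
		return err
	} else if exists {
		return nil // non-empty file already present; nothing to do
	}
	defer destFile.Close()
	// ... write initial contents to destFile ...
	return nil
}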

func copyCertificate(srcConfigDir, destConfigDir, credentialFile string) error {
	destCredentialPath := filepath.Join(destConfigDir, credentialFile)
	destFile, exists, err := openFile(destCredentialPath, true)
	if err != nil {
		return err
	} else if exists {
		// credentials already exist, do nothing
		return nil
	}
	defer destFile.Close()

	srcCredentialPath := filepath.Join(srcConfigDir, credentialFile)
	srcFile, _, err := openFile(srcCredentialPath, false)
	if err != nil {
		return err
	}
	defer srcFile.Close()

	// Copy certificate
	_, err = io.Copy(destFile, srcFile)
	if err != nil {
		return fmt.Errorf("unable to copy %s to %s: %v", srcCredentialPath, destCredentialPath, err)
	}

	return nil
}

func copyCredentials(serviceConfigDir, defaultConfigDir, defaultConfigFile, defaultCredentialFile string) error {
	if err := ensureConfigDirExists(serviceConfigDir); err != nil {
		return err
	}

	if err := copyCertificate(defaultConfigDir, serviceConfigDir, defaultCredentialFile); err != nil {
		return err
	}

	// Copy or create config
	destConfigPath := filepath.Join(serviceConfigDir, defaultConfigFile)
	destFile, exists, err := openFile(destConfigPath, true)
	if err != nil {
		logger.WithError(err).Infof("cannot open %s", destConfigPath)
		return err
	} else if exists {
		// config already exists, do nothing
		return nil
	}
	defer destFile.Close()

	srcConfigPath := filepath.Join(defaultConfigDir, defaultConfigFile)
	srcFile, _, err := openFile(srcConfigPath, false)
	if err != nil {
		fmt.Println("Your service needs a config file that at least specifies the hostname option.")
		fmt.Println("Type in a hostname now, or leave it blank and create the config file later.")
		fmt.Print("Hostname: ")
		reader := bufio.NewReader(os.Stdin)
		input, _ := reader.ReadString('\n')
		if input == "" {
			return err
		}
		fmt.Fprintf(destFile, "hostname: %s\n", input)
	} else {
		defer srcFile.Close()
		_, err = io.Copy(destFile, srcFile)
		if err != nil {
			return fmt.Errorf("unable to copy %s to %s: %v", srcConfigPath, destConfigPath, err)
		}
		logger.Infof("Copied %s to %s", srcConfigPath, destConfigPath)
	}

	return nil
}

@@ -0,0 +1,79 @@
package main

import (
	"os"
	"os/signal"
	"syscall"
	"time"
)

// waitForSignal notifies all routines to shut down immediately by closing
// shutdownC when one of the routines in main exits, or when this process
// receives SIGTERM/SIGINT.
func waitForSignal(errC chan error, shutdownC chan struct{}) error {
	signals := make(chan os.Signal, 10)
	signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT)
	defer signal.Stop(signals)

	select {
	case err := <-errC:
		close(shutdownC)
		return err
	case <-signals:
		close(shutdownC)
	case <-shutdownC:
	}
	return nil
}

// waitForSignalWithGraceShutdown notifies all routines to shut down immediately
// by closing shutdownC when one of the routines in main exits.
// When this process receives SIGTERM/SIGINT, it closes graceShutdownC to
// notify certain routines to start graceful shutdown. When the grace period is
// over, or when some routine exits, it notifies the rest of the routines to
// shut down immediately by closing shutdownC.
// In the case of handling commands from Windows Service Manager, closing
// graceShutdownC initiates graceful shutdown.
func waitForSignalWithGraceShutdown(errC chan error,
	shutdownC, graceShutdownC chan struct{},
	gracePeriod time.Duration,
) error {
	signals := make(chan os.Signal, 10)
	signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT)
	defer signal.Stop(signals)

	select {
	case err := <-errC:
		close(graceShutdownC)
		close(shutdownC)
		return err
	case <-signals:
		close(graceShutdownC)
		waitForGracePeriod(signals, errC, shutdownC, gracePeriod)
	case <-graceShutdownC:
		waitForGracePeriod(signals, errC, shutdownC, gracePeriod)
	case <-shutdownC:
		close(graceShutdownC)
	}

	return nil
}

func waitForGracePeriod(signals chan os.Signal,
	errC chan error,
	shutdownC chan struct{},
	gracePeriod time.Duration,
) {
	logger.Infof("Initiating graceful shutdown...")
	// Unregister signal handler early, so the client can send a second SIGTERM/SIGINT
	// to force shutdown cloudflared
	signal.Stop(signals)
	graceTimerTick := time.Tick(gracePeriod)
	// send close signal via shutdownC when grace period expires or when an
	// error is encountered.
	select {
	case <-graceTimerTick:
	case <-errC:
	}
	close(shutdownC)
}
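
// A minimal sketch of the wiring these helpers expect, mirroring the call
// sites in main.go: a worker stops accepting new work when graceShutdownC
// closes, aborts outstanding work when shutdownC closes, and reports its exit
// on errC. The helper name and worker body are illustrative stubs.
func exampleShutdownWiring() error {
	errC := make(chan error, 1)
	shutdownC := make(chan struct{})
	graceShutdownC := make(chan struct{})
	go func() {
		<-graceShutdownC // first SIGINT/SIGTERM: stop accepting new work
		<-shutdownC      // grace period expired: abort outstanding work
		errC <- nil
	}()
	// Blocks until a routine reports an error or the process is signalled.
	return waitForSignalWithGraceShutdown(errC, shutdownC, graceShutdownC, 30*time.Second)
}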
@@ -0,0 +1,152 @@
package main

import (
	"fmt"
	"syscall"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

const tick = 100 * time.Millisecond

var (
	serverErr        = fmt.Errorf("server error")
	shutdownErr      = fmt.Errorf("receive shutdown")
	graceShutdownErr = fmt.Errorf("receive grace shutdown")
)

func testChannelClosed(t *testing.T, c chan struct{}) {
	select {
	case <-c:
		return
	default:
		t.Fatal("Channel should be closed")
	}
}

func TestWaitForSignal(t *testing.T) {
	// Test handling server error
	errC := make(chan error)
	shutdownC := make(chan struct{})

	go func() {
		errC <- serverErr
	}()

	// received error, shutdownC should be closed
	err := waitForSignal(errC, shutdownC)
	assert.Equal(t, serverErr, err)
	testChannelClosed(t, shutdownC)

	// Test handling SIGTERM & SIGINT
	for _, sig := range []syscall.Signal{syscall.SIGTERM, syscall.SIGINT} {
		errC = make(chan error)
		shutdownC = make(chan struct{})

		go func(shutdownC chan struct{}) {
			<-shutdownC
			errC <- shutdownErr
		}(shutdownC)

		go func(sig syscall.Signal) {
			// sleep for a tick to prevent sending signal before calling waitForSignal
			time.Sleep(tick)
			syscall.Kill(syscall.Getpid(), sig)
		}(sig)

		err = waitForSignal(errC, shutdownC)
		assert.Equal(t, nil, err)
		assert.Equal(t, shutdownErr, <-errC)
		testChannelClosed(t, shutdownC)
	}
}

func TestWaitForSignalWithGraceShutdown(t *testing.T) {
	// Test server returning error
	errC := make(chan error)
	shutdownC := make(chan struct{})
	graceshutdownC := make(chan struct{})

	go func() {
		errC <- serverErr
	}()

	// received error, both shutdownC and graceshutdownC should be closed
	err := waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick)
	assert.Equal(t, serverErr, err)
	testChannelClosed(t, shutdownC)
	testChannelClosed(t, graceshutdownC)

	// shutdownC closed, graceshutdownC should also be closed and no error
	errC = make(chan error)
	shutdownC = make(chan struct{})
	graceshutdownC = make(chan struct{})
	close(shutdownC)
	err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick)
	assert.NoError(t, err)
	testChannelClosed(t, shutdownC)
	testChannelClosed(t, graceshutdownC)

	// graceshutdownC closed, shutdownC should also be closed and no error
	errC = make(chan error)
	shutdownC = make(chan struct{})
	graceshutdownC = make(chan struct{})
	close(graceshutdownC)
	err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick)
	assert.NoError(t, err)
	testChannelClosed(t, shutdownC)
	testChannelClosed(t, graceshutdownC)

	// Test handling SIGTERM & SIGINT
	for _, sig := range []syscall.Signal{syscall.SIGTERM, syscall.SIGINT} {
		errC := make(chan error)
		shutdownC = make(chan struct{})
		graceshutdownC = make(chan struct{})

		go func(shutdownC, graceshutdownC chan struct{}) {
			<-graceshutdownC
			<-shutdownC
			errC <- graceShutdownErr
		}(shutdownC, graceshutdownC)

		go func(sig syscall.Signal) {
			// sleep for a tick to prevent sending signal before calling waitForSignalWithGraceShutdown
			time.Sleep(tick)
			syscall.Kill(syscall.Getpid(), sig)
		}(sig)

		err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick)
		assert.Equal(t, nil, err)
		assert.Equal(t, graceShutdownErr, <-errC)
		testChannelClosed(t, shutdownC)
		testChannelClosed(t, graceshutdownC)
	}

	// Test handling SIGTERM & SIGINT, server sends error before end of grace period
	for _, sig := range []syscall.Signal{syscall.SIGTERM, syscall.SIGINT} {
		errC := make(chan error)
		shutdownC = make(chan struct{})
		graceshutdownC = make(chan struct{})

		go func(shutdownC, graceshutdownC chan struct{}) {
			<-graceshutdownC
			errC <- graceShutdownErr
			<-shutdownC
			errC <- shutdownErr
		}(shutdownC, graceshutdownC)

		go func(sig syscall.Signal) {
			// sleep for a tick to prevent sending signal before calling waitForSignalWithGraceShutdown
			time.Sleep(tick)
			syscall.Kill(syscall.Getpid(), sig)
		}(sig)

		err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick)
		assert.Equal(t, nil, err)
		assert.Equal(t, shutdownErr, <-errC)
		testChannelClosed(t, shutdownC)
		testChannelClosed(t, graceshutdownC)
	}
}

@@ -0,0 +1,32 @@
package main

import (
	"fmt"
	"regexp"

	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)

// Restrict key names to characters allowed in an HTTP header name.
// Restrict key values to printable characters (what is recognised as data in an HTTP header value).
var tagRegexp = regexp.MustCompile("^([a-zA-Z0-9!#$%&'*+\\-.^_`|~]+)=([[:print:]]+)$")

func NewTagFromCLI(compoundTag string) (tunnelpogs.Tag, bool) {
	matches := tagRegexp.FindStringSubmatch(compoundTag)
	if len(matches) == 0 {
		return tunnelpogs.Tag{}, false
	}
	return tunnelpogs.Tag{Name: matches[1], Value: matches[2]}, true
}

func NewTagSliceFromCLI(tags []string) ([]tunnelpogs.Tag, error) {
	var tagSlice []tunnelpogs.Tag
	for _, compoundTag := range tags {
		if tag, ok := NewTagFromCLI(compoundTag); ok {
			tagSlice = append(tagSlice, tag)
		} else {
			return nil, fmt.Errorf("Cannot parse tag value %s", compoundTag)
		}
	}
	return tagSlice, nil
}
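
// A small usage sketch of the helpers above, e.g. for values collected from
// the --tag flag defined in main.go; the helper name and inputs are
// illustrative.
func exampleTags() {
	tags, err := NewTagSliceFromCLI([]string{"env=prod", "datacenter=sfo1"})
	if err != nil {
		// A malformed entry such as "no spaces=in key names" lands here.
		return
	}
	for _, tag := range tags {
		fmt.Printf("%s=%s\n", tag.Name, tag.Value)
	}
}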
@@ -0,0 +1,46 @@
package main

import (
	"testing"

	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"

	"github.com/stretchr/testify/assert"
)

func TestSingleTag(t *testing.T) {
	testCases := []struct {
		Input  string
		Output tunnelpogs.Tag
		Fail   bool
	}{
		{Input: "x=y", Output: tunnelpogs.Tag{Name: "x", Value: "y"}},
		{Input: "More-Complex=Tag Values", Output: tunnelpogs.Tag{Name: "More-Complex", Value: "Tag Values"}},
		{Input: "First=Equals=Wins", Output: tunnelpogs.Tag{Name: "First", Value: "Equals=Wins"}},
		{Input: "x=", Fail: true},
		{Input: "=y", Fail: true},
		{Input: "=", Fail: true},
		{Input: "No spaces allowed=in key names", Fail: true},
		{Input: "omg\nwtf=bbq", Fail: true},
	}
	for i, testCase := range testCases {
		tag, ok := NewTagFromCLI(testCase.Input)
		assert.Equalf(t, !testCase.Fail, ok, "mismatched success for test case %d", i)
		assert.Equalf(t, testCase.Output, tag, "mismatched output for test case %d", i)
	}
}

func TestTagSlice(t *testing.T) {
	tagSlice, err := NewTagSliceFromCLI([]string{"a=b", "c=d", "e=f"})
	assert.NoError(t, err)
	assert.Len(t, tagSlice, 3)
	assert.Equal(t, "a", tagSlice[0].Name)
	assert.Equal(t, "b", tagSlice[0].Value)
	assert.Equal(t, "c", tagSlice[1].Name)
	assert.Equal(t, "d", tagSlice[1].Value)
	assert.Equal(t, "e", tagSlice[2].Name)
	assert.Equal(t, "f", tagSlice[2].Value)

	tagSlice, err = NewTagSliceFromCLI([]string{"a=b", "=", "e=f"})
	assert.Error(t, err)
}

@@ -0,0 +1,115 @@
package main

import (
	"os"
	"runtime"
	"time"

	"golang.org/x/crypto/ssh/terminal"
	"gopkg.in/urfave/cli.v2"

	"github.com/equinox-io/equinox"
	"github.com/facebookgo/grace/gracenet"
)

const (
	appID                    = "app_idCzgxYerVD"
	noUpdateInShellMessage   = "cloudflared will not automatically update when run from the shell. To enable auto-updates, run cloudflared as a service: https://developers.cloudflare.com/argo-tunnel/reference/service/"
	noUpdateOnWindowsMessage = "cloudflared will not automatically update on Windows systems."
)

var publicKey = []byte(`
-----BEGIN ECDSA PUBLIC KEY-----
MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE4OWZocTVZ8Do/L6ScLdkV+9A0IYMHoOf
dsCmJ/QZ6aw0w9qkkwEpne1Lmo6+0pGexZzFZOH6w5amShn+RXt7qkSid9iWlzGq
EKx0BZogHSor9Wy5VztdFaAaVbsJiCbO
-----END ECDSA PUBLIC KEY-----
`)

type ReleaseInfo struct {
	Updated bool
	Version string
	Error   error
}

func checkForUpdates() ReleaseInfo {
	var opts equinox.Options
	if err := opts.SetPublicKeyPEM(publicKey); err != nil {
		return ReleaseInfo{Error: err}
	}

	resp, err := equinox.Check(appID, opts)
	switch {
	case err == equinox.NotAvailableErr:
		return ReleaseInfo{}
	case err != nil:
		return ReleaseInfo{Error: err}
	}

	err = resp.Apply()
	if err != nil {
		return ReleaseInfo{Error: err}
	}

	return ReleaseInfo{Updated: true, Version: resp.ReleaseVersion}
}

func update(_ *cli.Context) error {
	if updateApplied() {
		os.Exit(64)
	}
	return nil
}

func autoupdate(freq time.Duration, listeners *gracenet.Net, shutdownC chan struct{}) error {
	tickC := time.Tick(freq)
	for {
		if updateApplied() {
			os.Args = append(os.Args, "--is-autoupdated=true")
			pid, err := listeners.StartProcess()
			if err != nil {
				logger.WithError(err).Error("Unable to restart server automatically")
				return err
			}
			// stop old process after autoupdate. Otherwise we create a new process
			// after each update
			logger.Infof("PID of the new process is %d", pid)
			return nil
		}
		select {
		case <-tickC:
		case <-shutdownC:
			return nil
		}
	}
}

func updateApplied() bool {
	releaseInfo := checkForUpdates()
	if releaseInfo.Updated {
		logger.Infof("Updated to version %s", releaseInfo.Version)
		return true
	}
	if releaseInfo.Error != nil {
		logger.WithError(releaseInfo.Error).Error("Update check failed")
	}
	return false
}

func isAutoupdateEnabled(c *cli.Context) bool {
	if runtime.GOOS == "windows" {
		logger.Info(noUpdateOnWindowsMessage)
		return false
	}

	if isRunningFromTerminal() {
		logger.Info(noUpdateInShellMessage)
		return false
	}

	return !c.Bool("no-autoupdate") && c.Duration("autoupdate-freq") != 0
}

func isRunningFromTerminal() bool {
	return terminal.IsTerminal(int(os.Stdout.Fd()))
}

@@ -0,0 +1,252 @@
// +build windows

package main

// Copypasta from the example files:
// https://github.com/golang/sys/blob/master/windows/svc/example

import (
	"fmt"
	"os"
	"time"
	"unsafe"

	cli "gopkg.in/urfave/cli.v2"

	"golang.org/x/sys/windows"
	"golang.org/x/sys/windows/svc"
	"golang.org/x/sys/windows/svc/eventlog"
	"golang.org/x/sys/windows/svc/mgr"
)

const (
	windowsServiceName        = "Cloudflared"
	windowsServiceDescription = "Argo Tunnel agent"

	recoverActionDelay      = time.Second * 20
	failureCountResetPeriod = time.Hour * 24

	// not defined in golang.org/x/sys/windows package
	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms681988(v=vs.85).aspx
	serviceConfigFailureActionsFlag = 4
)

func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
	app.Commands = append(app.Commands, &cli.Command{
		Name:  "service",
		Usage: "Manages the Argo Tunnel Windows service",
		Subcommands: []*cli.Command{
			&cli.Command{
				Name:   "install",
				Usage:  "Install Argo Tunnel as a Windows service",
				Action: installWindowsService,
			},
			&cli.Command{
				Name:   "uninstall",
				Usage:  "Uninstall the Argo Tunnel service",
				Action: uninstallWindowsService,
			},
		},
	})

	isIntSess, err := svc.IsAnInteractiveSession()
	if err != nil {
		logger.Fatalf("failed to determine if we are running in an interactive session: %v", err)
	}

	if isIntSess {
		app.Run(os.Args)
		return
	}

	elog, err := eventlog.Open(windowsServiceName)
	if err != nil {
		logger.WithError(err).Errorf("Cannot open event log for %s", windowsServiceName)
		return
	}
	defer elog.Close()

	elog.Info(1, fmt.Sprintf("%s service starting", windowsServiceName))
	// svc.Run executes the service by calling windowsService, a Handler
	// that implements the Execute method.
	// It sets the service status to stopped after Execute returns.
	err = svc.Run(windowsServiceName, &windowsService{app: app, elog: elog, shutdownC: shutdownC, graceShutdownC: graceShutdownC})
	if err != nil {
		elog.Error(1, fmt.Sprintf("%s service failed: %v", windowsServiceName, err))
		return
	}
	elog.Info(1, fmt.Sprintf("%s service stopped", windowsServiceName))
}

type windowsService struct {
	app            *cli.App
	elog           *eventlog.Log
	shutdownC      chan struct{}
	graceShutdownC chan struct{}
}

// called by the package code at the start of the service
func (s *windowsService) Execute(args []string, r <-chan svc.ChangeRequest, statusChan chan<- svc.Status) (ssec bool, errno uint32) {
	const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown
	statusChan <- svc.Status{State: svc.StartPending}
	errC := make(chan error)
	go func() {
		errC <- s.app.Run(args)
	}()
	statusChan <- svc.Status{State: svc.Running, Accepts: cmdsAccepted}

	for {
		select {
		case c := <-r:
			switch c.Cmd {
			case svc.Interrogate:
				s.elog.Info(1, fmt.Sprintf("control request 1 #%d", c))
				statusChan <- c.CurrentStatus
			case svc.Stop:
				s.elog.Info(1, "received stop control request")
				close(s.graceShutdownC)
				statusChan <- svc.Status{State: svc.StopPending}
			case svc.Shutdown:
				s.elog.Info(1, "received shutdown control request")
				close(s.shutdownC)
				statusChan <- svc.Status{State: svc.StopPending}
			default:
				s.elog.Error(1, fmt.Sprintf("unexpected control request #%d", c))
			}
		case err := <-errC:
			ssec = true
			if err != nil {
				s.elog.Error(1, fmt.Sprintf("cloudflared terminated with error %v", err))
				errno = 1
			} else {
				s.elog.Info(1, "cloudflared terminated without error")
				errno = 0
			}
			return
		}
	}
}

func installWindowsService(c *cli.Context) error {
	logger.Infof("Installing Argo Tunnel Windows service")
	exepath, err := os.Executable()
	if err != nil {
		logger.Errorf("Cannot find the path name that starts the process")
		return err
	}
	m, err := mgr.Connect()
	if err != nil {
		logger.WithError(err).Errorf("Cannot establish a connection to the service control manager")
		return err
	}
	defer m.Disconnect()
	s, err := m.OpenService(windowsServiceName)
	if err == nil {
		s.Close()
		logger.Errorf("service %s already exists", windowsServiceName)
		return fmt.Errorf("service %s already exists", windowsServiceName)
	}
	config := mgr.Config{StartType: mgr.StartAutomatic, DisplayName: windowsServiceDescription}
	s, err = m.CreateService(windowsServiceName, exepath, config)
	if err != nil {
		logger.Errorf("Cannot install service %s", windowsServiceName)
		return err
	}
	defer s.Close()
	logger.Infof("Argo Tunnel agent service is installed")
	err = eventlog.InstallAsEventCreate(windowsServiceName, eventlog.Error|eventlog.Warning|eventlog.Info)
	if err != nil {
		s.Delete()
		logger.WithError(err).Errorf("Cannot install event logger")
		return fmt.Errorf("SetupEventLogSource() failed: %s", err)
	}
	err = configRecoveryOption(s.Handle)
	if err != nil {
		logger.WithError(err).Errorf("Cannot set service recovery actions")
		logger.Infof("See %s to manually configure service recovery actions", serviceUrl)
	}
	return nil
}

func uninstallWindowsService(c *cli.Context) error {
	logger.Infof("Uninstalling Argo Tunnel Windows Service")
	m, err := mgr.Connect()
	if err != nil {
		logger.Errorf("Cannot establish a connection to the service control manager")
		return err
	}
	defer m.Disconnect()
	s, err := m.OpenService(windowsServiceName)
	if err != nil {
		logger.Errorf("service %s is not installed", windowsServiceName)
		return fmt.Errorf("service %s is not installed", windowsServiceName)
	}
	defer s.Close()
	err = s.Delete()
	if err != nil {
		logger.Errorf("Cannot delete service %s", windowsServiceName)
		return err
	}
	logger.Infof("Argo Tunnel agent service is uninstalled")
	err = eventlog.Remove(windowsServiceName)
	if err != nil {
		logger.Errorf("Cannot remove event logger")
		return fmt.Errorf("RemoveEventLogSource() failed: %s", err)
	}
	return nil
}

// defined in https://msdn.microsoft.com/en-us/library/windows/desktop/ms685126(v=vs.85).aspx
type scAction int

// https://msdn.microsoft.com/en-us/library/windows/desktop/ms685126(v=vs.85).aspx
const (
	scActionNone scAction = iota
	scActionRestart
	scActionReboot
	scActionRunCommand
)

// defined in https://msdn.microsoft.com/en-us/library/windows/desktop/ms685939(v=vs.85).aspx
type serviceFailureActions struct {
	// time to wait (in seconds) before resetting the failure count to zero if there are no failures
	resetPeriod uint32
	rebootMsg   *uint16
	command     *uint16
	// If failure count is greater than actionCount, the service controller repeats
	// the last action in actions
	actionCount uint32
	actions     uintptr
}

// https://msdn.microsoft.com/en-us/library/windows/desktop/ms685937(v=vs.85).aspx
// Not supported in Windows Server 2003 and Windows XP
type serviceFailureActionsFlag struct {
	// enableActionsForStopsWithErr is of type BOOL, which is declared as
	// typedef int BOOL in C
	enableActionsForStopsWithErr int
}

type recoveryAction struct {
	recoveryType uint32
	// The time to wait before performing the specified action, in milliseconds
	delay uint32
}

// until https://github.com/golang/go/issues/23239 is released, we will need to
// configure this through ChangeServiceConfig2
func configRecoveryOption(handle windows.Handle) error {
	actions := []recoveryAction{
		{recoveryType: uint32(scActionRestart), delay: uint32(recoverActionDelay / time.Millisecond)},
	}
	serviceRecoveryActions := serviceFailureActions{
		resetPeriod: uint32(failureCountResetPeriod / time.Second),
		actionCount: uint32(len(actions)),
		actions:     uintptr(unsafe.Pointer(&actions[0])),
	}
	if err := windows.ChangeServiceConfig2(handle, windows.SERVICE_CONFIG_FAILURE_ACTIONS, (*byte)(unsafe.Pointer(&serviceRecoveryActions))); err != nil {
		return err
	}
	serviceFailureActionsFlag := serviceFailureActionsFlag{enableActionsForStopsWithErr: 1}
	return windows.ChangeServiceConfig2(handle, serviceConfigFailureActionsFlag, (*byte)(unsafe.Pointer(&serviceFailureActionsFlag)))
}

@@ -0,0 +1,165 @@
package h2mux

import (
	"sync"

	"golang.org/x/net/http2"
)

// activeStreamMap is used to moderate access to active streams between the read and write
// threads, and deny access to new peer streams while shutting down.
type activeStreamMap struct {
	sync.RWMutex
	// streams tracks open streams.
	streams map[uint32]*MuxedStream
	// streamsEmpty is a chan that should be closed when no more streams are open.
	streamsEmpty chan struct{}
	// nextStreamID is the next ID to use on our side of the connection.
	// This is odd for clients, even for servers.
	nextStreamID uint32
	// maxPeerStreamID is the ID of the most recent stream opened by the peer.
	maxPeerStreamID uint32
	// ignoreNewStreams is true when the connection is being shut down. New streams
	// cannot be registered.
	ignoreNewStreams bool
}

func newActiveStreamMap(useClientStreamNumbers bool) *activeStreamMap {
	m := &activeStreamMap{
		streams:      make(map[uint32]*MuxedStream),
		streamsEmpty: make(chan struct{}),
		nextStreamID: 1,
	}
	// Client-initiated streams use odd stream IDs, server-initiated streams use even stream IDs
	if !useClientStreamNumbers {
		m.nextStreamID = 2
	}
	return m
}

// Len returns the number of active streams.
func (m *activeStreamMap) Len() int {
	m.RLock()
	defer m.RUnlock()
	return len(m.streams)
}

func (m *activeStreamMap) Get(streamID uint32) (*MuxedStream, bool) {
	m.RLock()
	defer m.RUnlock()
	stream, ok := m.streams[streamID]
	return stream, ok
}

// Set returns true if the stream was assigned successfully. If a stream
// already existed with that ID, or we are shutting down, it returns false.
func (m *activeStreamMap) Set(newStream *MuxedStream) bool {
	m.Lock()
	defer m.Unlock()
	if _, ok := m.streams[newStream.streamID]; ok {
		return false
	}
	if m.ignoreNewStreams {
		return false
	}
	m.streams[newStream.streamID] = newStream
	return true
}

// Delete stops tracking the stream. It should be called only after it is closed and reset.
func (m *activeStreamMap) Delete(streamID uint32) {
	m.Lock()
	defer m.Unlock()
	delete(m.streams, streamID)
	if len(m.streams) == 0 && m.streamsEmpty != nil {
		close(m.streamsEmpty)
		m.streamsEmpty = nil
	}
}

// Shutdown blocks new streams from being created. It returns a channel that receives an event
// once the last stream has closed, or nil if a shutdown is already in progress.
func (m *activeStreamMap) Shutdown() <-chan struct{} {
	m.Lock()
	defer m.Unlock()
	if m.ignoreNewStreams {
		// already shutting down
		return nil
	}
	m.ignoreNewStreams = true
	done := make(chan struct{})
	if len(m.streams) == 0 {
		// nothing to shut down
		close(done)
		return done
	}
	m.streamsEmpty = done
	return done
}

// AcquireLocalID acquires a new stream ID for a stream you're opening.
func (m *activeStreamMap) AcquireLocalID() uint32 {
	m.Lock()
	defer m.Unlock()
	x := m.nextStreamID
	m.nextStreamID += 2
	return x
}

// AcquirePeerID observes the ID of a stream opened by the peer. It returns true if we should accept
// the new stream, or false to reject it. The ErrCode gives the reason why.
func (m *activeStreamMap) AcquirePeerID(streamID uint32) (bool, http2.ErrCode) {
	m.Lock()
	defer m.Unlock()
	switch {
	case m.ignoreNewStreams:
		return false, http2.ErrCodeStreamClosed
	case streamID > m.maxPeerStreamID:
		m.maxPeerStreamID = streamID
		return true, http2.ErrCodeNo
	default:
		return false, http2.ErrCodeStreamClosed
	}
}

// IsPeerStreamID is true if the stream ID belongs to the peer.
func (m *activeStreamMap) IsPeerStreamID(streamID uint32) bool {
	m.RLock()
	defer m.RUnlock()
	return (streamID % 2) != (m.nextStreamID % 2)
}

// IsLocalStreamID is true if it is a stream we have opened, even if it is now closed.
func (m *activeStreamMap) IsLocalStreamID(streamID uint32) bool {
	m.RLock()
	defer m.RUnlock()
	return (streamID%2) == (m.nextStreamID%2) && streamID < m.nextStreamID
}

// LastPeerStreamID returns the most recently opened peer stream ID.
func (m *activeStreamMap) LastPeerStreamID() uint32 {
	m.RLock()
	defer m.RUnlock()
	return m.maxPeerStreamID
}

// LastLocalStreamID returns the most recently opened local stream ID.
func (m *activeStreamMap) LastLocalStreamID() uint32 {
	m.RLock()
	defer m.RUnlock()
	if m.nextStreamID > 1 {
		return m.nextStreamID - 2
	}
	return 0
}

// Abort closes every active stream and prevents new ones being created. This should be used to
// return errors in pending read/writes when the underlying connection goes away.
func (m *activeStreamMap) Abort() {
	m.Lock()
	defer m.Unlock()
	for _, stream := range m.streams {
		stream.Close()
	}
	m.ignoreNewStreams = true
}
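
// Illustrative sketch, not part of the original commit: how the odd/even
// stream-ID convention above plays out for a client-side map. Only the
// activeStreamMap API defined in this file is assumed.
func exampleStreamIDParity() {
	m := newActiveStreamMap(true) // client: local stream IDs are 1, 3, 5, ...
	first := m.AcquireLocalID()   // 1
	_ = m.AcquireLocalID()        // 3
	_ = m.IsLocalStreamID(first)  // true: odd, and below nextStreamID
	_ = m.IsPeerStreamID(2)       // true: even IDs belong to the server peer
	ok, _ := m.AcquirePeerID(2)   // true: higher than any peer ID seen so far
	ok, _ = m.AcquirePeerID(2)    // false: peer stream IDs must strictly increase
	_ = ok
}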
@ -0,0 +1,50 @@
package h2mux

import "sync"

// BooleanFuse is a data structure that can be set once to a particular value using Fuse(value).
// Subsequent calls to Fuse() will have no effect.
type BooleanFuse struct {
	value int32
	mu    sync.Mutex
	cond  *sync.Cond
}

func NewBooleanFuse() *BooleanFuse {
	f := &BooleanFuse{}
	f.cond = sync.NewCond(&f.mu)
	return f
}

// Value gets the value
func (f *BooleanFuse) Value() bool {
	// 0: unset
	// 1: set true
	// 2: set false
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.value == 1
}

func (f *BooleanFuse) Fuse(result bool) {
	f.mu.Lock()
	defer f.mu.Unlock()
	newValue := int32(2)
	if result {
		newValue = 1
	}
	if f.value == 0 {
		f.value = newValue
		f.cond.Broadcast()
	}
}

// Await blocks until Fuse has been called at least once.
func (f *BooleanFuse) Await() bool {
	f.mu.Lock()
	defer f.mu.Unlock()
	for f.value == 0 {
		f.cond.Wait()
	}
	return f.value == 1
}
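
// Illustrative sketch, not part of the original commit: the first Fuse()
// call wins, and Await() unblocks every waiter with that first value.
func exampleBooleanFuse() {
	f := NewBooleanFuse()
	done := make(chan bool)
	go func() { done <- f.Await() }() // blocks until the fuse is set
	f.Fuse(true)                      // sets the value once
	f.Fuse(false)                     // no effect: the fuse is already set
	_ = <-done                        // receives true
}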
@ -0,0 +1,27 @@
package h2mux

import (
	"sync/atomic"
)

type AtomicCounter struct {
	count uint64
}

func NewAtomicCounter(initCount uint64) *AtomicCounter {
	return &AtomicCounter{count: initCount}
}

func (c *AtomicCounter) IncrementBy(number uint64) {
	atomic.AddUint64(&c.count, number)
}

// Count returns the current value of the counter and resets it to 0
func (c *AtomicCounter) Count() uint64 {
	return atomic.SwapUint64(&c.count, 0)
}

// Value returns the current value of the counter
func (c *AtomicCounter) Value() uint64 {
	return atomic.LoadUint64(&c.count)
}
@ -0,0 +1,23 @@
package h2mux

import (
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestCounter(t *testing.T) {
	var wg sync.WaitGroup
	wg.Add(dataPoints)
	c := AtomicCounter{}
	for i := 0; i < dataPoints; i++ {
		go func() {
			defer wg.Done()
			c.IncrementBy(uint64(1))
		}()
	}
	wg.Wait()
	assert.Equal(t, uint64(dataPoints), c.Count())
	assert.Equal(t, uint64(0), c.Count())
}
@ -0,0 +1,61 @@
package h2mux

import (
	"fmt"

	"golang.org/x/net/http2"
)

var (
	ErrHandshakeTimeout               = MuxerHandshakeError{"1000 handshake timeout"}
	ErrBadHandshakeNotSettings        = MuxerHandshakeError{"1001 unexpected response"}
	ErrBadHandshakeUnexpectedAck      = MuxerHandshakeError{"1002 unexpected response"}
	ErrBadHandshakeNoMagic            = MuxerHandshakeError{"1003 unexpected response"}
	ErrBadHandshakeWrongMagic         = MuxerHandshakeError{"1004 connected to endpoint of wrong type"}
	ErrBadHandshakeNotSettingsAck     = MuxerHandshakeError{"1005 unexpected response"}
	ErrBadHandshakeUnexpectedSettings = MuxerHandshakeError{"1006 unexpected response"}

	ErrUnexpectedFrameType = MuxerProtocolError{"2001 unexpected frame type", http2.ErrCodeProtocol}
	ErrUnknownStream       = MuxerProtocolError{"2002 unknown stream", http2.ErrCodeProtocol}
	ErrInvalidStream       = MuxerProtocolError{"2003 invalid stream", http2.ErrCodeProtocol}

	ErrStreamHeadersSent = MuxerApplicationError{"3000 headers already sent"}
	ErrConnectionClosed  = MuxerApplicationError{"3001 connection closed"}
	ErrConnectionDropped = MuxerApplicationError{"3002 connection dropped"}

	ErrClosedStream = MuxerStreamError{"4000 stream closed", http2.ErrCodeStreamClosed}
)

type MuxerHandshakeError struct {
	cause string
}

func (e MuxerHandshakeError) Error() string {
	return fmt.Sprintf("Handshake error: %s", e.cause)
}

type MuxerProtocolError struct {
	cause  string
	h2code http2.ErrCode
}

func (e MuxerProtocolError) Error() string {
	return fmt.Sprintf("Protocol error: %s", e.cause)
}

type MuxerApplicationError struct {
	cause string
}

func (e MuxerApplicationError) Error() string {
	return fmt.Sprintf("Application error: %s", e.cause)
}

type MuxerStreamError struct {
	cause  string
	h2code http2.ErrCode
}

func (e MuxerStreamError) Error() string {
	return fmt.Sprintf("Stream error: %s", e.cause)
}
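
// Illustrative sketch, not part of the original commit: the numeric prefixes
// group errors by layer (1xxx handshake, 2xxx protocol, 3xxx application,
// 4xxx stream), and the typed wrappers let callers branch on the failure
// class, e.g. to choose an HTTP/2 error code for a stream reset.
func exampleClassifyMuxerError(err error) http2.ErrCode {
	switch e := err.(type) {
	case MuxerProtocolError:
		return e.h2code // http2.ErrCodeProtocol for the 2xxx errors above
	case MuxerStreamError:
		return e.h2code // http2.ErrCodeStreamClosed for ErrClosedStream
	default:
		return http2.ErrCodeNo
	}
}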
@ -0,0 +1,21 @@
// +build cgo

package h2mux

import (
	"io"

	"code.cfops.it/go/brotli"
)

func CompressionIsSupported() bool {
	return true
}

func newDecompressor(src io.Reader) *brotli.Reader {
	return brotli.NewReader(src)
}

func newCompressor(dst io.Writer, quality, lgwin int) *brotli.Writer {
	return brotli.NewWriter(dst, brotli.WriterOptions{Quality: quality, LGWin: lgwin})
}
@ -0,0 +1,19 @@
// +build !cgo

package h2mux

import (
	"io"
)

func CompressionIsSupported() bool {
	return false
}

func newDecompressor(src io.Reader) decompressor {
	return nil
}

func newCompressor(dst io.Writer, quality, lgwin int) compressor {
	return nil
}
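
// Illustrative sketch, not part of the original commit: since the !cgo stubs
// above return nil, callers are expected to gate on CompressionIsSupported()
// before constructing a compressor. The quality/lgwin values here are
// assumptions for the example, not values taken from this commit, and the
// sketch assumes the cgo brotli writer satisfies the compressor interface.
func exampleMaybeCompress(dst io.Writer) compressor {
	if !CompressionIsSupported() {
		return nil // built without cgo: brotli is unavailable
	}
	return newCompressor(dst, 9, 19)
}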
@ -0,0 +1,593 @@
package h2mux

import (
	"bytes"
	"io"
	"strings"
	"sync"

	"golang.org/x/net/http2"
)

/* This is an implementation of https://github.com/vkrasnov/h2-compression-dictionaries
but modified for tunnels in a few key ways:
Since tunnels is a server-to-server service, some aspects of the spec would cause
unnecessary head-of-line blocking on the CPU and on the network, hence this implementation
allows for parallel compression on the "client", and buffering on the "server" to solve
this problem. */

// Assign temporary values
const SettingCompression http2.SettingID = 0xff20

const (
	FrameSetCompressionContext http2.FrameType = 0xf0
	FrameUseDictionary         http2.FrameType = 0xf1
	FrameSetDictionary         http2.FrameType = 0xf2
)

const (
	FlagSetDictionaryAppend http2.Flags = 0x1
	FlagSetDictionaryOffset http2.Flags = 0x2
)

const compressionVersion = uint8(1)
const compressionFormat = uint8(2)

type CompressionSetting uint

const (
	CompressionNone CompressionSetting = iota
	CompressionLow
	CompressionMedium
	CompressionMax
)

type CompressionPreset struct {
	nDicts, dictSize, quality uint8
}

type compressor interface {
	Write([]byte) (int, error)
	Flush() error
	SetDictionary([]byte)
	Close() error
}

type decompressor interface {
	Read([]byte) (int, error)
	SetDictionary([]byte)
	Close() error
}

var compressionPresets = map[CompressionSetting]CompressionPreset{
	CompressionNone:   {0, 0, 0},
	CompressionLow:    {32, 17, 5},
	CompressionMedium: {64, 18, 6},
	CompressionMax:    {255, 19, 9},
}

func compressionSettingVal(version, fmt, sz, nd uint8) uint32 {
	// Currently the compression settings include:
	// * version: only 1 is supported
	// * fmt: only 2 for brotli is supported
	// * sz: log2 of the maximal allowed dictionary size
	// * nd: max allowed number of dictionaries
	return uint32(version)<<24 + uint32(fmt)<<16 + uint32(sz)<<8 + uint32(nd)
}

func parseCompressionSettingVal(setting uint32) (version, fmt, sz, nd uint8) {
	version = uint8(setting >> 24)
	fmt = uint8(setting >> 16)
	sz = uint8(setting >> 8)
	nd = uint8(setting)
	return
}
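
// Illustrative sketch, not part of the original commit: a worked example of
// the byte layout packed by compressionSettingVal and recovered by
// parseCompressionSettingVal.
func exampleCompressionSettingRoundTrip() {
	// version=1, fmt=2 (brotli), sz=18 (max dict 2^18 bytes), nd=64 dictionaries
	val := compressionSettingVal(1, 2, 18, 64)
	// val == 0x01021240: one field per byte, version in the high byte
	version, format, sz, nd := parseCompressionSettingVal(val)
	_ = version // 1
	_ = format  // 2
	_ = sz      // 18
	_ = nd      // 64
}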

func (c CompressionSetting) toH2Setting() uint32 {
	p, ok := compressionPresets[c]
	if !ok {
		return 0
	}
	return compressionSettingVal(compressionVersion, compressionFormat, p.dictSize, p.nDicts)
}

func (c CompressionSetting) getPreset() CompressionPreset {
	return compressionPresets[c]
}

type dictUpdate struct {
	reader     *h2DictionaryReader
	dictionary *h2ReadDictionary
	buff       []byte
	isReady    bool
	isUse      bool
	s          setDictRequest
}

type h2ReadDictionary struct {
	dictionary []byte
	queue      []*dictUpdate
	maxSize    int
}

type h2ReadDictionaries struct {
	d       []h2ReadDictionary
	maxSize int
}

type h2DictionaryReader struct {
	*SharedBuffer                // Propagate the decompressed output into the original buffer
	decompBuffer   *bytes.Buffer // Intermediate buffer for the brotli decompressor
	dictionary     []byte        // The content of the dictionary being used by this reader
	internalBuffer []byte
	s, e           int           // Start and end of the buffer
	decomp         decompressor  // The brotli decompressor
	isClosed       bool          // Indicates that Close was called for this reader
	queue          []*dictUpdate // List of dictionaries to update, when the data is available
}

type h2WriteDictionary []byte

type setDictRequest struct {
	streamID         uint32
	dictID           uint8
	dictSZ           uint64
	truncate, offset uint64
	P, E, D          bool
}

type useDictRequest struct {
	dictID   uint8
	streamID uint32
	setDict  []setDictRequest
}

type h2WriteDictionaries struct {
	dictLock     sync.Mutex
	dictChan     chan useDictRequest
	dictionaries []h2WriteDictionary
	nextAvail    int              // next unused dictionary slot
	maxAvail     int              // max ID, defined by SETTINGS
	maxSize      int              // max size, defined by SETTINGS
	typeToDict   map[string]uint8 // map from content type to dictionary that encodes it
	pathToDict   map[string]uint8 // map from path to dictionary that encodes it
	quality      int
	window       int
	compIn, compOut *AtomicCounter
}

type h2DictWriter struct {
	*bytes.Buffer
	comp       compressor
	dicts      *h2WriteDictionaries
	writerLock sync.Mutex

	streamID    uint32
	path        string
	contentType string
}

type h2Dictionaries struct {
	write *h2WriteDictionaries
	read  *h2ReadDictionaries
}

func (o *dictUpdate) update(buff []byte) {
	o.buff = make([]byte, len(buff))
	copy(o.buff, buff)
	o.isReady = true
}

func (d *h2ReadDictionary) update() {
	for len(d.queue) > 0 {
		o := d.queue[0]
		if !o.isReady {
			break
		}
		if o.isUse {
			reader := o.reader
			reader.dictionary = make([]byte, len(d.dictionary))
			copy(reader.dictionary, d.dictionary)
			reader.decomp = newDecompressor(reader.decompBuffer)
			if len(reader.dictionary) > 0 {
				reader.decomp.SetDictionary(reader.dictionary)
			}
			reader.Write([]byte{})
		} else {
			d.dictionary = adjustDictionary(d.dictionary, o.buff, o.s, d.maxSize)
		}
		d.queue = d.queue[1:]
	}
}

func newH2ReadDictionaries(nd, sz uint8) h2ReadDictionaries {
	d := make([]h2ReadDictionary, int(nd))
	for i := range d {
		d[i].maxSize = 1 << uint(sz)
	}
	return h2ReadDictionaries{d: d, maxSize: 1 << uint(sz)}
}

func (dicts *h2ReadDictionaries) getDictByID(dictID uint8) (*h2ReadDictionary, error) {
	if int(dictID) > len(dicts.d) {
		return nil, MuxerStreamError{"dictID too big", http2.ErrCodeProtocol}
	}

	return &dicts.d[dictID], nil
}

func (dicts *h2ReadDictionaries) newReader(b *SharedBuffer, dictID uint8) *h2DictionaryReader {
	if int(dictID) > len(dicts.d) {
		return nil
	}

	dictionary := &dicts.d[dictID]
	reader := &h2DictionaryReader{SharedBuffer: b, decompBuffer: &bytes.Buffer{}, internalBuffer: make([]byte, dicts.maxSize)}

	if len(dictionary.queue) == 0 {
		reader.dictionary = make([]byte, len(dictionary.dictionary))
		copy(reader.dictionary, dictionary.dictionary)
		reader.decomp = newDecompressor(reader.decompBuffer)
		if len(reader.dictionary) > 0 {
			reader.decomp.SetDictionary(reader.dictionary)
		}
	} else {
		dictionary.queue = append(dictionary.queue, &dictUpdate{isUse: true, isReady: true, reader: reader})
	}
	return reader
}

func (r *h2DictionaryReader) updateWaitingDictionaries() {
	// Update all the waiting dictionaries
	for _, o := range r.queue {
		if o.isReady {
			continue
		}
		if r.isClosed || uint64(r.e) >= o.s.dictSZ {
			o.update(r.internalBuffer[:r.e])
			if o == o.dictionary.queue[0] {
				defer o.dictionary.update()
			}
		}
	}
}

// Write actually happens when reading from the network; this is therefore the stage where we decompress the buffer
func (r *h2DictionaryReader) Write(p []byte) (n int, err error) {
	// Every write goes into the brotli buffer first
	n, err = r.decompBuffer.Write(p)
	if err != nil {
		return
	}

	if r.decomp == nil {
		return
	}

	for {
		m, err := r.decomp.Read(r.internalBuffer[r.e:])
		if err != nil && err != io.EOF {
			r.SharedBuffer.Close()
			r.decomp.Close()
			return n, err
		}

		r.SharedBuffer.Write(r.internalBuffer[r.e : r.e+m])
		r.e += m

		if m == 0 {
			break
		}

		if r.e == len(r.internalBuffer) {
			r.updateWaitingDictionaries()
			r.e = 0
		}
	}

	r.updateWaitingDictionaries()

	if r.isClosed {
		r.SharedBuffer.Close()
		r.decomp.Close()
	}

	return
}

func (r *h2DictionaryReader) Close() error {
	if r.isClosed {
		return nil
	}
	r.isClosed = true
	r.Write([]byte{})
	return nil
}

var compressibleTypes = map[string]bool{
	"application/atom+xml":                true,
	"application/javascript":              true,
	"application/json":                    true,
	"application/ld+json":                 true,
	"application/manifest+json":           true,
	"application/rss+xml":                 true,
	"application/vnd.geo+json":            true,
	"application/vnd.ms-fontobject":       true,
	"application/x-font-ttf":              true,
	"application/x-yaml":                  true,
	"application/x-web-app-manifest+json": true,
	"application/xhtml+xml":               true,
	"application/xml":                     true,
	"font/opentype":                       true,
	"image/bmp":                           true,
	"image/svg+xml":                       true,
	"image/x-icon":                        true,
	"text/cache-manifest":                 true,
	"text/css":                            true,
	"text/html":                           true,
	"text/plain":                          true,
	"text/vcard":                          true,
	"text/vnd.rim.location.xloc":          true,
	"text/vtt":                            true,
	"text/x-component":                    true,
	"text/x-cross-domain-policy":          true,
	"text/x-yaml":                         true,
}

func getContentType(headers []Header) string {
	for _, h := range headers {
		if strings.ToLower(h.Name) == "content-type" {
			val := strings.ToLower(h.Value)
			sep := strings.IndexRune(val, ';')
			if sep != -1 {
				return val[:sep]
			}
			return val
		}
	}

	return ""
}

func newH2WriteDictionaries(nd, sz, quality uint8, compIn, compOut *AtomicCounter) (*h2WriteDictionaries, chan useDictRequest) {
	useDictChan := make(chan useDictRequest)
	return &h2WriteDictionaries{
		dictionaries: make([]h2WriteDictionary, nd),
		nextAvail:    0,
		maxAvail:     int(nd),
		maxSize:      1 << uint(sz),
		dictChan:     useDictChan,
		typeToDict:   make(map[string]uint8),
		pathToDict:   make(map[string]uint8),
		quality:      int(quality),
		window:       1 << uint(sz+1),
		compIn:       compIn,
		compOut:      compOut,
	}, useDictChan
}

func adjustDictionary(currentDictionary, newData []byte, set setDictRequest, maxSize int) []byte {
	currentDictionary = append(currentDictionary, newData[:set.dictSZ]...)

	if len(currentDictionary) > maxSize {
		currentDictionary = currentDictionary[len(currentDictionary)-maxSize:]
	}

	return currentDictionary
}
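
// Illustrative sketch, not part of the original commit: adjustDictionary acts
// as a sliding window, appending the first dictSZ bytes of the new data and
// then keeping only the most recent maxSize bytes.
func exampleAdjustDictionary() {
	dict := []byte("abc")
	set := setDictRequest{dictSZ: 2}
	dict = adjustDictionary(dict, []byte("defg"), set, 4)
	// "de" is appended ("abcde"), then trimmed to the last 4 bytes: "bcde"
	_ = dict
}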

func (h2d *h2WriteDictionaries) getNextDictID() (dictID uint8, ok bool) {
	if h2d.nextAvail < h2d.maxAvail {
		dictID, ok = uint8(h2d.nextAvail), true
		h2d.nextAvail++
		return
	}

	return 0, false
}

func (h2d *h2WriteDictionaries) getGenericDictID() (dictID uint8, ok bool) {
	if h2d.maxAvail == 0 {
		return 0, false
	}
	return uint8(h2d.maxAvail - 1), true
}

func (h2d *h2WriteDictionaries) getDictWriter(s *MuxedStream, headers []Header) *h2DictWriter {
	w := s.writeBuffer

	if w == nil {
		return nil
	}

	if s.method != "GET" && s.method != "POST" {
		return nil
	}

	s.contentType = getContentType(headers)
	if _, ok := compressibleTypes[s.contentType]; !ok && !strings.HasPrefix(s.contentType, "text") {
		return nil
	}

	return &h2DictWriter{
		Buffer:      w.(*bytes.Buffer),
		path:        s.path,
		contentType: s.contentType,
		streamID:    s.streamID,
		dicts:       h2d,
	}
}

func assignDictToStream(s *MuxedStream, p []byte) bool {

	// On first write to stream:
	// * assign the right dictionary
	// * update relevant dictionaries
	// * send the required USE_DICT and SET_DICT frames

	h2d := s.dictionaries.write
	if h2d == nil {
		return false
	}

	w, ok := s.writeBuffer.(*h2DictWriter)
	if !ok || w.comp != nil {
		return false
	}

	h2d.dictLock.Lock()

	if w.comp != nil {
		// Check again with the lock held; in theory the interface allows for unordered writes
		h2d.dictLock.Unlock()
		return false
	}

	// The logic of dictionary generation is below

	// Is there a dictionary for the exact path or content-type?
	var useID uint8
	pathID, pathFound := h2d.pathToDict[w.path]
	typeID, typeFound := h2d.typeToDict[w.contentType]

	if pathFound {
		// Use the dictionary for the path as top priority
		useID = pathID
		if !typeFound { // Shouldn't really happen, unless the type changes between requests
			typeID, typeFound = h2d.getNextDictID()
			if typeFound {
				h2d.typeToDict[w.contentType] = typeID
			}
		}
	} else if typeFound {
		// Use the dictionary for the same content type as second priority
		useID = typeID
		pathID, pathFound = h2d.getNextDictID()
		if pathFound { // If a slot is available, generate a new dictionary for the path
			h2d.pathToDict[w.path] = pathID
		}
	} else {
		// Use the overflow dictionary as a last resort.
		// If slots are available, generate new dictionaries for the path and content-type
		useID, _ = h2d.getGenericDictID()
		pathID, pathFound = h2d.getNextDictID()
		if pathFound {
			h2d.pathToDict[w.path] = pathID
		}
		typeID, typeFound = h2d.getNextDictID()
		if typeFound {
			h2d.typeToDict[w.contentType] = typeID
		}
	}

	useLen := h2d.maxSize
	if len(p) < useLen {
		useLen = len(p)
	}

	// Update all the dictionaries using the new data
	setDicts := make([]setDictRequest, 0, 3)
	setDict := setDictRequest{
		streamID: w.streamID,
		dictID:   useID,
		dictSZ:   uint64(useLen),
	}
	setDicts = append(setDicts, setDict)
	if pathID != useID {
		setDict.dictID = pathID
		setDicts = append(setDicts, setDict)
	}
	if typeID != useID {
		setDict.dictID = typeID
		setDicts = append(setDicts, setDict)
	}

	h2d.dictChan <- useDictRequest{streamID: w.streamID, dictID: uint8(useID), setDict: setDicts}

	dict := h2d.dictionaries[useID]

	// Brotli requires the dictionary to be immutable
	copyDict := make([]byte, len(dict))
	copy(copyDict, dict)

	for _, set := range setDicts {
		h2d.dictionaries[set.dictID] = adjustDictionary(h2d.dictionaries[set.dictID], p, set, h2d.maxSize)
	}

	w.comp = newCompressor(w.Buffer, h2d.quality, h2d.window)

	s.writeLock.Lock()
	h2d.dictLock.Unlock()

	if len(copyDict) > 0 {
		w.comp.SetDictionary(copyDict)
	}

	return true
}

func (w *h2DictWriter) Write(p []byte) (n int, err error) {
	bufLen := w.Buffer.Len()
	if w.comp != nil {
		n, err = w.comp.Write(p)
		if err != nil {
			return
		}
		err = w.comp.Flush()
		w.dicts.compIn.IncrementBy(uint64(n))
		w.dicts.compOut.IncrementBy(uint64(w.Buffer.Len() - bufLen))
		return
	}
	return w.Buffer.Write(p)
}

func (w *h2DictWriter) Close() error {
	return w.comp.Close()
}

// From http2/hpack
func http2ReadVarInt(n byte, p []byte) (remain []byte, v uint64, err error) {
	if n < 1 || n > 8 {
		panic("bad n")
	}
	if len(p) == 0 {
		return nil, 0, MuxerStreamError{"unexpected EOF", http2.ErrCodeProtocol}
	}
	v = uint64(p[0])
	if n < 8 {
		v &= (1 << uint64(n)) - 1
	}
	if v < (1<<uint64(n))-1 {
		return p[1:], v, nil
	}

	origP := p
	p = p[1:]
	var m uint64
	for len(p) > 0 {
		b := p[0]
		p = p[1:]
		v += uint64(b&127) << m
		if b&128 == 0 {
			return p, v, nil
		}
		m += 7
		if m >= 63 {
			return origP, 0, MuxerStreamError{"invalid integer", http2.ErrCodeProtocol}
		}
	}
	return nil, 0, MuxerStreamError{"unexpected EOF", http2.ErrCodeProtocol}
}

func appendVarInt(dst []byte, n byte, i uint64) []byte {
	k := uint64((1 << n) - 1)
	if i < k {
		return append(dst, byte(i))
	}
	dst = append(dst, byte(k))
	i -= k
	for ; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f)))
	}
	return append(dst, byte(i))
}
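
// Illustrative sketch, not part of the original commit: a round trip through
// the HPACK-style varint helpers above, using a 5-bit prefix as HPACK does.
func exampleVarIntRoundTrip() {
	// 1337 does not fit in 5 bits, so it is encoded as the prefix maximum (31)
	// followed by continuation bytes: [31, 154, 10]
	buf := appendVarInt(nil, 5, 1337)
	remain, v, err := http2ReadVarInt(5, buf)
	_ = remain // empty: all bytes consumed
	_ = v      // 1337
	_ = err    // nil
}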
@ -0,0 +1,415 @@
package h2mux

import (
	"bytes"
	"context"
	"io"
	"strings"
	"sync"
	"time"

	log "github.com/sirupsen/logrus"
	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
	"golang.org/x/sync/errgroup"
)

const (
	defaultFrameSize  uint32        = 1 << 14 // Minimum frame size in the http2 spec
	defaultWindowSize uint32        = 65535
	maxWindowSize     uint32        = (1 << 31) - 1 // 2^31-1 = 2147483647, max window size specified in the http2 spec
	defaultTimeout    time.Duration = 5 * time.Second
	defaultRetries    uint64        = 5

	SettingMuxerMagic http2.SettingID = 0x42db
	MuxerMagicOrigin  uint32          = 0xa2e43c8b
	MuxerMagicEdge    uint32          = 0x1088ebf9
)

type MuxedStreamHandler interface {
	ServeStream(*MuxedStream) error
}

type MuxedStreamFunc func(stream *MuxedStream) error

func (f MuxedStreamFunc) ServeStream(stream *MuxedStream) error {
	return f(stream)
}

type MuxerConfig struct {
	Timeout  time.Duration
	Handler  MuxedStreamHandler
	IsClient bool
	// Name is used to identify this muxer instance when logging.
	Name string
	// The minimum time this connection can be idle before sending a heartbeat.
	HeartbeatInterval time.Duration
	// The minimum number of heartbeats to send before terminating the connection.
	MaxHeartbeats uint64
	// Logger to use
	Logger             *log.Entry
	CompressionQuality CompressionSetting
}

type Muxer struct {
	// f is used to read and write HTTP2 frames on the wire.
	f *http2.Framer
	// config is the MuxerConfig given in Handshake.
	config MuxerConfig
	// w, r are references to the underlying connection used.
	w io.WriteCloser
	r io.ReadCloser
	// muxReader is the read process.
	muxReader *MuxReader
	// muxWriter is the write process.
	muxWriter *MuxWriter
	// muxMetricsUpdater is the process to update metrics
	muxMetricsUpdater *muxMetricsUpdater
	// newStreamChan is used to create new streams on the writer thread.
	// The writer will assign the next available stream ID.
	newStreamChan chan MuxedStreamRequest
	// abortChan is used to abort the writer event loop.
	abortChan chan struct{}
	// abortOnce is used to ensure abortChan is closed once only.
	abortOnce sync.Once
	// readyList is used to signal writable streams.
	readyList *ReadyList
	// streams tracks currently-open streams.
	streams *activeStreamMap
	// explicitShutdown records whether the Muxer is closing because Shutdown was called, or due to another
	// error.
	explicitShutdown *BooleanFuse

	compressionQuality CompressionPreset
}

type Header struct {
	Name, Value string
}

// Handshake establishes a muxed connection with the peer.
// After the handshake completes, it is possible to open and accept streams.
func Handshake(
	w io.WriteCloser,
	r io.ReadCloser,
	config MuxerConfig,
) (*Muxer, error) {
	// Set default config values
	if config.Timeout == 0 {
		config.Timeout = defaultTimeout
	}
	// Initialise connection state fields
	m := &Muxer{
		f:             http2.NewFramer(w, r), // A framer that writes to w and reads from r
		config:        config,
		w:             w,
		r:             r,
		newStreamChan: make(chan MuxedStreamRequest),
		abortChan:     make(chan struct{}),
		readyList:     NewReadyList(),
		streams:       newActiveStreamMap(config.IsClient),
	}

	m.f.ReadMetaHeaders = hpack.NewDecoder(4096, func(hpack.HeaderField) {})
	// Initialise the settings to identify this connection and confirm the other end is sane.
	handshakeSetting := http2.Setting{ID: SettingMuxerMagic, Val: MuxerMagicEdge}
	compressionSetting := http2.Setting{ID: SettingCompression, Val: config.CompressionQuality.toH2Setting()}
	if CompressionIsSupported() {
		log.Debug("Compression is supported")
		m.compressionQuality = config.CompressionQuality.getPreset()
	} else {
		log.Debug("Compression is not supported")
		compressionSetting = http2.Setting{ID: SettingCompression, Val: 0}
	}

	expectedMagic := MuxerMagicOrigin
	if config.IsClient {
		handshakeSetting.Val = MuxerMagicOrigin
		expectedMagic = MuxerMagicEdge
	}
	errChan := make(chan error, 2)
	// Simultaneously send our settings and verify the peer's settings.
	go func() { errChan <- m.f.WriteSettings(handshakeSetting, compressionSetting) }()
	go func() { errChan <- m.readPeerSettings(expectedMagic) }()
	err := joinErrorsWithTimeout(errChan, 2, config.Timeout, ErrHandshakeTimeout)
	if err != nil {
		return nil, err
	}
	// Confirm sanity by ACKing the frame and expecting an ACK for our frame.
	// Not strictly necessary, but let's pretend to be H2-like.
	go func() { errChan <- m.f.WriteSettingsAck() }()
	go func() { errChan <- m.readPeerSettingsAck() }()
	err = joinErrorsWithTimeout(errChan, 2, config.Timeout, ErrHandshakeTimeout)
	if err != nil {
		return nil, err
	}

	// set up the reader/writer pair ready for serve
	streamErrors := NewStreamErrorMap()
	goAwayChan := make(chan http2.ErrCode, 1)
	updateRTTChan := make(chan *roundTripMeasurement, 1)
	updateReceiveWindowChan := make(chan uint32, 1)
	updateSendWindowChan := make(chan uint32, 1)
	updateInBoundBytesChan := make(chan uint64)
	updateOutBoundBytesChan := make(chan uint64)
	inBoundCounter := NewAtomicCounter(0)
	outBoundCounter := NewAtomicCounter(0)
	pingTimestamp := NewPingTimestamp()
	connActive := NewSignal()
	idleDuration := config.HeartbeatInterval
	// Sanity check to ensure idleDuration is sane
	if idleDuration == 0 || idleDuration < defaultTimeout {
		idleDuration = defaultTimeout
		config.Logger.Warn("Minimum idle time has been adjusted to ", defaultTimeout)
	}
	maxRetries := config.MaxHeartbeats
	if maxRetries == 0 {
		maxRetries = defaultRetries
		config.Logger.Warn("Minimum number of unacked heartbeats to send before closing the connection has been adjusted to ", maxRetries)
	}

	m.explicitShutdown = NewBooleanFuse()
	m.muxReader = &MuxReader{
		f:                       m.f,
		handler:                 m.config.Handler,
		streams:                 m.streams,
		readyList:               m.readyList,
		streamErrors:            streamErrors,
		goAwayChan:              goAwayChan,
		abortChan:               m.abortChan,
		pingTimestamp:           pingTimestamp,
		connActive:              connActive,
		initialStreamWindow:     defaultWindowSize,
		streamWindowMax:         maxWindowSize,
		r:                       m.r,
		updateRTTChan:           updateRTTChan,
		updateReceiveWindowChan: updateReceiveWindowChan,
		updateSendWindowChan:    updateSendWindowChan,
		bytesRead:               inBoundCounter,
		updateInBoundBytesChan:  updateInBoundBytesChan,
	}
	m.muxWriter = &MuxWriter{
		f:                       m.f,
		streams:                 m.streams,
		streamErrors:            streamErrors,
		readyStreamChan:         m.readyList.ReadyChannel(),
		newStreamChan:           m.newStreamChan,
		goAwayChan:              goAwayChan,
		abortChan:               m.abortChan,
		pingTimestamp:           pingTimestamp,
		idleTimer:               NewIdleTimer(idleDuration, maxRetries),
		connActiveChan:          connActive.WaitChannel(),
		maxFrameSize:            defaultFrameSize,
		updateReceiveWindowChan: updateReceiveWindowChan,
		updateSendWindowChan:    updateSendWindowChan,
		bytesWrote:              outBoundCounter,
		updateOutBoundBytesChan: updateOutBoundBytesChan,
	}
	m.muxWriter.headerEncoder = hpack.NewEncoder(&m.muxWriter.headerBuffer)

	compBytesBefore, compBytesAfter := NewAtomicCounter(0), NewAtomicCounter(0)

	m.muxMetricsUpdater = newMuxMetricsUpdater(
		updateRTTChan,
		updateReceiveWindowChan,
		updateSendWindowChan,
		updateInBoundBytesChan,
		updateOutBoundBytesChan,
		m.abortChan,
		compBytesBefore,
		compBytesAfter,
	)

	if m.compressionQuality.dictSize > 0 && m.compressionQuality.nDicts > 0 {
		nd, sz := m.compressionQuality.nDicts, m.compressionQuality.dictSize
		writeDicts, dictChan := newH2WriteDictionaries(
			nd,
			sz,
			m.compressionQuality.quality,
			compBytesBefore,
			compBytesAfter,
		)
		readDicts := newH2ReadDictionaries(nd, sz)
		m.muxReader.dictionaries = h2Dictionaries{read: &readDicts, write: writeDicts}
		m.muxWriter.useDictChan = dictChan
	}

	return m, nil
}

func (m *Muxer) readPeerSettings(magic uint32) error {
	frame, err := m.f.ReadFrame()
	if err != nil {
		return err
	}
	settingsFrame, ok := frame.(*http2.SettingsFrame)
	if !ok {
		return ErrBadHandshakeNotSettings
	}
	if settingsFrame.Header().Flags != 0 {
		return ErrBadHandshakeUnexpectedAck
	}
	peerMagic, ok := settingsFrame.Value(SettingMuxerMagic)
	if !ok {
		return ErrBadHandshakeNoMagic
	}
	if magic != peerMagic {
		return ErrBadHandshakeWrongMagic
	}
	peerCompression, ok := settingsFrame.Value(SettingCompression)
	if !ok {
		m.compressionQuality = compressionPresets[CompressionNone]
		return nil
	}
	ver, fmt, sz, nd := parseCompressionSettingVal(peerCompression)
	if ver != compressionVersion || fmt != compressionFormat || sz == 0 || nd == 0 {
		m.compressionQuality = compressionPresets[CompressionNone]
		return nil
	}
	// Values used for compression are the minimum between the two peers
	if sz < m.compressionQuality.dictSize {
		m.compressionQuality.dictSize = sz
	}
	if nd < m.compressionQuality.nDicts {
		m.compressionQuality.nDicts = nd
	}
	return nil
}

func (m *Muxer) readPeerSettingsAck() error {
	frame, err := m.f.ReadFrame()
	if err != nil {
		return err
	}
	settingsFrame, ok := frame.(*http2.SettingsFrame)
	if !ok {
		return ErrBadHandshakeNotSettingsAck
	}
	if settingsFrame.Header().Flags != http2.FlagSettingsAck {
		return ErrBadHandshakeUnexpectedSettings
	}
	return nil
}

func joinErrorsWithTimeout(errChan <-chan error, receiveCount int, timeout time.Duration, timeoutError error) error {
	for i := 0; i < receiveCount; i++ {
		select {
		case err := <-errChan:
			if err != nil {
				return err
			}
		case <-time.After(timeout):
			return timeoutError
		}
	}
	return nil
}

func (m *Muxer) Serve(ctx context.Context) error {
	errGroup, _ := errgroup.WithContext(ctx)
	errGroup.Go(func() error {
		err := m.muxReader.run(m.config.Logger)
		m.explicitShutdown.Fuse(false)
		m.r.Close()
		m.abort()
		return err
	})

	errGroup.Go(func() error {
		err := m.muxWriter.run(m.config.Logger)
		m.explicitShutdown.Fuse(false)
		m.w.Close()
		m.abort()
		return err
	})

	errGroup.Go(func() error {
		err := m.muxMetricsUpdater.run(m.config.Logger)
		return err
	})

	err := errGroup.Wait()
	if isUnexpectedTunnelError(err, m.explicitShutdown.Value()) {
		return err
	}
	return nil
}

func (m *Muxer) Shutdown() {
	m.explicitShutdown.Fuse(true)
	m.muxReader.Shutdown()
}

// isUnexpectedTunnelError identifies errors that are expected when shutting down the h2mux tunnel.
// The set of expected errors changes depending on whether we initiated shutdown or not.
func isUnexpectedTunnelError(err error, expectedShutdown bool) bool {
	if err == nil {
		return false
	}
	if !expectedShutdown {
		return true
	}
	return !isConnectionClosedError(err)
}

func isConnectionClosedError(err error) bool {
	if err == io.EOF {
		return true
	}
	if err == io.ErrClosedPipe {
		return true
	}
	if err.Error() == "tls: use of closed connection" {
		return true
	}
	if strings.HasSuffix(err.Error(), "use of closed network connection") {
		return true
	}
	return false
}

// OpenStream opens a new data stream with the given headers.
// Called by the proxy server and tunnel
func (m *Muxer) OpenStream(headers []Header, body io.Reader) (*MuxedStream, error) {
	stream := &MuxedStream{
		responseHeadersReceived: make(chan struct{}),
		readBuffer:              NewSharedBuffer(),
		writeBuffer:             &bytes.Buffer{},
		receiveWindow:           defaultWindowSize,
		receiveWindowCurrentMax: defaultWindowSize, // Initial window size limit. Exponentially increase it when receiveWindow is exhausted
		receiveWindowMax:        maxWindowSize,
		sendWindow:              defaultWindowSize,
		readyList:               m.readyList,
		writeHeaders:            headers,
		dictionaries:            m.muxReader.dictionaries,
	}

	select {
	// Will be received by the mux writer
	case m.newStreamChan <- MuxedStreamRequest{stream: stream, body: body}:
	case <-m.abortChan:
		return nil, ErrConnectionClosed
	}
	select {
	case <-stream.responseHeadersReceived:
		return stream, nil
	case <-m.abortChan:
		return nil, ErrConnectionClosed
	}
}

func (m *Muxer) Metrics() *MuxerMetrics {
	return m.muxMetricsUpdater.Metrics()
}

func (m *Muxer) abort() {
	m.abortOnce.Do(func() {
		close(m.abortChan)
		m.streams.Abort()
	})
}

// TimerRetries returns how many retries/ticks have elapsed since the connection was last marked active
func (m *Muxer) TimerRetries() uint64 {
	return m.muxWriter.idleTimer.RetryCount()
}
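
// Illustrative sketch, not part of the original commit: the expected
// client-side lifecycle of a Muxer over any io.ReadWriteCloser conn. Error
// handling is elided; the test file below exercises the same flow in full.
func exampleMuxerLifecycle(conn io.ReadWriteCloser) {
	muxer, err := Handshake(conn, conn, MuxerConfig{
		IsClient: true,
		Name:     "example",
		Logger:   log.NewEntry(log.New()),
	})
	if err != nil {
		return
	}
	go muxer.Serve(context.Background()) // run the read/write/metrics loops
	stream, err := muxer.OpenStream([]Header{{Name: "test", Value: "value"}}, nil)
	if err == nil {
		stream.Close()
	}
	muxer.Shutdown() // explicit shutdown; Serve then returns nil
}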
@ -0,0 +1,960 @@
|
||||||
|
package h2mux
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMain(m *testing.M) {
|
||||||
|
if os.Getenv("VERBOSE") == "1" {
|
||||||
|
log.SetLevel(log.DebugLevel)
|
||||||
|
}
|
||||||
|
os.Exit(m.Run())
|
||||||
|
}
|
||||||
|
|
||||||
|
type DefaultMuxerPair struct {
|
||||||
|
OriginMuxConfig MuxerConfig
|
||||||
|
OriginMux *Muxer
|
||||||
|
OriginConn net.Conn
|
||||||
|
EdgeMuxConfig MuxerConfig
|
||||||
|
EdgeMux *Muxer
|
||||||
|
EdgeConn net.Conn
|
||||||
|
doneC chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDefaultMuxerPair() *DefaultMuxerPair {
|
||||||
|
origin, edge := net.Pipe()
|
||||||
|
return &DefaultMuxerPair{
|
||||||
|
OriginMuxConfig: MuxerConfig{
|
||||||
|
Timeout: time.Second,
|
||||||
|
IsClient: true,
|
||||||
|
Name: "origin",
|
||||||
|
Logger: log.NewEntry(log.New()),
|
||||||
|
},
|
||||||
|
OriginConn: origin,
|
||||||
|
EdgeMuxConfig: MuxerConfig{
|
||||||
|
Timeout: time.Second,
|
||||||
|
IsClient: false,
|
||||||
|
Name: "edge",
|
||||||
|
Logger: log.NewEntry(log.New()),
|
||||||
|
},
|
||||||
|
EdgeConn: edge,
|
||||||
|
doneC: make(chan struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewCompressedMuxerPair(quality CompressionSetting) *DefaultMuxerPair {
|
||||||
|
origin, edge := net.Pipe()
|
||||||
|
return &DefaultMuxerPair{
|
||||||
|
OriginMuxConfig: MuxerConfig{
|
||||||
|
Timeout: time.Second,
|
||||||
|
IsClient: true,
|
||||||
|
Name: "origin",
|
||||||
|
CompressionQuality: quality,
|
||||||
|
Logger: log.NewEntry(log.New()),
|
||||||
|
},
|
||||||
|
OriginConn: origin,
|
||||||
|
EdgeMuxConfig: MuxerConfig{
|
||||||
|
Timeout: time.Second,
|
||||||
|
IsClient: false,
|
||||||
|
Name: "edge",
|
||||||
|
CompressionQuality: quality,
|
||||||
|
Logger: log.NewEntry(log.New()),
|
||||||
|
},
|
||||||
|
EdgeConn: edge,
|
||||||
|
doneC: make(chan struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *DefaultMuxerPair) Handshake(t *testing.T) {
|
||||||
|
edgeErrC := make(chan error)
|
||||||
|
originErrC := make(chan error)
|
||||||
|
go func() {
|
||||||
|
var err error
|
||||||
|
p.EdgeMux, err = Handshake(p.EdgeConn, p.EdgeConn, p.EdgeMuxConfig)
|
||||||
|
edgeErrC <- err
|
||||||
|
}()
|
||||||
|
go func() {
|
||||||
|
var err error
|
||||||
|
p.OriginMux, err = Handshake(p.OriginConn, p.OriginConn, p.OriginMuxConfig)
|
||||||
|
originErrC <- err
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case err := <-edgeErrC:
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("edge handshake failure: %s", err)
|
||||||
|
}
|
||||||
|
case <-time.After(time.Second * 5):
|
||||||
|
t.Fatalf("edge handshake timeout")
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case err := <-originErrC:
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("origin handshake failure: %s", err)
|
||||||
|
}
|
||||||
|
case <-time.After(time.Second * 5):
|
||||||
|
t.Fatalf("origin handshake timeout")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *DefaultMuxerPair) HandshakeAndServe(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
p.Handshake(t)
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(2)
|
||||||
|
go func() {
|
||||||
|
err := p.EdgeMux.Serve(ctx)
|
||||||
|
if err != nil && err != io.EOF && err != io.ErrClosedPipe {
|
||||||
|
t.Errorf("error in edge muxer Serve(): %s", err)
|
||||||
|
}
|
||||||
|
p.OriginMux.Shutdown()
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
go func() {
|
||||||
|
err := p.OriginMux.Serve(ctx)
|
||||||
|
if err != nil && err != io.EOF && err != io.ErrClosedPipe {
|
||||||
|
t.Errorf("error in origin muxer Serve(): %s", err)
|
||||||
|
}
|
||||||
|
p.EdgeMux.Shutdown()
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
go func() {
|
||||||
|
// notify when both muxes have stopped serving
|
||||||
|
wg.Wait()
|
||||||
|
close(p.doneC)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *DefaultMuxerPair) Wait(t *testing.T) {
|
||||||
|
select {
|
||||||
|
case <-p.doneC:
|
||||||
|
return
|
||||||
|
case <-time.After(5 * time.Second):
|
||||||
|
t.Fatal("timeout waiting for shutdown")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHandshake(t *testing.T) {
|
||||||
|
muxPair := NewDefaultMuxerPair()
|
||||||
|
muxPair.Handshake(t)
|
||||||
|
AssertIfPipeReadable(t, muxPair.OriginConn)
|
||||||
|
AssertIfPipeReadable(t, muxPair.EdgeConn)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSingleStream(t *testing.T) {
|
||||||
|
closeC := make(chan struct{})
|
||||||
|
muxPair := NewDefaultMuxerPair()
|
||||||
|
muxPair.OriginMuxConfig.Handler = MuxedStreamFunc(func(stream *MuxedStream) error {
|
||||||
|
defer close(closeC)
|
||||||
|
if len(stream.Headers) != 1 {
|
||||||
|
t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers))
|
||||||
|
}
|
||||||
|
if stream.Headers[0].Name != "test-header" {
|
||||||
|
t.Fatalf("expected header name %s, got %s", "test-header", stream.Headers[0].Name)
|
||||||
|
}
|
||||||
|
if stream.Headers[0].Value != "headerValue" {
|
||||||
|
t.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value)
|
||||||
|
}
|
||||||
|
stream.WriteHeaders([]Header{
|
||||||
|
{Name: "response-header", Value: "responseValue"},
|
||||||
|
})
|
||||||
|
buf := []byte("Hello world")
|
||||||
|
stream.Write(buf)
|
||||||
|
// after this receive, the edge closed the stream
|
||||||
|
<-closeC
|
||||||
|
n, err := io.ReadFull(stream, buf)
|
||||||
|
if n > 0 {
|
||||||
|
t.Fatalf("read %d bytes after EOF", n)
|
||||||
|
}
|
||||||
|
if err != io.EOF {
|
||||||
|
t.Fatalf("expected EOF, got %s", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
muxPair.HandshakeAndServe(t)
|
||||||
|
|
||||||
|
stream, err := muxPair.EdgeMux.OpenStream(
|
||||||
|
[]Header{{Name: "test-header", Value: "headerValue"}},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("error in OpenStream: %s", err)
|
||||||
|
}
|
||||||
|
if len(stream.Headers) != 1 {
|
||||||
|
t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers))
|
||||||
|
}
|
||||||
|
if stream.Headers[0].Name != "response-header" {
|
||||||
|
t.Fatalf("expected header name %s, got %s", "response-header", stream.Headers[0].Name)
|
||||||
|
}
|
||||||
|
if stream.Headers[0].Value != "responseValue" {
|
||||||
|
t.Fatalf("expected header value %s, got %s", "responseValue", stream.Headers[0].Value)
|
||||||
|
}
|
||||||
|
responseBody := make([]byte, 11)
|
||||||
|
n, err := io.ReadFull(stream, responseBody)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("error from (*MuxedStream).Read: %s", err)
|
||||||
|
}
|
||||||
|
if n != len(responseBody) {
|
||||||
|
t.Fatalf("expected response body to have %d bytes, got %d", len(responseBody), n)
|
||||||
|
}
|
||||||
|
if string(responseBody) != "Hello world" {
|
||||||
|
t.Fatalf("expected response body %s, got %s", "Hello world", responseBody)
|
||||||
|
}
|
||||||
|
stream.Close()
|
||||||
|
closeC <- struct{}{}
|
||||||
|
n, err = stream.Write([]byte("aaaaa"))
|
||||||
|
if n > 0 {
|
||||||
|
t.Fatalf("wrote %d bytes after EOF", n)
|
||||||
|
}
|
||||||
|
if err != io.EOF {
|
||||||
|
t.Fatalf("expected EOF, got %s", err)
|
||||||
|
}
|
||||||
|
<-closeC
|
||||||
|
}

func TestSingleStreamLargeResponseBody(t *testing.T) {
	muxPair := NewDefaultMuxerPair()
	bodySize := 1 << 24
	streamReady := make(chan struct{})
	muxPair.OriginMuxConfig.Handler = MuxedStreamFunc(func(stream *MuxedStream) error {
		if len(stream.Headers) != 1 {
			t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers))
		}
		if stream.Headers[0].Name != "test-header" {
			t.Fatalf("expected header name %s, got %s", "test-header", stream.Headers[0].Name)
		}
		if stream.Headers[0].Value != "headerValue" {
			t.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value)
		}
		stream.WriteHeaders([]Header{
			{Name: "response-header", Value: "responseValue"},
		})
		payload := make([]byte, bodySize)
		for i := range payload {
			payload[i] = byte(i % 256)
		}
		t.Log("Writing payload...")
		n, err := stream.Write(payload)
		t.Logf("Wrote %d bytes into the stream", n)
		if err != nil {
			t.Fatalf("origin write error: %s", err)
		}
		if n != len(payload) {
			t.Fatalf("origin short write: %d/%d bytes", n, len(payload))
		}
		t.Log("Payload written; signaling that the stream is ready")
		streamReady <- struct{}{}

		return nil
	})
	muxPair.HandshakeAndServe(t)

	stream, err := muxPair.EdgeMux.OpenStream(
		[]Header{{Name: "test-header", Value: "headerValue"}},
		nil,
	)
	if err != nil {
		t.Fatalf("error in OpenStream: %s", err)
	}
	if len(stream.Headers) != 1 {
		t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers))
	}
	if stream.Headers[0].Name != "response-header" {
		t.Fatalf("expected header name %s, got %s", "response-header", stream.Headers[0].Name)
	}
	if stream.Headers[0].Value != "responseValue" {
		t.Fatalf("expected header value %s, got %s", "responseValue", stream.Headers[0].Value)
	}
	responseBody := make([]byte, bodySize)

	<-streamReady
	t.Log("Received stream ready signal; resuming the test")

	n, err := io.ReadFull(stream, responseBody)
	if err != nil {
		t.Fatalf("error from (*MuxedStream).Read: %s", err)
	}
	if n != len(responseBody) {
		t.Fatalf("expected response body to have %d bytes, got %d", len(responseBody), n)
	}
}

func TestMultipleStreams(t *testing.T) {
	muxPair := NewDefaultMuxerPair()
	maxStreams := 64
	errorsC := make(chan error, maxStreams)
	muxPair.OriginMuxConfig.Handler = MuxedStreamFunc(func(stream *MuxedStream) error {
		if len(stream.Headers) != 1 {
			t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers))
		}
		if stream.Headers[0].Name != "client-token" {
			t.Fatalf("expected header name %s, got %s", "client-token", stream.Headers[0].Name)
		}
		log.Debugf("Got request for stream %s", stream.Headers[0].Value)
		stream.WriteHeaders([]Header{
			{Name: "response-token", Value: stream.Headers[0].Value},
		})
		log.Debugf("Wrote headers for stream %s", stream.Headers[0].Value)
		stream.Write([]byte("OK"))
		log.Debugf("Wrote body for stream %s", stream.Headers[0].Value)
		return nil
	})
	muxPair.HandshakeAndServe(t)

	var wg sync.WaitGroup
	wg.Add(maxStreams)
	for i := 0; i < maxStreams; i++ {
		go func(tokenId int) {
			defer wg.Done()
			tokenString := fmt.Sprintf("%d", tokenId)
			stream, err := muxPair.EdgeMux.OpenStream(
				[]Header{{Name: "client-token", Value: tokenString}},
				nil,
			)
			log.Debugf("Got headers for stream %d", tokenId)
			if err != nil {
				errorsC <- err
				return
			}
			if len(stream.Headers) != 1 {
				errorsC <- fmt.Errorf("stream %d has error: expected %d headers, got %d", stream.streamID, 1, len(stream.Headers))
				return
			}
			if stream.Headers[0].Name != "response-token" {
				errorsC <- fmt.Errorf("stream %d has error: expected header name %s, got %s", stream.streamID, "response-token", stream.Headers[0].Name)
				return
			}
			if stream.Headers[0].Value != tokenString {
				errorsC <- fmt.Errorf("stream %d has error: expected header value %s, got %s", stream.streamID, tokenString, stream.Headers[0].Value)
				return
			}
			responseBody := make([]byte, 2)
			n, err := io.ReadFull(stream, responseBody)
			if err != nil {
				errorsC <- fmt.Errorf("stream %d has error: error from (*MuxedStream).Read: %s", stream.streamID, err)
				return
			}
			if n != len(responseBody) {
				errorsC <- fmt.Errorf("stream %d has error: expected response body to have %d bytes, got %d", stream.streamID, len(responseBody), n)
				return
			}
			if string(responseBody) != "OK" {
				errorsC <- fmt.Errorf("stream %d has error: expected response body %s, got %s", stream.streamID, "OK", responseBody)
				return
			}
		}(i)
	}
	wg.Wait()
	close(errorsC)
	testFail := false
	for err := range errorsC {
		testFail = true
		log.Error(err)
	}
	if testFail {
		t.Fatalf("TestMultipleStreams failed")
	}
}

func TestMultipleStreamsFlowControl(t *testing.T) {
	maxStreams := 32
	errorsC := make(chan error, maxStreams)
	streamReady := make(chan struct{})
	responseSizes := make([]int32, maxStreams)
	for i := 0; i < maxStreams; i++ {
		responseSizes[i] = rand.Int31n(int32(defaultWindowSize << 4))
	}
	muxPair := NewDefaultMuxerPair()
	muxPair.OriginMuxConfig.Handler = MuxedStreamFunc(func(stream *MuxedStream) error {
		if len(stream.Headers) != 1 {
			t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers))
		}
		if stream.Headers[0].Name != "test-header" {
			t.Fatalf("expected header name %s, got %s", "test-header", stream.Headers[0].Name)
		}
		if stream.Headers[0].Value != "headerValue" {
			t.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value)
		}
		stream.WriteHeaders([]Header{
			{Name: "response-header", Value: "responseValue"},
		})
		payload := make([]byte, responseSizes[(stream.streamID-2)/2])
		for i := range payload {
			payload[i] = byte(i % 256)
		}
		n, err := stream.Write(payload)
		streamReady <- struct{}{}
		if err != nil {
			t.Fatalf("origin write error: %s", err)
		}
		if n != len(payload) {
			t.Fatalf("origin short write: %d/%d bytes", n, len(payload))
		}
		return nil
	})
	muxPair.HandshakeAndServe(t)

	var wg sync.WaitGroup
	wg.Add(maxStreams)
	for i := 0; i < maxStreams; i++ {
		go func(tokenId int) {
			defer wg.Done()
			stream, err := muxPair.EdgeMux.OpenStream(
				[]Header{{Name: "test-header", Value: "headerValue"}},
				nil,
			)
			if err != nil {
				errorsC <- fmt.Errorf("stream %d error in OpenStream: %s", stream.streamID, err)
				return
			}
			if len(stream.Headers) != 1 {
				errorsC <- fmt.Errorf("stream %d expected %d headers, got %d", stream.streamID, 1, len(stream.Headers))
				return
			}
			if stream.Headers[0].Name != "response-header" {
				errorsC <- fmt.Errorf("stream %d expected header name %s, got %s", stream.streamID, "response-header", stream.Headers[0].Name)
				return
			}
			if stream.Headers[0].Value != "responseValue" {
				errorsC <- fmt.Errorf("stream %d expected header value %s, got %s", stream.streamID, "responseValue", stream.Headers[0].Value)
				return
			}

			<-streamReady
			responseBody := make([]byte, responseSizes[(stream.streamID-2)/2])
			n, err := io.ReadFull(stream, responseBody)
			if err != nil {
				errorsC <- fmt.Errorf("stream %d error from (*MuxedStream).Read: %s", stream.streamID, err)
				return
			}
			if n != len(responseBody) {
				errorsC <- fmt.Errorf("stream %d expected response body to have %d bytes, got %d", stream.streamID, len(responseBody), n)
				return
			}
		}(i)
	}
	wg.Wait()
	close(errorsC)
	testFail := false
	for err := range errorsC {
		testFail = true
		log.Error(err)
	}
	if testFail {
		t.Fatalf("TestMultipleStreamsFlowControl failed")
	}
}

func TestGracefulShutdown(t *testing.T) {
	sendC := make(chan struct{})
	responseBuf := bytes.Repeat([]byte("Hello world"), 65536)
	muxPair := NewDefaultMuxerPair()
	muxPair.OriginMuxConfig.Handler = MuxedStreamFunc(func(stream *MuxedStream) error {
		stream.WriteHeaders([]Header{
			{Name: "response-header", Value: "responseValue"},
		})
		<-sendC
		log.Debugf("Writing %d bytes", len(responseBuf))
		stream.Write(responseBuf)
		stream.CloseWrite()
		log.Debugf("Wrote %d bytes", len(responseBuf))
		// Reading from the stream will block until the edge closes its end of the stream.
		// Otherwise, we'll close the whole connection before receiving the 'stream closed'
		// message from the edge.
		// Graceful shutdown works if you omit this, it just gives spurious errors for now -
		// TODO ignore errors when writing 'stream closed' and we're shutting down.
		stream.Read([]byte{0})
		log.Debugf("Handler ends")
		return nil
	})
	muxPair.HandshakeAndServe(t)

	stream, err := muxPair.EdgeMux.OpenStream(
		[]Header{{Name: "test-header", Value: "headerValue"}},
		nil,
	)
	// Start graceful shutdown of the edge mux - this should also close the origin mux when done
	muxPair.EdgeMux.Shutdown()
	close(sendC)
	if err != nil {
		t.Fatalf("error in OpenStream: %s", err)
	}
	responseBody := make([]byte, len(responseBuf))
	log.Debugf("Waiting for %d bytes", len(responseBuf))
	n, err := io.ReadFull(stream, responseBody)
	if err != nil {
		t.Fatalf("error from (*MuxedStream).Read with %d bytes read: %s", n, err)
	}
	if n != len(responseBody) {
		t.Fatalf("expected response body to have %d bytes, got %d", len(responseBody), n)
	}
	if !bytes.Equal(responseBuf, responseBody) {
		t.Fatalf("response body mismatch")
	}
	stream.Close()
	muxPair.Wait(t)
}

func TestUnexpectedShutdown(t *testing.T) {
	sendC := make(chan struct{})
	handlerFinishC := make(chan struct{})
	responseBuf := bytes.Repeat([]byte("Hello world"), 65536)
	muxPair := NewDefaultMuxerPair()
	muxPair.OriginMuxConfig.Handler = MuxedStreamFunc(func(stream *MuxedStream) error {
		defer close(handlerFinishC)
		stream.WriteHeaders([]Header{
			{Name: "response-header", Value: "responseValue"},
		})
		<-sendC
		n, err := stream.Read([]byte{0})
		if err != io.EOF {
			t.Fatalf("unexpected error from (*MuxedStream).Read: %s", err)
		}
		if n != 0 {
			t.Fatalf("expected empty read, got %d bytes", n)
		}
		// Write comes after read, because write buffers data before it is flushed. It wouldn't know about EOF
		// until some time later. Calling read first forces it to know about EOF now.
		_, err = stream.Write(responseBuf)
		if err != io.EOF {
			t.Fatalf("unexpected error from (*MuxedStream).Write: %s", err)
		}
		return nil
	})
	muxPair.HandshakeAndServe(t)

	stream, err := muxPair.EdgeMux.OpenStream(
		[]Header{{Name: "test-header", Value: "headerValue"}},
		nil,
	)
	// Close the underlying connection before telling the origin to write.
	muxPair.EdgeConn.Close()
	close(sendC)
	if err != nil {
		t.Fatalf("error in OpenStream: %s", err)
	}
	responseBody := make([]byte, len(responseBuf))
	n, err := io.ReadFull(stream, responseBody)
	if err != io.EOF {
		t.Fatalf("unexpected error from (*MuxedStream).Read: %s", err)
	}
	if n != 0 {
		t.Fatalf("expected response body to have %d bytes, got %d", 0, n)
	}
	// The write ordering requirement explained in the origin handler applies here too.
	_, err = stream.Write(responseBuf)
	if err != io.EOF {
		t.Fatalf("unexpected error from (*MuxedStream).Write: %s", err)
	}
	<-handlerFinishC
}

func EchoHandler(stream *MuxedStream) error {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "Hello, world!\n\n# REQUEST HEADERS:\n\n")
	for _, header := range stream.Headers {
		fmt.Fprintf(&buf, "[%s] = %s\n", header.Name, header.Value)
	}
	stream.WriteHeaders([]Header{
		{Name: ":status", Value: "200"},
		{Name: "server", Value: "Echo-server/1.0"},
		{Name: "date", Value: time.Now().Format(time.RFC850)},
		{Name: "content-type", Value: "text/html; charset=utf-8"},
		{Name: "content-length", Value: strconv.Itoa(buf.Len())},
	})
	buf.WriteTo(stream)
	return nil
}

func TestOpenAfterDisconnect(t *testing.T) {
	for i := 0; i < 3; i++ {
		muxPair := NewDefaultMuxerPair()
		muxPair.OriginMuxConfig.Handler = MuxedStreamFunc(EchoHandler)
		muxPair.HandshakeAndServe(t)

		switch i {
		case 0:
			// Close both directions of the connection to cause EOF on both peers.
			muxPair.OriginConn.Close()
			muxPair.EdgeConn.Close()
		case 1:
			// Close origin conn to cause EOF on origin first.
			muxPair.OriginConn.Close()
		case 2:
			// Close edge conn to cause EOF on edge first.
			muxPair.EdgeConn.Close()
		}

		_, err := muxPair.EdgeMux.OpenStream(
			[]Header{{Name: "test-header", Value: "headerValue"}},
			nil,
		)
		if err != ErrConnectionClosed {
			t.Fatalf("unexpected error in OpenStream: %s", err)
		}
	}
}

func TestHPACK(t *testing.T) {
	muxPair := NewDefaultMuxerPair()
	muxPair.OriginMuxConfig.Handler = MuxedStreamFunc(EchoHandler)
	muxPair.HandshakeAndServe(t)

	stream, err := muxPair.EdgeMux.OpenStream(
		[]Header{
			{Name: ":method", Value: "RPC"},
			{Name: ":scheme", Value: "capnp"},
			{Name: ":path", Value: "*"},
		},
		nil,
	)
	if err != nil {
		t.Fatalf("error in OpenStream: %s", err)
	}
	stream.Close()

	for i := 0; i < 3; i++ {
		stream, err := muxPair.EdgeMux.OpenStream(
			[]Header{
				{Name: ":method", Value: "GET"},
				{Name: ":scheme", Value: "https"},
				{Name: ":authority", Value: "tunnel.otterlyadorable.co.uk"},
				{Name: ":path", Value: "/get"},
				{Name: "accept-encoding", Value: "gzip"},
				{Name: "cf-ray", Value: "378948953f044408-SFO-DOG"},
				{Name: "cf-visitor", Value: "{\"scheme\":\"https\"}"},
				{Name: "cf-connecting-ip", Value: "2400:cb00:0025:010d:0000:0000:0000:0001"},
				{Name: "x-forwarded-for", Value: "2400:cb00:0025:010d:0000:0000:0000:0001"},
				{Name: "x-forwarded-proto", Value: "https"},
				{Name: "accept-language", Value: "en-gb"},
				{Name: "referer", Value: "https://tunnel.otterlyadorable.co.uk/"},
				{Name: "cookie", Value: "__cfduid=d4555095065f92daedc059490771967d81493032162"},
				{Name: "connection", Value: "Keep-Alive"},
				{Name: "cf-ipcountry", Value: "US"},
				{Name: "accept", Value: "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"},
				{Name: "user-agent", Value: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.4"},
			},
			nil,
		)
		if err != nil {
			t.Fatalf("error in OpenStream: %s", err)
		}
		if len(stream.Headers) == 0 {
			t.Fatal("response has no headers")
		}
		if stream.Headers[0].Name != ":status" {
			t.Fatalf("first header should be status, found %s instead", stream.Headers[0].Name)
		}
		if stream.Headers[0].Value != "200" {
			t.Fatalf("expected status 200, got %s", stream.Headers[0].Value)
		}
		ioutil.ReadAll(stream)
		stream.Close()
	}
}

func AssertIfPipeReadable(t *testing.T, pipe io.ReadCloser) {
	errC := make(chan error)
	go func() {
		b := []byte{0}
		n, err := pipe.Read(b)
		if n > 0 {
			t.Fatalf("read pipe was not empty")
		}
		errC <- err
	}()
	select {
	case err := <-errC:
		if err != nil {
			t.Fatalf("read error: %s", err)
		}
	case <-time.After(100 * time.Millisecond):
		// nothing to read
	}
}

func TestMultipleStreamsWithDictionaries(t *testing.T) {

	for q := CompressionNone; q <= CompressionMax; q++ {
		muxPair := NewCompressedMuxerPair(q)

		htmlBody := `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"` +
			`"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">` +
			`<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">` +
			`<head>` +
			` <title>Your page title here</title>` +
			`</head>` +
			`<body>` +
			`<h1>Your major heading here</h1>` +
			`<p>` +
			`This is a regular text paragraph.` +
			`</p>` +
			`<ul>` +
			` <li>` +
			` First bullet of a bullet list.` +
			` </li>` +
			` <li>` +
			` This is the <em>second</em> bullet.` +
			` </li>` +
			`</ul>` +
			`</body>` +
			`</html>`

		muxPair.OriginMuxConfig.Handler = MuxedStreamFunc(func(stream *MuxedStream) error {
			var contentType string
			var pathHeader Header

			for _, h := range stream.Headers {
				if h.Name == ":path" {
					pathHeader = h
					break
				}
			}

			if pathHeader.Name != ":path" {
				panic("Couldn't find :path header in test")
			}

			if strings.Contains(pathHeader.Value, "html") {
				contentType = "text/html; charset=utf-8"
			} else if strings.Contains(pathHeader.Value, "js") {
				contentType = "application/javascript"
			} else if strings.Contains(pathHeader.Value, "css") {
				contentType = "text/css"
			} else {
				contentType = "img/gif"
			}

			stream.WriteHeaders([]Header{
				{Name: "content-type", Value: contentType},
			})
			stream.Write([]byte(strings.Replace(htmlBody, "paragraph", pathHeader.Value, 1) + stream.Headers[5].Value))

			return nil
		})

		muxPair.HandshakeAndServe(t)

		var wg sync.WaitGroup

		paths := []string{
			"/html1",
			"/html2?sa:ds",
			"/html3",
			"/css1",
			"/html1",
			"/html2?sa:ds",
			"/html3",
			"/css1",
			"/css2",
			"/css3",
			"/js",
			"/js",
			"/js",
			"/js2",
			"/img2",
			"/html1",
			"/html2?sa:ds",
			"/html3",
			"/css1",
			"/css2",
			"/css3",
			"/js",
			"/js",
			"/js",
			"/js2",
			"/img1",
		}

		wg.Add(len(paths))

		for i, s := range paths {
			go func(i int, path string) {
				stream, err := muxPair.EdgeMux.OpenStream(
					[]Header{
						{Name: ":method", Value: "GET"},
						{Name: ":scheme", Value: "https"},
						{Name: ":authority", Value: "tunnel.otterlyadorable.co.uk"},
						{Name: ":path", Value: path},
						{Name: "cf-ray", Value: "378948953f044408-SFO-DOG"},
						{Name: "idx", Value: strconv.Itoa(i)},
						{Name: "accept-encoding", Value: "gzip, br"},
					},
					nil,
				)
				if err != nil {
					t.Fatalf("error in OpenStream: %s", err)
				}

				expectBody := strings.Replace(htmlBody, "paragraph", path, 1) + strconv.Itoa(i)
				responseBody := make([]byte, len(expectBody)*2)
				n, err := stream.Read(responseBody)
				if err != nil {
					log.Printf("error from (*MuxedStream).Read: %s", err)
					t.Fatalf("error from (*MuxedStream).Read: %s", err)
				}
				if n != len(expectBody) {
					log.Printf("expected response body to have %d bytes, got %d", len(expectBody), n)
					t.Fatalf("expected response body to have %d bytes, got %d", len(expectBody), n)
				}
				if string(responseBody[:n]) != expectBody {
					log.Printf("expected response body %s, got %s", expectBody, responseBody[:n])
					t.Fatalf("expected response body %s, got %s", expectBody, responseBody[:n])
				}
				wg.Done()
			}(i, s)
			time.Sleep(1 * time.Millisecond)
		}
		wg.Wait()
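		// Hedged reading of the assertion below (an added note, not extra
		// behavior): the test demands a cross-stream compression ratio
		// better than 10:1, i.e. it fails unless compBytesBefore exceeds
		// 10*compBytesAfter whenever compression is enabled.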
		if q > CompressionNone && muxPair.OriginMux.muxMetricsUpdater.compBytesBefore.Value() <= 10*muxPair.OriginMux.muxMetricsUpdater.compBytesAfter.Value() {
			t.Fatalf("Cross-stream compression is expected to give a better compression ratio")
		}
	}
}

func sampleSiteHandler(stream *MuxedStream) error {
	var contentType string
	var pathHeader Header

	for _, h := range stream.Headers {
		if h.Name == ":path" {
			pathHeader = h
			break
		}
	}

	if pathHeader.Name != ":path" {
		panic("Couldn't find :path header in test")
	}

	if strings.Contains(pathHeader.Value, "html") {
		contentType = "text/html; charset=utf-8"
	} else if strings.Contains(pathHeader.Value, "js") {
		contentType = "application/javascript"
	} else if strings.Contains(pathHeader.Value, "css") {
		contentType = "text/css"
	} else {
		contentType = "img/gif"
	}
	stream.WriteHeaders([]Header{
		{Name: "content-type", Value: contentType},
	})
	log.Debugf("Wrote headers for stream %s", pathHeader.Value)
	b, _ := ioutil.ReadFile("./sample" + pathHeader.Value)
	stream.Write(b)
	log.Debugf("Wrote body for stream %s", pathHeader.Value)
	return nil
}

func sampleSiteTest(t *testing.T, muxPair *DefaultMuxerPair, path string) {
	stream, err := muxPair.EdgeMux.OpenStream(
		[]Header{
			{Name: ":method", Value: "GET"},
			{Name: ":scheme", Value: "https"},
			{Name: ":authority", Value: "tunnel.otterlyadorable.co.uk"},
			{Name: ":path", Value: path},
			{Name: "accept-encoding", Value: "br, gzip"},
			{Name: "cf-ray", Value: "378948953f044408-SFO-DOG"},
		},
		nil,
	)
	if err != nil {
		t.Fatalf("error in OpenStream: %s", err)
	}
	expectBody, _ := ioutil.ReadFile("./sample" + path)
	responseBody := make([]byte, len(expectBody))
	n, err := io.ReadFull(stream, responseBody)
	log.Debugf("Got body for stream %s", path)
	if err != nil {
		t.Fatalf("error from (*MuxedStream).Read: %s", err)
	}
	if n != len(expectBody) {
		t.Fatalf("expected response body to have %d bytes, got %d", len(expectBody), n)
	}
	if string(responseBody[:n]) != string(expectBody) {
		t.Fatalf("expected response body %s, got %s", expectBody, responseBody[:n])
	}
}

func TestSampleSiteWithDictionaries(t *testing.T) {
	for q := CompressionNone; q <= CompressionMax; q++ {
		muxPair := NewCompressedMuxerPair(q)
		muxPair.OriginMuxConfig.Handler = MuxedStreamFunc(sampleSiteHandler)
		muxPair.HandshakeAndServe(t)

		var wg sync.WaitGroup

		paths := []string{
			"/index.html",
			"/index2.html",
			"/index1.html",
			"/ghost-url.min.js",
			"/jquery.fitvids.js",
			"/index1.html",
			"/index2.html",
			"/index.html",
		}

		wg.Add(len(paths))
		for _, s := range paths {
			go func(path string) {
				sampleSiteTest(t, muxPair, path)
				wg.Done()
			}(s)
		}
		wg.Wait()

		if q > CompressionNone && muxPair.OriginMux.muxMetricsUpdater.compBytesBefore.Value() <= 10*muxPair.OriginMux.muxMetricsUpdater.compBytesAfter.Value() {
			t.Fatalf("Cross-stream compression is expected to give a better compression ratio")
		}
	}
}

func TestLongSiteWithDictionaries(t *testing.T) {
	for q := CompressionNone; q <= CompressionMedium; q++ {
		muxPair := NewCompressedMuxerPair(q)
		muxPair.OriginMuxConfig.Handler = MuxedStreamFunc(sampleSiteHandler)
		muxPair.HandshakeAndServe(t)

		var wg sync.WaitGroup
		rand.Seed(time.Now().Unix())

		paths := []string{
			"/index.html",
			"/index1.html",
			"/index2.html",
			"/ghost-url.min.js",
			"/jquery.fitvids.js"}

		tstLen := 1000
		wg.Add(tstLen)
		for i := 0; i < tstLen; i++ {
			path := paths[rand.Int()%len(paths)]
			go func(path string) {
				sampleSiteTest(t, muxPair, path)
				wg.Done()
			}(path)
		}
		wg.Wait()

		if q > CompressionNone && muxPair.OriginMux.muxMetricsUpdater.compBytesBefore.Value() <= 100*muxPair.OriginMux.muxMetricsUpdater.compBytesAfter.Value() {
			t.Fatalf("Cross-stream compression is expected to give a better compression ratio")
		}
	}
}

@@ -0,0 +1,81 @@
package h2mux

import (
	"math/rand"
	"sync"
	"time"
)

// IdleTimer is a type of Timer designed for managing heartbeats on an idle connection.
// The timer ticks on an interval with added jitter to avoid accidental synchronisation
// between two endpoints. It tracks the number of retries/ticks since the connection was
// last marked active.
//
// The methods of IdleTimer must not be called while a goroutine is reading from C.
type IdleTimer struct {
	// The channel on which ticks are delivered.
	C <-chan time.Time

	// A timer used to measure idle connection time. Reset after sending data.
	idleTimer *time.Timer
	// The maximum length of time a connection is idle before sending a ping.
	idleDuration time.Duration
	// A pseudorandom source used to add jitter to the idle duration.
	randomSource *rand.Rand
	// The maximum number of retries allowed.
	maxRetries uint64
	// The number of retries since the connection was last marked active.
	retries uint64
	// A lock to prevent race conditions while checking retries.
	stateLock sync.RWMutex
}

func NewIdleTimer(idleDuration time.Duration, maxRetries uint64) *IdleTimer {
	t := &IdleTimer{
		idleTimer:    time.NewTimer(idleDuration),
		idleDuration: idleDuration,
		randomSource: rand.New(rand.NewSource(time.Now().Unix())),
		maxRetries:   maxRetries,
	}
	t.C = t.idleTimer.C
	return t
}

// Retry should be called when retrying the idle timeout. If the maximum number of retries
// has been met, returns false.
// After calling this function and sending a heartbeat, call ResetTimer. Since sending the
// heartbeat could be a blocking operation, we reset the timer after the write completes
// to avoid it expiring during the write.
func (t *IdleTimer) Retry() bool {
	t.stateLock.Lock()
	defer t.stateLock.Unlock()
	if t.retries >= t.maxRetries {
		return false
	}
	t.retries++
	return true
}

func (t *IdleTimer) RetryCount() uint64 {
	t.stateLock.RLock()
	defer t.stateLock.RUnlock()
	return t.retries
}

// MarkActive resets the idle connection timer and suppresses any outstanding idle events.
func (t *IdleTimer) MarkActive() {
	if !t.idleTimer.Stop() {
		// eat the timer event to prevent spurious pings
		<-t.idleTimer.C
	}
	t.stateLock.Lock()
	t.retries = 0
	t.stateLock.Unlock()
	t.ResetTimer()
}

// ResetTimer resets the idle timer according to the configured duration, with some added jitter.
func (t *IdleTimer) ResetTimer() {
	jitter := time.Duration(t.randomSource.Int63n(int64(t.idleDuration)))
	t.idleTimer.Reset(t.idleDuration + jitter)
}
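
// A minimal usage sketch, not part of this commit: driving IdleTimer from a
// heartbeat loop. sendPing and closeC are hypothetical stand-ins for whatever
// the muxer uses to emit PING frames and to signal shutdown, and the sketch
// assumes an extra "errors" import. Retry is checked before sending so the
// loop gives up after maxRetries, and ResetTimer runs only after the blocking
// write completes, as the Retry doc comment above advises.
func idleHeartbeatLoop(t *IdleTimer, sendPing func() error, closeC <-chan struct{}) error {
	for {
		select {
		case <-t.C:
			if !t.Retry() {
				return errors.New("connection is idle: max heartbeat retries reached")
			}
			if err := sendPing(); err != nil {
				return err
			}
			t.ResetTimer()
		case <-closeC:
			return nil
		}
	}
}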

@@ -0,0 +1,31 @@
package h2mux

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestRetry(t *testing.T) {
	timer := NewIdleTimer(time.Second, 2)
	assert.Equal(t, uint64(0), timer.RetryCount())
	ok := timer.Retry()
	assert.True(t, ok)
	assert.Equal(t, uint64(1), timer.RetryCount())
	ok = timer.Retry()
	assert.True(t, ok)
	assert.Equal(t, uint64(2), timer.RetryCount())
	ok = timer.Retry()
	assert.False(t, ok)
}

func TestMarkActive(t *testing.T) {
	timer := NewIdleTimer(time.Second, 2)
	assert.Equal(t, uint64(0), timer.RetryCount())
	ok := timer.Retry()
	assert.True(t, ok)
	assert.Equal(t, uint64(1), timer.RetryCount())
	timer.MarkActive()
	assert.Equal(t, uint64(0), timer.RetryCount())
}

@@ -0,0 +1,289 @@
package h2mux

import (
	"bytes"
	"io"
	"sync"
)

type ReadWriteLengther interface {
	io.ReadWriter
	Reset()
	Len() int
}

type ReadWriteClosedCloser interface {
	io.ReadWriteCloser
	Closed() bool
}

type MuxedStream struct {
	Headers []Header

	streamID uint32

	responseHeadersReceived chan struct{}

	readBuffer    ReadWriteClosedCloser
	receiveWindow uint32
	// current window size limit. Exponentially increase it when it's exhausted
	receiveWindowCurrentMax uint32
	// limit set in http2 spec. 2^31-1
	receiveWindowMax uint32

	// nonzero if a WINDOW_UPDATE frame for a stream needs to be sent
	windowUpdate uint32

	writeLock sync.Mutex
	// The zero value for Buffer is an empty buffer ready to use.
	writeBuffer ReadWriteLengther

	sendWindow uint32

	readyList    *ReadyList
	headersSent  bool
	writeHeaders []Header
	// true if the write end of this stream has been closed
	writeEOF bool
	// true if we have sent EOF to the peer
	sentEOF bool
	// true if the peer sent us an EOF
	receivedEOF bool

	// dictionary that was used to compress the stream
	receivedUseDict bool
	method          string
	contentType     string
	path            string
	dictionaries    h2Dictionaries
	readBufferLock  sync.RWMutex
}

func (s *MuxedStream) Read(p []byte) (n int, err error) {
	if s.dictionaries.read != nil {
		s.readBufferLock.RLock()
		b := s.readBuffer
		s.readBufferLock.RUnlock()
		return b.Read(p)
	}
	return s.readBuffer.Read(p)
}

func (s *MuxedStream) Write(p []byte) (n int, err error) {
	ok := assignDictToStream(s, p)
	if !ok {
		s.writeLock.Lock()
	}
	defer s.writeLock.Unlock()
	if s.writeEOF {
		return 0, io.EOF
	}
	n, err = s.writeBuffer.Write(p)
	if n != len(p) || err != nil {
		return n, err
	}
	s.writeNotify()
	return n, nil
}

func (s *MuxedStream) Close() error {
	// TUN-115: Close the write buffer before the read buffer.
	// In the case of shutdown, read will not get new data, but the write buffer can still receive
	// new data. Closing read before write allows application to race between a failed read and a
	// successful write, even though this close should appear to be atomic.
	// This can't happen the other way because reads may succeed after a failed write; if we read
	// past EOF the application will block until we close the buffer.
	err := s.CloseWrite()
	if err != nil {
		if s.CloseRead() == nil {
			// don't bother the caller with errors if at least one close succeeded
			return nil
		}
		return err
	}
	return s.CloseRead()
}

func (s *MuxedStream) CloseRead() error {
	return s.readBuffer.Close()
}

func (s *MuxedStream) CloseWrite() error {
	s.writeLock.Lock()
	defer s.writeLock.Unlock()
	if s.writeEOF {
		return io.EOF
	}
	s.writeEOF = true
	if c, ok := s.writeBuffer.(io.Closer); ok {
		c.Close()
	}
	s.writeNotify()
	return nil
}

func (s *MuxedStream) WriteHeaders(headers []Header) error {
	s.writeLock.Lock()
	defer s.writeLock.Unlock()
	if s.writeHeaders != nil {
		return ErrStreamHeadersSent
	}

	if s.dictionaries.write != nil {
		dictWriter := s.dictionaries.write.getDictWriter(s, headers)
		if dictWriter != nil {
			s.writeBuffer = dictWriter
		}
	}

	s.writeHeaders = headers
	s.headersSent = false
	s.writeNotify()
	return nil
}

func (s *MuxedStream) getReceiveWindow() uint32 {
	s.writeLock.Lock()
	defer s.writeLock.Unlock()
	return s.receiveWindow
}

func (s *MuxedStream) getSendWindow() uint32 {
	s.writeLock.Lock()
	defer s.writeLock.Unlock()
	return s.sendWindow
}

// writeNotify must happen while holding writeLock.
func (s *MuxedStream) writeNotify() {
	s.readyList.Signal(s.streamID)
}

// Called by muxreader when it gets a WindowUpdateFrame. This is an update of the peer's
// receive window (how much data we can send).
func (s *MuxedStream) replenishSendWindow(bytes uint32) {
	s.writeLock.Lock()
	s.sendWindow += bytes
	s.writeNotify()
	s.writeLock.Unlock()
}

// Called by muxreader when it receives a data frame
func (s *MuxedStream) consumeReceiveWindow(bytes uint32) bool {
	s.writeLock.Lock()
	defer s.writeLock.Unlock()
	// received data size is greater than receive window/buffer
	if s.receiveWindow < bytes {
		return false
	}
	s.receiveWindow -= bytes
	if s.receiveWindow < s.receiveWindowCurrentMax/2 {
		// exhausting client send window (how much data client can send)
		if s.receiveWindowCurrentMax < s.receiveWindowMax {
			s.receiveWindowCurrentMax <<= 1
		}
		s.windowUpdate += s.receiveWindowCurrentMax - s.receiveWindow
		s.writeNotify()
	}
	return true
}
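
// Worked example of the growth rule above, using the numbers from
// TestFlowControlSingleStream (an illustration only, not extra behavior):
// with receiveWindow = receiveWindowCurrentMax = 65535 and
// receiveWindowMax = 262140, consuming 32770 bytes leaves
// receiveWindow = 32765, which is below 65535/2, so
// receiveWindowCurrentMax doubles to 131070 and
// windowUpdate += 131070 - 32765 = 98305. The next getChunk adds
// windowUpdate back into receiveWindow, restoring it to the new
// 131070 maximum.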

// receiveEOF should be called when the peer indicates no more data will be sent.
// Returns true if the socket is now closed (i.e. the write side is already closed).
func (s *MuxedStream) receiveEOF() (closed bool) {
	s.writeLock.Lock()
	defer s.writeLock.Unlock()
	s.receivedEOF = true
	s.CloseRead()
	return s.writeEOF && s.writeBuffer.Len() == 0
}

func (s *MuxedStream) gotReceiveEOF() bool {
	s.writeLock.Lock()
	defer s.writeLock.Unlock()
	return s.receivedEOF
}

// MuxedStreamReader implements io.ReadCloser for the read end of the stream.
// This is useful for passing to functions that close the object after it is done reading,
// but you still want to be able to write data afterwards (e.g. http.Client).
type MuxedStreamReader struct {
	*MuxedStream
}

func (s MuxedStreamReader) Read(p []byte) (n int, err error) {
	return s.MuxedStream.Read(p)
}

func (s MuxedStreamReader) Close() error {
	return s.MuxedStream.CloseRead()
}
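
// A hedged sketch of the pattern the comment above describes, not code from
// this commit: hand the read end to a consumer that closes its reader when
// done, while the write end stays usable. The drain loop and buffer size are
// illustrative only.
func streamWriteAfterReaderClosed(stream *MuxedStream) error {
	go func(body io.ReadCloser) {
		// The consumer owns the reader and closes it when finished;
		// only the read side closes, so the stream can still be written.
		defer body.Close()
		buf := make([]byte, 4096)
		for {
			if _, err := body.Read(buf); err != nil {
				return
			}
		}
	}(MuxedStreamReader{stream})
	// The write end is unaffected by the reader's Close.
	_, err := stream.Write([]byte("still writable"))
	return err
}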

// streamChunk represents a chunk of data to be written.
type streamChunk struct {
	streamID uint32
	// true if a HEADERS frame should be sent
	sendHeaders bool
	headers     []Header
	// nonzero if a WINDOW_UPDATE frame should be sent
	windowUpdate uint32
	// true if data frames should be sent
	sendData bool
	eof      bool
	buffer   bytes.Buffer
}

// getChunk atomically extracts a chunk of data to be written by MuxWriter.
// The data returned will not exceed the send window for this stream.
func (s *MuxedStream) getChunk() *streamChunk {
	s.writeLock.Lock()
	defer s.writeLock.Unlock()

	chunk := &streamChunk{
		streamID:     s.streamID,
		sendHeaders:  !s.headersSent,
		headers:      s.writeHeaders,
		windowUpdate: s.windowUpdate,
		sendData:     !s.sentEOF,
		eof:          s.writeEOF && uint32(s.writeBuffer.Len()) <= s.sendWindow,
	}

	// Copies at most s.sendWindow bytes
	writeLen, _ := io.CopyN(&chunk.buffer, s.writeBuffer, int64(s.sendWindow))
	s.sendWindow -= uint32(writeLen)
	s.receiveWindow += s.windowUpdate
	s.windowUpdate = 0
	s.headersSent = true

	// if this chunk contains the end of the stream, close the stream now
	if chunk.sendData && chunk.eof {
		s.sentEOF = true
	}

	return chunk
}

func (c *streamChunk) sendHeadersFrame() bool {
	return c.sendHeaders
}

func (c *streamChunk) sendWindowUpdateFrame() bool {
	return c.windowUpdate > 0
}

func (c *streamChunk) sendDataFrame() bool {
	return c.sendData
}

func (c *streamChunk) nextDataFrame(frameSize int) (payload []byte, endStream bool) {
	payload = c.buffer.Next(frameSize)
	if c.buffer.Len() == 0 {
		// this is the last data frame in this chunk
		c.sendData = false
		if c.eof {
			endStream = true
		}
	}
	return
}
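
// Hedged sketch of the consumer side, not part of this commit: how a writer
// such as MuxWriter might drain a chunk produced by getChunk. The three send
// callbacks are hypothetical stand-ins for the real frame-writing calls; the
// loop structure mirrors the predicates defined above.
func drainChunk(
	c *streamChunk,
	frameSize int,
	sendHeaders func(streamID uint32, headers []Header) error,
	sendWindowUpdate func(streamID uint32, delta uint32) error,
	sendData func(streamID uint32, payload []byte, endStream bool) error,
) error {
	if c.sendHeadersFrame() {
		if err := sendHeaders(c.streamID, c.headers); err != nil {
			return err
		}
	}
	if c.sendWindowUpdateFrame() {
		if err := sendWindowUpdate(c.streamID, c.windowUpdate); err != nil {
			return err
		}
	}
	// nextDataFrame flips sendData off once the buffer is empty, and flags
	// endStream on the final frame when the chunk carries EOF.
	for c.sendDataFrame() {
		payload, endStream := c.nextDataFrame(frameSize)
		if err := sendData(c.streamID, payload, endStream); err != nil {
			return err
		}
	}
	return nil
}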

@@ -0,0 +1,92 @@
package h2mux

import (
	"bytes"
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
)

const testWindowSize uint32 = 65535
const testMaxWindowSize uint32 = testWindowSize << 2

// Only sending WINDOW_UPDATE frame, so sendWindow should never change
func TestFlowControlSingleStream(t *testing.T) {
	stream := &MuxedStream{
		responseHeadersReceived: make(chan struct{}),
		readBuffer:              NewSharedBuffer(),
		writeBuffer:             &bytes.Buffer{},
		receiveWindow:           testWindowSize,
		receiveWindowCurrentMax: testWindowSize,
		receiveWindowMax:        testMaxWindowSize,
		sendWindow:              testWindowSize,
		readyList:               NewReadyList(),
	}
	assert.True(t, stream.consumeReceiveWindow(testWindowSize/2))
	dataSent := testWindowSize / 2
	assert.Equal(t, testWindowSize-dataSent, stream.receiveWindow)
	assert.Equal(t, testWindowSize, stream.receiveWindowCurrentMax)
	assert.Equal(t, uint32(0), stream.windowUpdate)
	tempWindowUpdate := stream.windowUpdate

	streamChunk := stream.getChunk()
	assert.Equal(t, tempWindowUpdate, streamChunk.windowUpdate)
	assert.Equal(t, testWindowSize-dataSent, stream.receiveWindow)
	assert.Equal(t, uint32(0), stream.windowUpdate)
	assert.Equal(t, testWindowSize, stream.sendWindow)

	assert.True(t, stream.consumeReceiveWindow(2))
	dataSent += 2
	assert.Equal(t, testWindowSize-dataSent, stream.receiveWindow)
	assert.Equal(t, testWindowSize<<1, stream.receiveWindowCurrentMax)
	assert.Equal(t, (testWindowSize<<1)-stream.receiveWindow, stream.windowUpdate)
	tempWindowUpdate = stream.windowUpdate

	streamChunk = stream.getChunk()
	assert.Equal(t, tempWindowUpdate, streamChunk.windowUpdate)
	assert.Equal(t, testWindowSize<<1, stream.receiveWindow)
	assert.Equal(t, uint32(0), stream.windowUpdate)
	assert.Equal(t, testWindowSize, stream.sendWindow)

	assert.True(t, stream.consumeReceiveWindow(testWindowSize+10))
	dataSent = testWindowSize + 10
	assert.Equal(t, (testWindowSize<<1)-dataSent, stream.receiveWindow)
	assert.Equal(t, testWindowSize<<2, stream.receiveWindowCurrentMax)
	assert.Equal(t, (testWindowSize<<2)-stream.receiveWindow, stream.windowUpdate)
	tempWindowUpdate = stream.windowUpdate

	streamChunk = stream.getChunk()
	assert.Equal(t, tempWindowUpdate, streamChunk.windowUpdate)
	assert.Equal(t, testWindowSize<<2, stream.receiveWindow)
	assert.Equal(t, uint32(0), stream.windowUpdate)
	assert.Equal(t, testWindowSize, stream.sendWindow)

	assert.False(t, stream.consumeReceiveWindow(testMaxWindowSize+1))
	assert.Equal(t, testWindowSize<<2, stream.receiveWindow)
	assert.Equal(t, testMaxWindowSize, stream.receiveWindowCurrentMax)
}

func TestMuxedStreamEOF(t *testing.T) {
	for i := 0; i < 4096; i++ {
		readyList := NewReadyList()
		stream := &MuxedStream{
			streamID:         1,
			readBuffer:       NewSharedBuffer(),
			receiveWindow:    65536,
			receiveWindowMax: 65536,
			sendWindow:       65536,
			readyList:        readyList,
		}

		go func() { stream.Close() }()
		n, err := stream.Read([]byte{0})
		assert.Equal(t, io.EOF, err)
		assert.Equal(t, 0, n)
		// Write comes after read, because write buffers data before it is flushed. It wouldn't know about EOF
		// until some time later. Calling read first forces it to know about EOF now.
		n, err = stream.Write([]byte{1})
		assert.Equal(t, io.EOF, err)
		assert.Equal(t, 0, n)
	}
}

@@ -0,0 +1,246 @@
package h2mux

import (
	"sync"
	"time"

	"github.com/golang-collections/collections/queue"

	log "github.com/sirupsen/logrus"
)

const (
	// dataPoints is the number of data points used to compute average receive window and send window size
	dataPoints = 100
	// updateFreq is set to 1 sec so we can get inbound & outbound bytes/sec
	updateFreq = time.Second
)

type muxMetricsUpdater struct {
	// rttData keeps record of rtt, rttMin, rttMax and last measured time
	rttData *rttData
	// receiveWindowData keeps record of receive window measurement
	receiveWindowData *flowControlData
	// sendWindowData keeps record of send window measurement
	sendWindowData *flowControlData
	// inBoundRate is incoming bytes/sec
	inBoundRate *rate
	// outBoundRate is outgoing bytes/sec
	outBoundRate *rate
	// updateRTTChan is the channel to receive new RTT measurement from muxReader
	updateRTTChan <-chan *roundTripMeasurement
	// updateReceiveWindowChan is the channel to receive updated receiveWindow size from muxReader and muxWriter
	updateReceiveWindowChan <-chan uint32
	// updateSendWindowChan is the channel to receive updated sendWindow size from muxReader and muxWriter
	updateSendWindowChan <-chan uint32
	// updateInBoundBytesChan is the channel to receive bytesRead from muxReader
	updateInBoundBytesChan <-chan uint64
	// updateOutBoundBytesChan is the channel to receive bytesWrote from muxWriter
	updateOutBoundBytesChan <-chan uint64
	// abortChan signals the muxMetricsUpdater to shut down
	abortChan <-chan struct{}

	compBytesBefore, compBytesAfter *AtomicCounter
}

type MuxerMetrics struct {
	RTT, RTTMin, RTTMax                                              time.Duration
	ReceiveWindowAve, SendWindowAve                                  float64
	ReceiveWindowMin, ReceiveWindowMax, SendWindowMin, SendWindowMax uint32
	InBoundRateCurr, InBoundRateMin, InBoundRateMax                  uint64
	OutBoundRateCurr, OutBoundRateMin, OutBoundRateMax               uint64
	CompBytesBefore, CompBytesAfter                                  *AtomicCounter
}

func (m *MuxerMetrics) CompRateAve() float64 {
	if m.CompBytesBefore.Value() == 0 {
		return 1.
	}
	return float64(m.CompBytesAfter.Value()) / float64(m.CompBytesBefore.Value())
}

type roundTripMeasurement struct {
	receiveTime, sendTime time.Time
}

type rttData struct {
	rtt, rttMin, rttMax time.Duration
	lastMeasurementTime time.Time
	lock                sync.RWMutex
}

type flowControlData struct {
	sum      uint64
	min, max uint32
	queue    *queue.Queue
	lock     sync.RWMutex
}

type rate struct {
	curr     uint64
	min, max uint64
	lock     sync.RWMutex
}

func newMuxMetricsUpdater(
	updateRTTChan <-chan *roundTripMeasurement,
	updateReceiveWindowChan <-chan uint32,
	updateSendWindowChan <-chan uint32,
	updateInBoundBytesChan <-chan uint64,
	updateOutBoundBytesChan <-chan uint64,
	abortChan <-chan struct{},
	compBytesBefore, compBytesAfter *AtomicCounter,
) *muxMetricsUpdater {
	return &muxMetricsUpdater{
		rttData:                 newRTTData(),
		receiveWindowData:       newFlowControlData(),
		sendWindowData:          newFlowControlData(),
		inBoundRate:             newRate(),
		outBoundRate:            newRate(),
		updateRTTChan:           updateRTTChan,
		updateReceiveWindowChan: updateReceiveWindowChan,
		updateSendWindowChan:    updateSendWindowChan,
		updateInBoundBytesChan:  updateInBoundBytesChan,
		updateOutBoundBytesChan: updateOutBoundBytesChan,
		abortChan:               abortChan,
		compBytesBefore:         compBytesBefore,
		compBytesAfter:          compBytesAfter,
	}
}

func (updater *muxMetricsUpdater) Metrics() *MuxerMetrics {
	m := &MuxerMetrics{}
	m.RTT, m.RTTMin, m.RTTMax = updater.rttData.metrics()
	m.ReceiveWindowAve, m.ReceiveWindowMin, m.ReceiveWindowMax = updater.receiveWindowData.metrics()
	m.SendWindowAve, m.SendWindowMin, m.SendWindowMax = updater.sendWindowData.metrics()
	m.InBoundRateCurr, m.InBoundRateMin, m.InBoundRateMax = updater.inBoundRate.get()
	m.OutBoundRateCurr, m.OutBoundRateMin, m.OutBoundRateMax = updater.outBoundRate.get()
	m.CompBytesBefore, m.CompBytesAfter = updater.compBytesBefore, updater.compBytesAfter
	return m
}

func (updater *muxMetricsUpdater) run(parentLogger *log.Entry) error {
	logger := parentLogger.WithFields(log.Fields{
		"subsystem": "mux",
		"dir":       "metrics",
	})
	defer logger.Debug("event loop finished")
	for {
		select {
		case <-updater.abortChan:
			logger.Infof("Stopping mux metrics updater")
			return nil
		case roundTripMeasurement := <-updater.updateRTTChan:
			go updater.rttData.update(roundTripMeasurement)
			logger.Debug("Update rtt")
		case receiveWindow := <-updater.updateReceiveWindowChan:
			go updater.receiveWindowData.update(receiveWindow)
			logger.Debug("Update receive window")
		case sendWindow := <-updater.updateSendWindowChan:
			go updater.sendWindowData.update(sendWindow)
			logger.Debug("Update send window")
		case inBoundBytes := <-updater.updateInBoundBytesChan:
			// inBoundBytes is bytes/sec because the update interval is 1 sec
			go updater.inBoundRate.update(inBoundBytes)
			logger.Debugf("Inbound bytes %d", inBoundBytes)
		case outBoundBytes := <-updater.updateOutBoundBytesChan:
			// outBoundBytes is bytes/sec because the update interval is 1 sec
			go updater.outBoundRate.update(outBoundBytes)
			logger.Debugf("Outbound bytes %d", outBoundBytes)
		}
	}
}

func newRTTData() *rttData {
	return &rttData{}
}

func (r *rttData) update(measurement *roundTripMeasurement) {
	r.lock.Lock()
	defer r.lock.Unlock()
	// discard pings before lastMeasurementTime
	if r.lastMeasurementTime.After(measurement.sendTime) {
		return
	}
	r.lastMeasurementTime = measurement.sendTime
	r.rtt = measurement.receiveTime.Sub(measurement.sendTime)
	if r.rttMax < r.rtt {
		r.rttMax = r.rtt
	}
	if r.rttMin == 0 || r.rttMin > r.rtt {
		r.rttMin = r.rtt
	}
}

func (r *rttData) metrics() (rtt, rttMin, rttMax time.Duration) {
	r.lock.RLock()
	defer r.lock.RUnlock()
	return r.rtt, r.rttMin, r.rttMax
}

func newFlowControlData() *flowControlData {
	return &flowControlData{queue: queue.New()}
}

func (f *flowControlData) update(measurement uint32) {
	f.lock.Lock()
	defer f.lock.Unlock()
	var firstItem uint32
	// store new data into queue, remove oldest data if queue is full
	f.queue.Enqueue(measurement)
	if f.queue.Len() > dataPoints {
		// data type should always be uint32
		firstItem = f.queue.Dequeue().(uint32)
	}
	// if (measurement - firstItem) < 0, uint64(measurement - firstItem)
	// will overflow and become a large positive number
	f.sum += uint64(measurement)
	f.sum -= uint64(firstItem)
	if measurement > f.max {
		f.max = measurement
	}
	if f.min == 0 || measurement < f.min {
		f.min = measurement
	}
}

// caller of ave() should acquire lock first
func (f *flowControlData) ave() float64 {
	if f.queue.Len() == 0 {
		return 0
	}
	return float64(f.sum) / float64(f.queue.Len())
}

func (f *flowControlData) metrics() (ave float64, min, max uint32) {
	f.lock.RLock()
	defer f.lock.RUnlock()
	return f.ave(), f.min, f.max
}

func newRate() *rate {
	return &rate{}
}

func (r *rate) update(measurement uint64) {
	r.lock.Lock()
	defer r.lock.Unlock()
	r.curr = measurement
	// if measurement is 0, then there is no incoming/outgoing connection, don't update min/max
	if r.curr == 0 {
		return
	}
	if measurement > r.max {
		r.max = measurement
	}
	if r.min == 0 || measurement < r.min {
		r.min = measurement
	}
}

func (r *rate) get() (curr, min, max uint64) {
	r.lock.RLock()
	defer r.lock.RUnlock()
	return r.curr, r.min, r.max
}
|
|
@ -0,0 +1,180 @@
package h2mux

import (
	"sync"
	"testing"
	"time"

	log "github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
)

func ave(sum uint64, len int) float64 {
	return float64(sum) / float64(len)
}

func TestRTTUpdate(t *testing.T) {
	r := newRTTData()
	start := time.Now()
	// send at 0 ms, receive at 2 ms, RTT = 2ms
	m := &roundTripMeasurement{receiveTime: start.Add(2 * time.Millisecond), sendTime: start}
	r.update(m)
	assert.Equal(t, start, r.lastMeasurementTime)
	assert.Equal(t, 2*time.Millisecond, r.rtt)
	assert.Equal(t, 2*time.Millisecond, r.rttMin)
	assert.Equal(t, 2*time.Millisecond, r.rttMax)

	// send at 3 ms, receive at 6 ms, RTT = 3ms
	m = &roundTripMeasurement{receiveTime: start.Add(6 * time.Millisecond), sendTime: start.Add(3 * time.Millisecond)}
	r.update(m)
	assert.Equal(t, start.Add(3*time.Millisecond), r.lastMeasurementTime)
	assert.Equal(t, 3*time.Millisecond, r.rtt)
	assert.Equal(t, 2*time.Millisecond, r.rttMin)
	assert.Equal(t, 3*time.Millisecond, r.rttMax)

	// send at 7 ms, receive at 8 ms, RTT = 1ms
	m = &roundTripMeasurement{receiveTime: start.Add(8 * time.Millisecond), sendTime: start.Add(7 * time.Millisecond)}
	r.update(m)
	assert.Equal(t, start.Add(7*time.Millisecond), r.lastMeasurementTime)
	assert.Equal(t, 1*time.Millisecond, r.rtt)
	assert.Equal(t, 1*time.Millisecond, r.rttMin)
	assert.Equal(t, 3*time.Millisecond, r.rttMax)

	// send at -2 ms, receive at 0 ms, RTT = 2ms, but this ping was sent before the last measurement,
	// so it will be discarded
	m = &roundTripMeasurement{receiveTime: start, sendTime: start.Add(-2 * time.Millisecond)}
	r.update(m)
	assert.Equal(t, start.Add(7*time.Millisecond), r.lastMeasurementTime)
	assert.Equal(t, 1*time.Millisecond, r.rtt)
	assert.Equal(t, 1*time.Millisecond, r.rttMin)
	assert.Equal(t, 3*time.Millisecond, r.rttMax)
}

func TestFlowControlDataUpdate(t *testing.T) {
	f := newFlowControlData()
	assert.Equal(t, 0, f.queue.Len())
	assert.Equal(t, float64(0), f.ave())

	var sum uint64
	min := maxWindowSize - dataPoints
	max := maxWindowSize
	for i := 1; i <= dataPoints; i++ {
		size := maxWindowSize - uint32(i)
		f.update(size)
		assert.Equal(t, max-uint32(1), f.max)
		assert.Equal(t, size, f.min)

		assert.Equal(t, i, f.queue.Len())

		sum += uint64(size)
		assert.Equal(t, sum, f.sum)
		assert.Equal(t, ave(sum, f.queue.Len()), f.ave())
	}

	// queue is full, should start to dequeue the first element
	for i := 1; i <= dataPoints; i++ {
		f.update(max)
		assert.Equal(t, max, f.max)
		assert.Equal(t, min, f.min)

		assert.Equal(t, dataPoints, f.queue.Len())

		sum += uint64(i)
		assert.Equal(t, sum, f.sum)
		assert.Equal(t, ave(sum, dataPoints), f.ave())
	}
}

func TestMuxMetricsUpdater(t *testing.T) {
	t.Skip("Race condition")
	updateRTTChan := make(chan *roundTripMeasurement)
	updateReceiveWindowChan := make(chan uint32)
	updateSendWindowChan := make(chan uint32)
	updateInBoundBytesChan := make(chan uint64)
	updateOutBoundBytesChan := make(chan uint64)
	abortChan := make(chan struct{})
	errChan := make(chan error)
	compBefore, compAfter := NewAtomicCounter(0), NewAtomicCounter(0)
	m := newMuxMetricsUpdater(updateRTTChan,
		updateReceiveWindowChan,
		updateSendWindowChan,
		updateInBoundBytesChan,
		updateOutBoundBytesChan,
		abortChan,
		compBefore,
		compAfter,
	)
	logger := log.NewEntry(log.New())

	go func() {
		errChan <- m.run(logger)
	}()

	var wg sync.WaitGroup
	wg.Add(2)

	// mock muxReader
	readerStart := time.Now()
	rm := &roundTripMeasurement{receiveTime: readerStart, sendTime: readerStart}
	updateRTTChan <- rm
	go func() {
		defer wg.Done()
		// Be careful if dataPoints is not divisible by 4
		readerSend := readerStart.Add(time.Millisecond)
		for i := 1; i <= dataPoints/4; i++ {
			readerReceive := readerSend.Add(time.Duration(i) * time.Millisecond)
			rm := &roundTripMeasurement{receiveTime: readerReceive, sendTime: readerSend}
			updateRTTChan <- rm
			readerSend = readerReceive.Add(time.Millisecond)

			updateReceiveWindowChan <- uint32(i)
			updateSendWindowChan <- uint32(i)

			updateInBoundBytesChan <- uint64(i)
		}
	}()

	// mock muxWriter
	go func() {
		defer wg.Done()
		for j := dataPoints/4 + 1; j <= dataPoints/2; j++ {
			updateReceiveWindowChan <- uint32(j)
			updateSendWindowChan <- uint32(j)

			// should always be discarded since the send time is before readerSend
			rm := &roundTripMeasurement{receiveTime: readerStart, sendTime: readerStart.Add(-time.Duration(j*dataPoints) * time.Millisecond)}
			updateRTTChan <- rm

			updateOutBoundBytesChan <- uint64(j)
		}
	}()
	wg.Wait()

	metrics := m.Metrics()
	points := dataPoints / 2
	assert.Equal(t, time.Millisecond, metrics.RTTMin)
	assert.Equal(t, time.Duration(dataPoints/4)*time.Millisecond, metrics.RTTMax)

	// sum(1..i) = i*(i+1)/2, ave(1..i) = i*(i+1)/2/i = (i+1)/2
	assert.Equal(t, float64(points+1)/float64(2), metrics.ReceiveWindowAve)
	assert.Equal(t, uint32(1), metrics.ReceiveWindowMin)
	assert.Equal(t, uint32(points), metrics.ReceiveWindowMax)

	assert.Equal(t, float64(points+1)/float64(2), metrics.SendWindowAve)
	assert.Equal(t, uint32(1), metrics.SendWindowMin)
	assert.Equal(t, uint32(points), metrics.SendWindowMax)

	assert.Equal(t, uint64(dataPoints/4), metrics.InBoundRateCurr)
	assert.Equal(t, uint64(1), metrics.InBoundRateMin)
	assert.Equal(t, uint64(dataPoints/4), metrics.InBoundRateMax)

	assert.Equal(t, uint64(dataPoints/2), metrics.OutBoundRateCurr)
	assert.Equal(t, uint64(dataPoints/4+1), metrics.OutBoundRateMin)
	assert.Equal(t, uint64(dataPoints/2), metrics.OutBoundRateMax)

	close(abortChan)
	assert.Nil(t, <-errChan)
	close(errChan)
}
@ -0,0 +1,497 @@
package h2mux

import (
	"bytes"
	"encoding/binary"
	"io"
	"net/url"
	"time"

	log "github.com/sirupsen/logrus"
	"golang.org/x/net/http2"
)

type MuxReader struct {
	// f is used to read HTTP2 frames.
	f *http2.Framer
	// handler provides a callback to receive new streams. if nil, new streams cannot be accepted.
	handler MuxedStreamHandler
	// streams tracks currently-open streams.
	streams *activeStreamMap
	// readyList is used to signal writable streams.
	readyList *ReadyList
	// streamErrors lets us report stream errors to the MuxWriter.
	streamErrors *StreamErrorMap
	// goAwayChan is used to tell the writer to send a GOAWAY message.
	goAwayChan chan<- http2.ErrCode
	// abortChan is used when shutting down ungracefully. When this becomes readable, all activity should stop.
	abortChan <-chan struct{}
	// pingTimestamp is an atomic value containing the latest received ping timestamp.
	pingTimestamp *PingTimestamp
	// connActive is used to signal to the writer that something happened on the connection.
	// This is used to clear idle timeout disconnection deadlines.
	connActive Signal
	// The initial value for the send and receive window of a new stream.
	initialStreamWindow uint32
	// The max value for the send window of a stream.
	streamWindowMax uint32
	// r is a reference to the underlying connection used when shutting down.
	r io.Closer
	// updateRTTChan is the channel to send new RTT measurements to muxerMetricsUpdater
	updateRTTChan chan<- *roundTripMeasurement
	// updateReceiveWindowChan is the channel to update receiveWindow size to muxerMetricsUpdater
	updateReceiveWindowChan chan<- uint32
	// updateSendWindowChan is the channel to update sendWindow size to muxerMetricsUpdater
	updateSendWindowChan chan<- uint32
	// bytesRead is the number of bytes read from data frames since the last time we sent the count to metrics
	bytesRead *AtomicCounter
	// updateInBoundBytesChan is the channel to send bytesRead to muxerMetricsUpdater
	updateInBoundBytesChan chan<- uint64
	// dictionaries holds the h2 cross-stream compression dictionaries
	dictionaries h2Dictionaries
}

func (r *MuxReader) Shutdown() {
	done := r.streams.Shutdown()
	if done == nil {
		return
	}
	r.sendGoAway(http2.ErrCodeNo)
	go func() {
		// close reader side when last stream ends; this will cause the writer to abort
		<-done
		r.r.Close()
	}()
}

func (r *MuxReader) run(parentLogger *log.Entry) error {
	logger := parentLogger.WithFields(log.Fields{
		"subsystem": "mux",
		"dir":       "read",
	})
	defer logger.Debug("event loop finished")

	// routine to periodically update bytesRead
	go func() {
		tickC := time.Tick(updateFreq)
		for {
			select {
			case <-r.abortChan:
				return
			case <-tickC:
				r.updateInBoundBytesChan <- r.bytesRead.Count()
			}
		}
	}()

	for {
		frame, err := r.f.ReadFrame()
		if err != nil {
			switch e := err.(type) {
			case http2.StreamError:
				logger.WithError(err).Warn("stream error")
				r.streamError(e.StreamID, e.Code)
			case http2.ConnectionError:
				logger.WithError(err).Warn("connection error")
				return r.connectionError(err)
			default:
				if isConnectionClosedError(err) {
					if r.streams.Len() == 0 {
						logger.Debug("shutting down")
						return nil
					}
					logger.Warn("connection closed unexpectedly")
					return err
				} else {
					logger.WithError(err).Warn("frame read error")
					return r.connectionError(err)
				}
			}
		}
		r.connActive.Signal()
		logger.WithField("data", frame).Debug("read frame")
		switch f := frame.(type) {
		case *http2.DataFrame:
			err = r.receiveFrameData(f, logger)
		case *http2.MetaHeadersFrame:
			err = r.receiveHeaderData(f)
		case *http2.RSTStreamFrame:
			streamID := f.Header().StreamID
			if streamID == 0 {
				return ErrInvalidStream
			}
			r.streams.Delete(streamID)
		case *http2.PingFrame:
			r.receivePingData(f)
		case *http2.GoAwayFrame:
			err = r.receiveGoAway(f)
		// The receiver of a flow-controlled frame sends a WINDOW_UPDATE frame as it
		// consumes data and frees up space in flow-control windows
		case *http2.WindowUpdateFrame:
			err = r.updateStreamWindow(f)
		case *http2.UnknownFrame:
			switch f.Header().Type {
			case FrameUseDictionary:
				err = r.receiveUseDictionary(f)
			case FrameSetDictionary:
				err = r.receiveSetDictionary(f)
			default:
				err = ErrUnexpectedFrameType
			}
		default:
			err = ErrUnexpectedFrameType
		}
		if err != nil {
			logger.WithField("data", frame).WithError(err).Debug("frame error")
			return r.connectionError(err)
		}
	}
}

func (r *MuxReader) newMuxedStream(streamID uint32) *MuxedStream {
	return &MuxedStream{
		streamID:                streamID,
		readBuffer:              NewSharedBuffer(),
		writeBuffer:             &bytes.Buffer{},
		receiveWindow:           r.initialStreamWindow,
		receiveWindowCurrentMax: r.initialStreamWindow,
		receiveWindowMax:        r.streamWindowMax,
		sendWindow:              r.initialStreamWindow,
		readyList:               r.readyList,
		dictionaries:            r.dictionaries,
	}
}

// getStreamForFrame returns a stream if valid, or an error describing why the stream could not be returned.
func (r *MuxReader) getStreamForFrame(frame http2.Frame) (*MuxedStream, error) {
	sid := frame.Header().StreamID
	if sid == 0 {
		return nil, ErrUnexpectedFrameType
	}
	if stream, ok := r.streams.Get(sid); ok {
		return stream, nil
	}
	if r.streams.IsLocalStreamID(sid) {
		// no stream available, but no error
		return nil, ErrClosedStream
	}
	if sid < r.streams.LastPeerStreamID() {
		// no stream available, stream closed error
		return nil, ErrClosedStream
	}
	return nil, ErrUnknownStream
}

func (r *MuxReader) defaultStreamErrorHandler(err error, header http2.FrameHeader) error {
	if header.Flags.Has(http2.FlagHeadersEndStream) {
		return nil
	} else if err == ErrUnknownStream || err == ErrClosedStream {
		return r.streamError(header.StreamID, http2.ErrCodeStreamClosed)
	} else {
		return err
	}
}

// Receives header frames from a stream. A non-nil error is a connection error.
func (r *MuxReader) receiveHeaderData(frame *http2.MetaHeadersFrame) error {
	var stream *MuxedStream
	sid := frame.Header().StreamID
	if sid == 0 {
		return ErrUnexpectedFrameType
	}
	newStream := r.streams.IsPeerStreamID(sid)
	if newStream {
		// header request
		// TODO support trailers (if stream exists)
		ok, err := r.streams.AcquirePeerID(sid)
		if !ok {
			// ignore new streams while shutting down
			return r.streamError(sid, err)
		}
		stream = r.newMuxedStream(sid)
		// Set the stream. This fails if a stream with that ID already exists or we are shutting down.
		if !r.streams.Set(stream) {
			// got a HEADERS frame for an existing stream
			// TODO support trailers
			return r.streamError(sid, http2.ErrCodeInternal)
		}
	} else {
		// header response
		var err error
		if stream, err = r.getStreamForFrame(frame); err != nil {
			return r.defaultStreamErrorHandler(err, frame.Header())
		}
	}
	headers := make([]Header, 0, len(frame.Fields))
	for _, header := range frame.Fields {
		switch header.Name {
		case ":method":
			stream.method = header.Value
		case ":path":
			u, err := url.Parse(header.Value)
			if err == nil {
				stream.path = u.Path
			}
		case "accept-encoding":
			// remove accept-encoding if dictionaries are enabled
			if r.dictionaries.write != nil {
				continue
			}
		}
		headers = append(headers, Header{Name: header.Name, Value: header.Value})
	}
	stream.Headers = headers
	if frame.Header().Flags.Has(http2.FlagHeadersEndStream) {
		stream.receiveEOF()
		return nil
	}
	if newStream {
		go r.handleStream(stream)
	} else {
		close(stream.responseHeadersReceived)
	}
	return nil
}

func (r *MuxReader) handleStream(stream *MuxedStream) {
	defer stream.Close()
	r.handler.ServeStream(stream)
}

// Receives a data frame from a stream. A non-nil error is a connection error.
func (r *MuxReader) receiveFrameData(frame *http2.DataFrame, parentLogger *log.Entry) error {
	logger := parentLogger.WithField("stream", frame.Header().StreamID)
	stream, err := r.getStreamForFrame(frame)
	if err != nil {
		return r.defaultStreamErrorHandler(err, frame.Header())
	}
	data := frame.Data()
	if len(data) > 0 {
		n, err := stream.readBuffer.Write(data)
		if err != nil {
			return r.streamError(stream.streamID, http2.ErrCodeInternal)
		}
		r.bytesRead.IncrementBy(uint64(n))
	}
	if frame.Header().Flags.Has(http2.FlagDataEndStream) {
		if stream.receiveEOF() {
			r.streams.Delete(stream.streamID)
			logger.Debug("stream closed")
		} else {
			logger.Debug("shutdown receive side")
		}
		return nil
	}
	if !stream.consumeReceiveWindow(uint32(len(data))) {
		return r.streamError(stream.streamID, http2.ErrCodeFlowControl)
	}
	r.updateReceiveWindowChan <- stream.getReceiveWindow()
	return nil
}

// Receive a PING from the peer. Update RTT and send/receive window metrics if it's an ACK.
func (r *MuxReader) receivePingData(frame *http2.PingFrame) {
	ts := int64(binary.LittleEndian.Uint64(frame.Data[:]))
	if !frame.IsAck() {
		r.pingTimestamp.Set(ts)
		return
	}

	// ts is the time at which the probe was sent, echoed back in the ACK.
	// We assume that time.Now() is the time we received that ACK.
	r.updateRTTChan <- &roundTripMeasurement{
		receiveTime: time.Now(),
		sendTime:    time.Unix(0, ts),
	}
}

// Receive a GOAWAY from the peer. Gracefully shut down our connection.
func (r *MuxReader) receiveGoAway(frame *http2.GoAwayFrame) error {
	r.Shutdown()
	// Close all streams above the last processed stream
	lastStream := r.streams.LastLocalStreamID()
	for i := frame.LastStreamID + 2; i <= lastStream; i++ {
		if stream, ok := r.streams.Get(i); ok {
			stream.Close()
		}
	}
	return nil
}

// Receive a USE_DICTIONARY from the peer. Set up the dictionary for the stream.
func (r *MuxReader) receiveUseDictionary(frame *http2.UnknownFrame) error {
	payload := frame.Payload()
	streamID := frame.StreamID

	// Check that the frame is formatted properly
	if len(payload) != 1 {
		return r.streamError(streamID, http2.ErrCodeProtocol)
	}

	stream, err := r.getStreamForFrame(frame)
	if err != nil {
		return err
	}

	if stream.receivedUseDict || stream.dictionaries.read == nil {
		return r.streamError(streamID, http2.ErrCodeInternal)
	}

	stream.receivedUseDict = true
	dictID := payload[0]

	dictReader := stream.dictionaries.read.newReader(stream.readBuffer.(*SharedBuffer), dictID)
	if dictReader == nil {
		return r.streamError(streamID, http2.ErrCodeInternal)
	}

	stream.readBufferLock.Lock()
	stream.readBuffer = dictReader
	stream.readBufferLock.Unlock()

	return nil
}

// Receive a SET_DICTIONARY from the peer. Update dictionaries accordingly.
func (r *MuxReader) receiveSetDictionary(frame *http2.UnknownFrame) (err error) {

	payload := frame.Payload()
	flags := frame.Flags

	stream, err := r.getStreamForFrame(frame)
	if err != nil && err != ErrClosedStream {
		return err
	}
	reader, ok := stream.readBuffer.(*h2DictionaryReader)
	if !ok {
		return r.streamError(frame.StreamID, http2.ErrCodeProtocol)
	}

	// A SET_DICTIONARY frame consists of several
	// Dictionary-Entries that specify how existing dictionaries
	// are to be updated using the current stream data
	// +---------------+---------------+
	// | Dictionary-Entry (+)        ...
	// +---------------+---------------+

	for {
		// Each Dictionary-Entry is formatted as follows:
		// +-------------------------------+
		// |       Dictionary-ID (8)       |
		// +---+---------------------------+
		// | P |         Size (7+)         |
		// +---+---------------------------+
		// | E?| D?|   Truncate? (6+)      |
		// +---+---------------------------+
		// |         Offset? (8+)          |
		// +-------------------------------+

		var size, truncate, offset uint64
		var p, e, d bool

		// Parse a single Dictionary-Entry
		if len(payload) < 2 { // Must have at least id and size
			return MuxerStreamError{"unexpected EOF", http2.ErrCodeProtocol}
		}

		dictID := uint8(payload[0])
		p = (uint8(payload[1]) >> 7) == 1
		payload, size, err = http2ReadVarInt(7, payload[1:])
		if err != nil {
			return
		}

		if flags.Has(FlagSetDictionaryAppend) {
			// Presence of FlagSetDictionaryAppend means we expect e, d and truncate
			if len(payload) < 1 {
				return MuxerStreamError{"unexpected EOF", http2.ErrCodeProtocol}
			}
			e = (uint8(payload[0]) >> 7) == 1
			d = (uint8((payload[0])>>6) & 1) == 1
			payload, truncate, err = http2ReadVarInt(6, payload)
			if err != nil {
				return
			}
		}

		if flags.Has(FlagSetDictionaryOffset) {
			// Presence of FlagSetDictionaryOffset means we expect offset
			if len(payload) < 1 {
				return MuxerStreamError{"unexpected EOF", http2.ErrCodeProtocol}
			}
			payload, offset, err = http2ReadVarInt(8, payload)
			if err != nil {
				return
			}
		}

		setdict := setDictRequest{streamID: stream.streamID,
			dictID:   dictID,
			dictSZ:   size,
			truncate: truncate,
			offset:   offset,
			P:        p,
			E:        e,
			D:        d}

		// Find the right dictionary
		dict, err := r.dictionaries.read.getDictByID(dictID)
		if err != nil {
			return err
		}

		// Register a dictionary update order for the dictionary and reader
		updateEntry := &dictUpdate{reader: reader, dictionary: dict, s: setdict}
		dict.queue = append(dict.queue, updateEntry)
		reader.queue = append(reader.queue, updateEntry)
		// End of frame
		if len(payload) == 0 {
			break
		}
	}
	return nil
}

// Receives a WINDOW_UPDATE frame for a stream. A non-nil error is a connection error.
func (r *MuxReader) updateStreamWindow(frame *http2.WindowUpdateFrame) error {
	stream, err := r.getStreamForFrame(frame)
	if err != nil && err != ErrUnknownStream && err != ErrClosedStream {
		return err
	}
	if stream == nil {
		// ignore window updates on closed streams
		return nil
	}
	stream.replenishSendWindow(frame.Increment)
	r.updateSendWindowChan <- stream.getSendWindow()
	return nil
}

// Raise a stream processing error, closing the stream. Runs on the write thread.
func (r *MuxReader) streamError(streamID uint32, e http2.ErrCode) error {
	r.streamErrors.RaiseError(streamID, e)
	return nil
}

func (r *MuxReader) connectionError(err error) error {
	http2Code := http2.ErrCodeInternal
	switch e := err.(type) {
	case http2.ConnectionError:
		http2Code = http2.ErrCode(e)
	case MuxerProtocolError:
		http2Code = e.h2code
	}
	r.sendGoAway(http2Code)
	return err
}

// Instruct the writer to send a GOAWAY message if possible. This may fail in
// the case where an existing GOAWAY message is in flight or the writer event
// loop already ended.
func (r *MuxReader) sendGoAway(errCode http2.ErrCode) {
	select {
	case r.goAwayChan <- errCode:
	default:
	}
}
@ -0,0 +1,287 @@
package h2mux

import (
	"bytes"
	"encoding/binary"
	"io"
	"time"

	log "github.com/sirupsen/logrus"
	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

type MuxWriter struct {
	// f is used to write HTTP2 frames.
	f *http2.Framer
	// streams tracks currently-open streams.
	streams *activeStreamMap
	// streamErrors receives stream errors raised by the MuxReader.
	streamErrors *StreamErrorMap
	// readyStreamChan is used to multiplex writable streams onto the single connection.
	// When a stream becomes writable its ID is sent on this channel.
	readyStreamChan <-chan uint32
	// newStreamChan is used to create new streams with a given set of headers.
	newStreamChan <-chan MuxedStreamRequest
	// goAwayChan is used to send a single GOAWAY message to the peer. The element received
	// is the HTTP/2 error code to send.
	goAwayChan <-chan http2.ErrCode
	// abortChan is used when shutting down ungracefully. When this becomes readable, all activity should stop.
	abortChan <-chan struct{}
	// pingTimestamp is an atomic value containing the latest received ping timestamp.
	pingTimestamp *PingTimestamp
	// A timer used to measure idle connection time. Reset after sending data.
	idleTimer *IdleTimer
	// connActiveChan receives a signal that the connection received some (read) activity.
	connActiveChan <-chan struct{}
	// Maximum size of all frames that can be sent on this connection.
	maxFrameSize uint32
	// headerEncoder is the stateful header encoder for this connection.
	headerEncoder *hpack.Encoder
	// headerBuffer is the temporary buffer used by headerEncoder.
	headerBuffer bytes.Buffer
	// updateReceiveWindowChan is the channel to update receiveWindow size to muxerMetricsUpdater
	updateReceiveWindowChan chan<- uint32
	// updateSendWindowChan is the channel to update sendWindow size to muxerMetricsUpdater
	updateSendWindowChan chan<- uint32
	// bytesWrote is the number of bytes written to data frames since the last time we sent the count to metrics
	bytesWrote *AtomicCounter
	// updateOutBoundBytesChan is the channel to send bytesWrote to muxerMetricsUpdater
	updateOutBoundBytesChan chan<- uint64

	useDictChan <-chan useDictRequest
}

type MuxedStreamRequest struct {
	stream *MuxedStream
	body   io.Reader
}

func (r *MuxedStreamRequest) flushBody() {
	io.Copy(r.stream, r.body)
	r.stream.CloseWrite()
}

func tsToPingData(ts int64) [8]byte {
	pingData := [8]byte{}
	binary.LittleEndian.PutUint64(pingData[:], uint64(ts))
	return pingData
}

func (w *MuxWriter) run(parentLogger *log.Entry) error {
	logger := parentLogger.WithFields(log.Fields{
		"subsystem": "mux",
		"dir":       "write",
	})
	defer logger.Debug("event loop finished")

	// routine to periodically communicate bytesWrote
	go func() {
		tickC := time.Tick(updateFreq)
		for {
			select {
			case <-w.abortChan:
				return
			case <-tickC:
				w.updateOutBoundBytesChan <- w.bytesWrote.Count()
			}
		}
	}()

	for {
		select {
		case <-w.abortChan:
			logger.Debug("aborting writer thread")
			return nil
		case errCode := <-w.goAwayChan:
			logger.Debug("sending GOAWAY code ", errCode)
			err := w.f.WriteGoAway(w.streams.LastPeerStreamID(), errCode, []byte{})
			if err != nil {
				return err
			}
			w.idleTimer.MarkActive()
		case <-w.pingTimestamp.GetUpdateChan():
			logger.Debug("sending PING ACK")
			err := w.f.WritePing(true, tsToPingData(w.pingTimestamp.Get()))
			if err != nil {
				return err
			}
			w.idleTimer.MarkActive()
		case <-w.idleTimer.C:
			if !w.idleTimer.Retry() {
				return ErrConnectionDropped
			}
			logger.Debug("sending PING")
			err := w.f.WritePing(false, tsToPingData(time.Now().UnixNano()))
			if err != nil {
				return err
			}
			w.idleTimer.ResetTimer()
		case <-w.connActiveChan:
			w.idleTimer.MarkActive()
		case <-w.streamErrors.GetSignalChan():
			for streamID, errCode := range w.streamErrors.GetErrors() {
				logger.WithField("stream", streamID).WithField("code", errCode).Debug("resetting stream")
				err := w.f.WriteRSTStream(streamID, errCode)
				if err != nil {
					return err
				}
			}
			w.idleTimer.MarkActive()
		case streamRequest := <-w.newStreamChan:
			streamID := w.streams.AcquireLocalID()
			streamRequest.stream.streamID = streamID
			if !w.streams.Set(streamRequest.stream) {
				// Race between OpenStream and Shutdown, and Shutdown won. Let Shutdown (and the eventual abort) take
				// care of this stream. Ideally we'd pass the error directly to the stream object somehow so the
				// caller can be unblocked sooner, but the value of that optimisation is minimal for most of the
				// reasons why you'd call Shutdown anyway.
				continue
			}
			if streamRequest.body != nil {
				go streamRequest.flushBody()
			}
			streamLogger := logger.WithField("stream", streamID)
			err := w.writeStreamData(streamRequest.stream, streamLogger)
			if err != nil {
				return err
			}
			w.idleTimer.MarkActive()
		case streamID := <-w.readyStreamChan:
			streamLogger := logger.WithField("stream", streamID)
			stream, ok := w.streams.Get(streamID)
			if !ok {
				continue
			}
			err := w.writeStreamData(stream, streamLogger)
			if err != nil {
				return err
			}
			w.idleTimer.MarkActive()
		case useDict := <-w.useDictChan:
			err := w.writeUseDictionary(useDict)
			if err != nil {
				logger.WithError(err).Warn("error writing use dictionary")
				return err
			}
			w.idleTimer.MarkActive()
		}
	}
}

func (w *MuxWriter) writeStreamData(stream *MuxedStream, logger *log.Entry) error {
	logger.Debug("writable")
	chunk := stream.getChunk()
	w.updateReceiveWindowChan <- stream.getReceiveWindow()
	w.updateSendWindowChan <- stream.getSendWindow()
	if chunk.sendHeadersFrame() {
		err := w.writeHeaders(chunk.streamID, chunk.headers)
		if err != nil {
			logger.WithError(err).Warn("error writing headers")
			return err
		}
		logger.Debug("output headers")
	}

	if chunk.sendWindowUpdateFrame() {
		// Send a WINDOW_UPDATE frame to update our receive window.
		// If the Stream ID is zero, the window update applies to the connection as a whole.
		// RFC7540 section-6.9.1 "A receiver that receives a flow-controlled frame MUST
		// always account for its contribution against the connection flow-control
		// window, unless the receiver treats this as a connection error"
		err := w.f.WriteWindowUpdate(chunk.streamID, chunk.windowUpdate)
		if err != nil {
			logger.WithError(err).Warn("error writing window update")
			return err
		}
		logger.Debugf("increment receive window by %d", chunk.windowUpdate)
	}

	for chunk.sendDataFrame() {
		payload, sentEOF := chunk.nextDataFrame(int(w.maxFrameSize))
		err := w.f.WriteData(chunk.streamID, sentEOF, payload)
		if err != nil {
			logger.WithError(err).Warn("error writing data")
			return err
		}
		// update the amount of data written
		w.bytesWrote.IncrementBy(uint64(len(payload)))
		logger.WithField("len", len(payload)).Debug("output data")

		if sentEOF {
			if stream.readBuffer.Closed() {
				// transition into closed state
				if !stream.gotReceiveEOF() {
					// the peer may send data that we no longer want to receive. Force them into the
					// closed state.
					logger.Debug("resetting stream")
					w.f.WriteRSTStream(chunk.streamID, http2.ErrCodeNo)
				} else {
					// Half-open stream transitioned into closed
					logger.Debug("closing stream")
				}
				w.streams.Delete(chunk.streamID)
			} else {
				logger.Debug("closing stream write side")
			}
		}
	}
	return nil
}

func (w *MuxWriter) encodeHeaders(headers []Header) ([]byte, error) {
	w.headerBuffer.Reset()
	for _, header := range headers {
		err := w.headerEncoder.WriteField(hpack.HeaderField{
			Name:  header.Name,
			Value: header.Value,
		})
		if err != nil {
			return nil, err
		}
	}
	return w.headerBuffer.Bytes(), nil
}

// writeHeaders writes a block of encoded headers, splitting it into multiple frames if necessary.
func (w *MuxWriter) writeHeaders(streamID uint32, headers []Header) error {
	encodedHeaders, err := w.encodeHeaders(headers)
	if err != nil {
		return err
	}
	blockSize := int(w.maxFrameSize)
	endHeaders := len(encodedHeaders) == 0
	for !endHeaders && err == nil {
		blockFragment := encodedHeaders
		if len(encodedHeaders) > blockSize {
			blockFragment = blockFragment[:blockSize]
			encodedHeaders = encodedHeaders[blockSize:]
			// Send a CONTINUATION frame if the headers can't fit into one frame
			err = w.f.WriteContinuation(streamID, endHeaders, blockFragment)
		} else {
			endHeaders = true
			err = w.f.WriteHeaders(http2.HeadersFrameParam{
				StreamID:      streamID,
				EndHeaders:    endHeaders,
				BlockFragment: blockFragment,
			})
		}
	}
	return err
}

func (w *MuxWriter) writeUseDictionary(dictRequest useDictRequest) error {
	err := w.f.WriteRawFrame(FrameUseDictionary, 0, dictRequest.streamID, []byte{byte(dictRequest.dictID)})
	if err != nil {
		return err
	}
	payload := make([]byte, 0, 64)
	for _, set := range dictRequest.setDict {
		payload = append(payload, byte(set.dictID))
		payload = appendVarInt(payload, 7, uint64(set.dictSZ))
		payload = append(payload, 0x80) // E = 1, D = 0, Truncate = 0
	}

	err = w.f.WriteRawFrame(FrameSetDictionary, FlagSetDictionaryAppend, dictRequest.streamID, payload)
	return err
}
@ -0,0 +1,140 @@
package h2mux

// ReadyList multiplexes several event signals onto a single channel.
type ReadyList struct {
	signalC chan uint32
	waitC   chan uint32
}

func NewReadyList() *ReadyList {
	rl := &ReadyList{
		signalC: make(chan uint32),
		waitC:   make(chan uint32),
	}
	go rl.run()
	return rl
}

// Signal marks a stream as ready. ID is the stream ID.
func (r *ReadyList) Signal(ID uint32) {
	r.signalC <- ID
}

func (r *ReadyList) ReadyChannel() <-chan uint32 {
	return r.waitC
}

func (r *ReadyList) Close() {
	close(r.signalC)
}

func (r *ReadyList) run() {
	defer close(r.waitC)
	var queue readyDescriptorQueue
	var firstReady *readyDescriptor
	activeDescriptors := newReadyDescriptorMap()
	for {
		if firstReady == nil {
			// Wait for first ready descriptor
			i, ok := <-r.signalC
			if !ok {
				// closed
				return
			}
			firstReady = activeDescriptors.SetIfMissing(i)
		}
		select {
		case r.waitC <- firstReady.ID:
			activeDescriptors.Delete(firstReady.ID)
			firstReady = queue.Dequeue()
		case i, ok := <-r.signalC:
			if !ok {
				// closed
				return
			}
			newReady := activeDescriptors.SetIfMissing(i)
			if newReady != nil {
				// key didn't exist yet, so queue the new descriptor
				queue.Enqueue(newReady)
			}
		}
	}
}

type readyDescriptor struct {
	ID   uint32
	Next *readyDescriptor
}

// readyDescriptorQueue is a queue of readyDescriptors in the form of a singly-linked list.
// The zero-valued readyDescriptorQueue is an empty queue ready for use.
type readyDescriptorQueue struct {
	Head *readyDescriptor
	Tail *readyDescriptor
}

func (q *readyDescriptorQueue) Empty() bool {
	return q.Head == nil
}

func (q *readyDescriptorQueue) Enqueue(x *readyDescriptor) {
	if x.Next != nil {
		panic("enqueued already queued item")
	}
	if q.Empty() {
		q.Head = x
		q.Tail = x
	} else {
		q.Tail.Next = x
		q.Tail = x
	}
}

// Dequeue returns the first readyDescriptor in the queue, or nil if empty.
func (q *readyDescriptorQueue) Dequeue() *readyDescriptor {
	if q.Empty() {
		return nil
	}
	x := q.Head
	q.Head = x.Next
	x.Next = nil
	return x
}

// readyDescriptorMap is a map of readyDescriptors keyed by ID.
// It maintains a free list of deleted ready descriptors.
type readyDescriptorMap struct {
	descriptors map[uint32]*readyDescriptor
	free        []*readyDescriptor
}

func newReadyDescriptorMap() *readyDescriptorMap {
	return &readyDescriptorMap{descriptors: make(map[uint32]*readyDescriptor)}
}

// SetIfMissing creates or reuses a readyDescriptor if the stream is not already in the queue.
// This avoids stream starvation caused by a single high-bandwidth stream monopolising the writer goroutine
func (m *readyDescriptorMap) SetIfMissing(key uint32) *readyDescriptor {
	if _, ok := m.descriptors[key]; ok {
		return nil
	}

	var newDescriptor *readyDescriptor
	if len(m.free) > 0 {
		// reuse deleted ready descriptors
		newDescriptor = m.free[len(m.free)-1]
		m.free = m.free[:len(m.free)-1]
	} else {
		newDescriptor = &readyDescriptor{}
	}
	newDescriptor.ID = key
	m.descriptors[key] = newDescriptor
	return newDescriptor
}

func (m *readyDescriptorMap) Delete(key uint32) {
	if descriptor, ok := m.descriptors[key]; ok {
		m.free = append(m.free, descriptor)
		delete(m.descriptors, key)
	}
}
@ -0,0 +1,115 @@
package h2mux

import (
	"testing"
	"time"
)

func TestReadyList(t *testing.T) {
	rl := NewReadyList()
	c := rl.ReadyChannel()
	// helper functions
	assertEmpty := func() {
		select {
		case <-c:
			t.Fatalf("Spurious wakeup")
		default:
		}
	}
	receiveWithTimeout := func() uint32 {
		select {
		case i := <-c:
			return i
		case <-time.After(100 * time.Millisecond):
			t.Fatalf("Timeout")
			return 0
		}
	}
	// no signals, receive should fail
	assertEmpty()
	rl.Signal(0)
	if receiveWithTimeout() != 0 {
		t.Fatalf("Received wrong ID of signalled event")
	}
	// no new signals, receive should fail
	assertEmpty()
	// Signals should not block;
	// Duplicate unhandled signals should not cause multiple wakeups
	signalled := [5]bool{}
	for i := range signalled {
		rl.Signal(uint32(i))
		rl.Signal(uint32(i))
	}
	// All signals should be received once (in any order)
	for range signalled {
		i := receiveWithTimeout()
		if signalled[i] {
			t.Fatalf("Received signal %d more than once", i)
		}
		signalled[i] = true
	}
}

func TestReadyDescriptorQueue(t *testing.T) {
	var queue readyDescriptorQueue
	items := [4]readyDescriptor{}
	for i := range items {
		items[i].ID = uint32(i)
	}

	if !queue.Empty() {
		t.Fatalf("nil queue should be empty")
	}
	queue.Enqueue(&items[3])
	queue.Enqueue(&items[1])
	queue.Enqueue(&items[0])
	queue.Enqueue(&items[2])
	if queue.Empty() {
		t.Fatalf("Empty should be false after enqueue")
	}
	i := queue.Dequeue().ID
	if i != 3 {
		t.Fatalf("item 3 should have been dequeued, got %d instead", i)
	}
	i = queue.Dequeue().ID
	if i != 1 {
		t.Fatalf("item 1 should have been dequeued, got %d instead", i)
	}
	i = queue.Dequeue().ID
	if i != 0 {
		t.Fatalf("item 0 should have been dequeued, got %d instead", i)
	}
	i = queue.Dequeue().ID
	if i != 2 {
		t.Fatalf("item 2 should have been dequeued, got %d instead", i)
	}
	if !queue.Empty() {
		t.Fatal("queue should be empty after dequeuing all items")
	}
	if queue.Dequeue() != nil {
		t.Fatal("dequeue on empty queue should return nil")
	}
}

func TestReadyDescriptorMap(t *testing.T) {
	m := newReadyDescriptorMap()
	m.Delete(42)
	// (delete of missing key should be a noop)
	x := m.SetIfMissing(42)
	if x == nil {
		t.Fatal("SetIfMissing for new key returned nil")
	}
	if m.SetIfMissing(42) != nil {
		t.Fatal("SetIfMissing for existing key returned non-nil")
	}
	// this delete has effect
	m.Delete(42)
	// the next set should reuse the old object
	y := m.SetIfMissing(666)
	if y == nil {
		t.Fatal("SetIfMissing for new key returned nil")
	}
	if x != y {
		t.Fatal("SetIfMissing didn't reuse freed object")
	}
}
@ -0,0 +1,29 @@
package h2mux

import (
	"sync/atomic"
)

// PingTimestamp is an atomic interface around ping timestamping and signalling.
type PingTimestamp struct {
	ts     int64
	signal Signal
}

func NewPingTimestamp() *PingTimestamp {
	return &PingTimestamp{signal: NewSignal()}
}

func (pt *PingTimestamp) Set(v int64) {
	if atomic.SwapInt64(&pt.ts, v) != 0 {
		pt.signal.Signal()
	}
}

func (pt *PingTimestamp) Get() int64 {
	return atomic.SwapInt64(&pt.ts, 0)
}

func (pt *PingTimestamp) GetUpdateChan() <-chan struct{} {
	return pt.signal.WaitChannel()
}
@ -0,0 +1 @@
!function(){"use strict";function a(a){var b,c=[];if(!a)return"";for(b in a)a.hasOwnProperty(b)&&(a[b]||a[b]===!1)&&c.push(b+"="+encodeURIComponent(a[b]));return c.length?"?"+c.join("&"):""}var b,c,d,e,f="https://cloudflare.ghost.io/ghost/api/v0.1/";d={api:function(){var d,e=Array.prototype.slice.call(arguments),g=f;return d=e.pop(),d&&"object"!=typeof d&&(e.push(d),d={}),d=d||{},d.client_id=b,d.client_secret=c,e.length&&e.forEach(function(a){g+=a.replace(/^\/|\/$/g,"")+"/"}),g+a(d)}},e=function(a){b=a.clientId?a.clientId:"",c=a.clientSecret?a.clientSecret:"",f=a.url?a.url:f.match(/{\{api-url}}/)?"":f},"undefined"!=typeof window&&(window.ghost=window.ghost||{},window.ghost.url=d,window.ghost.init=e),"undefined"!=typeof module&&(module.exports={url:d,init:e})}();
@ -0,0 +1,537 @@
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8" />
|
||||||
|
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
|
||||||
|
|
||||||
|
<title>Cloudflare Blog</title>
|
||||||
|
<meta name="description" content="" />
|
||||||
|
<meta name="HandheldFriendly" content="True">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||||
|
<meta name="msvalidate.01" content="CF295E1604697F9CAD18B5A232E871F6" />
|
||||||
|
|
||||||
|
<link rel="shortcut icon" href="/assets/images/favicon.ico?v=b6cf3f99a6">
|
||||||
|
<link rel="apple-touch-icon-precomposed" sizes="57x57" href="/assets/images/apple-touch-icon-57x57-precomposed.png?v=b6cf3f99a6" />
|
||||||
|
<link rel="apple-touch-icon-precomposed" sizes="72x72" href="/assets/images/apple-touch-icon-72x72-precomposed.png?v=b6cf3f99a6" />
|
||||||
|
<link rel="apple-touch-icon-precomposed" sizes="114x114" href="/assets/images/apple-touch-icon-114x114-precomposed.png?v=b6cf3f99a6" />
|
||||||
|
<link rel="apple-touch-icon-precomposed" sizes="144x144" href="/assets/images/apple-touch-icon-144x144-precomposed.png?v=b6cf3f99a6" />
|
||||||
|
|
||||||
|
<link rel="stylesheet" type="text/css" href="/assets/css/screen.css?v=b6cf3f99a6" />
|
||||||
|
<!--[if lt IE 9]><link rel="stylesheet" type="text/css" href="/assets/css/ie.css?v=b6cf3f99a6" /><![endif]-->
|
||||||
|
|
||||||
|
<!--<link href="http://fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,600italic,700italic,400,700,300,600" rel="stylesheet" type="text/css">-->
|
||||||
|
|
||||||
|
<script>(function(G,o,O,g,l){G.GoogleAnalyticsObject=O;G[O]||(G[O]=function(){(G[O].q=G[O].q||[]).push(arguments)});G[O].l=+new Date;g=o.createElement('script'),l=o.scripts[0];g.src='//www.google-analytics.com/analytics.js';l.parentNode.insertBefore(g,l)}(this,document,'ga'));ga('create','UA-10218544-12', 'auto');ga('send','pageview')</script>
|
||||||
|
|
||||||
|
<link rel="canonical" href="http://blog.cloudflare.com/" />
|
||||||
|
<meta name="referrer" content="no-referrer-when-downgrade" />
|
||||||
|
<link rel="next" href="https://blog.cloudflare.com/page/2/" />
|
||||||
|
|
||||||
|
<meta property="og:site_name" content="Cloudflare Blog" />
|
||||||
|
<meta property="og:type" content="website" />
|
||||||
|
<meta property="og:title" content="Cloudflare Blog" />
|
||||||
|
<meta property="og:url" content="http://blog.cloudflare.com/" />
|
||||||
|
<meta property="og:image" content="http://blog.cloudflare.com/content/images/2016/09/logo-for-blog_thumb-1.png" />
|
||||||
|
<meta property="article:publisher" content="https://www.facebook.com/Cloudflare" />
|
||||||
|
<meta name="twitter:card" content="summary_large_image" />
|
||||||
|
<meta name="twitter:title" content="Cloudflare Blog" />
|
||||||
|
<meta name="twitter:url" content="http://blog.cloudflare.com/" />
|
||||||
|
<meta name="twitter:image" content="http://blog.cloudflare.com/content/images/2016/09/logo-for-blog_thumb-1.png" />
|
||||||
|
<meta name="twitter:site" content="@cloudflare" />
|
||||||
|
<meta property="og:image:width" content="189" />
|
||||||
|
<meta property="og:image:height" content="47" />
|
||||||
|
|
||||||
|
<script type="application/ld+json">
|
||||||
|
{
|
||||||
|
"@context": "https://schema.org",
|
||||||
|
"@type": "Website",
|
||||||
|
"publisher": {
|
||||||
|
"@type": "Organization",
|
||||||
|
"name": "Cloudflare Blog",
|
||||||
|
"logo": {
|
||||||
|
"@type": "ImageObject",
|
||||||
|
"url": "http://blog.cloudflare.com/content/images/2016/09/logo-for-blog_thumb.png",
|
||||||
|
"width": 189,
|
||||||
|
"height": 47
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"url": "https://blog.cloudflare.com/",
|
||||||
|
"image": {
|
||||||
|
"@type": "ImageObject",
|
||||||
|
"url": "http://blog.cloudflare.com/content/images/2016/09/logo-for-blog_thumb-1.png",
|
||||||
|
"width": 189,
|
||||||
|
"height": 47
|
||||||
|
},
|
||||||
|
"mainEntityOfPage": {
|
||||||
|
"@type": "WebPage",
|
||||||
|
"@id": "http://blog.cloudflare.com"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<script type="text/javascript" src="/shared/ghost-url.min.js?v=b6cf3f99a6"></script>
|
||||||
|
<script type="text/javascript">
|
||||||
|
ghost.init({
|
||||||
|
clientId: "ghost-frontend",
|
||||||
|
clientSecret: "cf0df60d1ab4"
|
||||||
|
});
|
||||||
|
</script>
|
||||||
|
<meta name="generator" content="Ghost 0.11" />
|
||||||
|
<link rel="alternate" type="application/rss+xml" title="Cloudflare Blog" href="https://blog.cloudflare.com/rss/" />
|
||||||
|
<meta name="msvalidate.01" content="CF295E1604697F9CAD18B5A232E871F6" />
|
||||||
|
<meta class="swiftype" name="language" data-type="string" content="en" />
|
||||||
|
<script src="https://s3-us-west-1.amazonaws.com/cf-ghost-assets-hotfix/js/index.js"></script>
|
||||||
|
<script type="text/javascript" src="//cdn.bizible.com/scripts/bizible.js" async=""></script>
|
||||||
|
<script>
|
||||||
|
var trackRecruitingLink = function(role, url) {
|
||||||
|
</head>
<body class="home-template">
<div class="wrapper reverse-sidebar">
<section class="primary-content" role="main">
<article class="post tag-google-cloud tag-cloud-computing tag-internet-summit">
<header class="post-header">
<h2 class="title"><a href="/living-in-a-multi-cloud-world/">Living In A Multi-Cloud World</a></h2>
<div class="meta">
Published on <time class="meta-date" datetime="November 21st, 2017 4:30PM">November 21st, 2017 4:30PM</time>
by <a href="/author/sergi/">Sergi Isasi</a>.
</div>
</header>
<div class="post-excerpt">
<p>A few months ago at Cloudflare’s Internet Summit, we hosted a discussion on A Cloud Without Handcuffs with Joe Beda, one of the creators of Kubernetes, and Brandon Phillips, the co-founder of CoreOS. The conversation touched on multiple areas, but it’s clear that more and more companies are recognizing the need to have some strategy around hosting their applications on multiple cloud providers. Earlier this year,…</p>
</div>
<footer>
<a href="/living-in-a-multi-cloud-world/" class="more">Read more » </a><br>
<small>
<span class="post-meta">
<a href="/living-in-a-multi-cloud-world/#disqus_thread">Comments</a> | tagged with <a href="/tag/google-cloud/">Google Cloud</a>, <a href="/tag/cloud-computing/">Cloud Computing</a>, <a href="/tag/internet-summit/">Internet Summit</a>
</span>
</small>
</footer>
</article>

<article class="post tag-legal tag-jengo tag-patents">
<header class="post-header">
<h2 class="title"><a href="/supreme-court-wanders-into-patent-troll-fight/">The Supreme Court Wanders into the Patent Troll Fight</a></h2>
<div class="meta">
Published on <time class="meta-date" datetime="November 20th, 2017 6:18PM">November 20th, 2017 6:18PM</time>
by <a href="/author/edo-royker/">Edo Royker</a>.
</div>
</header>
<div class="post-excerpt">
<p>Next Monday, the US Supreme Court will hear oral arguments in Oil States Energy Services, LLC vs. Greene’s Energy Group, LLC, which is a case to determine whether the Inter Partes Review (IPR) administrative process at the US Patent and Trademark Office (USPTO) used to determine the validity of patents is constitutional. The constitutionality of the IPR process is one of the biggest legal issues facing innovative…</p>
</div>
<footer>
<a href="/supreme-court-wanders-into-patent-troll-fight/" class="more">Read more » </a><br>
<small>
<span class="post-meta">
<a href="/supreme-court-wanders-into-patent-troll-fight/#disqus_thread">Comments</a> | tagged with <a href="/tag/legal/">Legal</a>, <a href="/tag/jengo/">Jengo</a>, <a href="/tag/patents/">Patents</a>
</span>
</small>
</footer>
</article>

<article class="post tag-cloudflare-apps tag-developers tag-user-engagement">
<header class="post-header">
<h2 class="title"><a href="/7cloudflareappsengagement/">7 Cloudflare Apps Which Increase User Engagement on Your Site</a></h2>
<div class="meta">
Published on <time class="meta-date" datetime="November 14th, 2017 8:21PM">November 14th, 2017 8:21PM</time>
by <a href="/author/andrew/">Andrew Fitch</a>.
</div>
</header>
<div class="post-excerpt">
<p>Cloudflare Apps now lists 95 apps from apps which grow email lists to apps which acquire new customers to apps which help site owners make more money. The great thing about these apps is that users don't have to have any coding or development skills. They can just sign up for the app and start using it on their sites. Let’s take a moment to highlight some…</p>
</div>
<footer>
<a href="/7cloudflareappsengagement/" class="more">Read more » </a><br>
<small>
<span class="post-meta">
<a href="/7cloudflareappsengagement/#disqus_thread">Comments</a> | tagged with <a href="/tag/cloudflare-apps/">Cloudflare Apps</a>, <a href="/tag/developers/">Developers</a>, <a href="/tag/user-engagement/">User Engagement</a>
</span>
</small>
</footer>
</article>

<article class="post tag-acquisitions tag-cloudflare-team tag-mobile tag-neumob">
<header class="post-header">
<h2 class="title"><a href="/neumob-optimizing-mobile/">The Super Secret Cloudflare Master Plan, or why we acquired Neumob</a></h2>
<div class="meta">
Published on <time class="meta-date" datetime="November 14th, 2017 2:00PM">November 14th, 2017 2:00PM</time>
by <a href="/author/john-graham-cumming/">John Graham-Cumming</a>.
</div>
</header>
<div class="post-excerpt">
<p>We announced today that Cloudflare has acquired Neumob. Neumob’s team built exceptional technology to speed up mobile apps, reduce errors on challenging mobile networks, and increase conversions. Cloudflare will integrate the Neumob technology with our global network to give Neumob truly global reach. It’s tempting to think of the Neumob acquisition as a point product added to the Cloudflare portfolio. But it actually represents a key…</p>
</div>
<footer>
<a href="/neumob-optimizing-mobile/" class="more">Read more » </a><br>
<small>
<span class="post-meta">
<a href="/neumob-optimizing-mobile/#disqus_thread">Comments</a> | tagged with <a href="/tag/acquisitions/">Acquisitions</a>, <a href="/tag/cloudflare-team/">Cloudflare Team</a>, <a href="/tag/mobile/">Mobile</a>, <a href="/tag/neumob/">Neumob</a>
</span>
</small>
</footer>
</article>

<article class="post tag-security tag-legal tag-privacy tag-attacks">
<header class="post-header">
<h2 class="title"><a href="/thwarting-the-tactics-of-the-equifax-attackers/">Thwarting the Tactics of the Equifax Attackers</a></h2>
<div class="meta">
Published on <time class="meta-date" datetime="November 13th, 2017 4:09PM">November 13th, 2017 4:09PM</time>
by <a href="/author/alex-cruz-farmer/">Alex Cruz Farmer</a>.
</div>
</header>
<div class="post-excerpt">
<p>We are now 3 months on from one of the biggest, most significant data breaches in history, but has it redefined people's awareness on security? The answer to that is absolutely yes, awareness is at an all-time high. Awareness, however, does not always result in positive action. The fallacy which is often assumed is "surely, if I keep my software up to date with all the patches, that's…</p>
</div>
<footer>
<a href="/thwarting-the-tactics-of-the-equifax-attackers/" class="more">Read more » </a><br>
<small>
<span class="post-meta">
<a href="/thwarting-the-tactics-of-the-equifax-attackers/#disqus_thread">Comments</a> | tagged with <a href="/tag/security/">Security</a>, <a href="/tag/legal/">Legal</a>, <a href="/tag/privacy/">Privacy</a>, <a href="/tag/attacks/">Attacks</a>
</span>
</small>
</footer>
</article>

<article class="post tag-go tag-performance tag-golang tag-developers">
<header class="post-header">
<h2 class="title"><a href="/go-dont-collect-my-garbage/">Go, don't collect my garbage</a></h2>
<div class="meta">
Published on <time class="meta-date" datetime="November 13th, 2017 10:31AM">November 13th, 2017 10:31AM</time>
by <a href="/author/vlad-krasnov/">Vlad Krasnov</a>.
</div>
</header>
<div class="post-excerpt">
<p>Not long ago I needed to benchmark the performance of Golang on a many-core machine. I took several of the benchmarks that are bundled with the Go source code, copied them, and modified them to run on all available threads. In that case the machine has 24 cores and 48 threads. CC BY-SA 2.0 image by sponki25 I started with ECDSA P256 Sign, probably because I have…</p>
</div>
<footer>
<a href="/go-dont-collect-my-garbage/" class="more">Read more » </a><br>
<small>
<span class="post-meta">
<a href="/go-dont-collect-my-garbage/#disqus_thread">Comments</a> | tagged with <a href="/tag/go/">Go</a>, <a href="/tag/performance/">Performance</a>, <a href="/tag/golang/">golang</a>, <a href="/tag/developers/">Developers</a>
</span>
</small>
</footer>
</article>

<article class="post tag-developers tag-javascript tag-php tag-lua tag-go tag-meetup tag-cloudflare-meetups tag-community tag-pizza">
<header class="post-header">
<h2 class="title"><a href="/cloudflare-wants-to-buy-your-meetup-group-pizza/">Cloudflare Wants to Buy Your Meetup Group Pizza</a></h2>
<div class="meta">
Published on <time class="meta-date" datetime="November 10th, 2017 3:00PM">November 10th, 2017 3:00PM</time>
by <a href="/author/andrew/">Andrew Fitch</a>.
</div>
</header>
<div class="post-excerpt">
<p>If you’re a web dev / devops / etc. meetup group that also works toward building a faster, safer Internet, I want to support your awesome group by buying you pizza. If your group’s focus falls within one of the subject categories below and you’re willing to give us a 30 second shout out and tweet a photo of your group and @Cloudflare, your meetup’s pizza…</p>
</div>
<footer>
<a href="/cloudflare-wants-to-buy-your-meetup-group-pizza/" class="more">Read more » </a><br>
<small>
<span class="post-meta">
<a href="/cloudflare-wants-to-buy-your-meetup-group-pizza/#disqus_thread">Comments</a> | tagged with <a href="/tag/developers/">Developers</a>, <a href="/tag/javascript/">javascript</a>, <a href="/tag/php/">php</a>, <a href="/tag/lua/">lua</a>, <a href="/tag/go/">Go</a>, <a href="/tag/meetup/">MeetUp</a>, <a href="/tag/cloudflare-meetups/">Cloudflare Meetups</a>, <a href="/tag/community/">Community</a>, <a href="/tag/pizza/">Pizza</a>
</span>
</small>
</footer>
</article>

<article class="post">
<header class="post-header">
<h2 class="title"><a href="/on-the-dangers-of-intels-frequency-scaling/">On the dangers of Intel's frequency scaling</a></h2>
<div class="meta">
Published on <time class="meta-date" datetime="November 10th, 2017 11:06AM">November 10th, 2017 11:06AM</time>
by <a href="/author/vlad-krasnov/">Vlad Krasnov</a>.
</div>
</header>
<div class="post-excerpt">
<p>While I was writing the post comparing the new Qualcomm server chip, Centriq, to our current stock of Intel Skylake-based Xeons, I noticed a disturbing phenomena. When benchmarking OpenSSL 1.1.1dev, I discovered that the performance of the cipher ChaCha20-Poly1305 does not scale very well. On a single thread, it performed at the speed of approximately 2.89GB/s, whereas on 24 cores, and 48 threads it…</p>
</div>
<footer>
<a href="/on-the-dangers-of-intels-frequency-scaling/" class="more">Read more » </a><br>
<small>
<span class="post-meta">
<a href="/on-the-dangers-of-intels-frequency-scaling/#disqus_thread">Comments</a>
</span>
</small>
</footer>
</article>
</section>
</div>
</body>
</html>
@ -0,0 +1,515 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />

<title>Living In A Multi-Cloud World</title>
<meta name="description" content="At our recent Internet Summit, we hosted a discussion on A Cloud Without Handcuffs with Joe Beda, one of the creators of Kubernetes, and Brandon Phillips, the co-founder of CoreOS." />
<meta name="HandheldFriendly" content="True">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="msvalidate.01" content="CF295E1604697F9CAD18B5A232E871F6" />

<link rel="canonical" href="http://blog.cloudflare.com/living-in-a-multi-cloud-world/" />
<meta name="referrer" content="no-referrer-when-downgrade" />
<link rel="amphtml" href="http://blog.cloudflare.com/living-in-a-multi-cloud-world/amp/" />

<meta property="og:site_name" content="Cloudflare Blog" />
<meta property="og:type" content="article" />
<meta property="og:title" content="Living In A Multi-Cloud World" />
<meta property="og:description" content="At our recent Internet Summit, we hosted a discussion on A Cloud Without Handcuffs with Joe Beda, one of the creators of Kubernetes, and Brandon Phillips, the co-founder of CoreOS." />
<meta property="og:url" content="http://blog.cloudflare.com/living-in-a-multi-cloud-world/" />
<meta property="og:image" content="http://blog.cloudflare.com/content/images/2017/11/Cloudflare_Multi_Cloud-1.png" />
<meta property="article:published_time" content="2017-11-21T16:30:00.000Z" />
<meta property="article:modified_time" content="2017-11-21T16:35:36.000Z" />
<meta property="article:tag" content="Google Cloud" />
<meta property="article:tag" content="Cloud Computing" />
<meta property="article:tag" content="Internet Summit" />

<meta property="article:publisher" content="https://www.facebook.com/Cloudflare" />
<meta name="twitter:card" content="summary_large_image" />
<meta name="twitter:title" content="Living In A Multi-Cloud World" />
<meta name="twitter:description" content="At our recent Internet Summit, we hosted a discussion on A Cloud Without Handcuffs with Joe Beda, one of the creators of Kubernetes, and Brandon Phillips, the co-founder of CoreOS." />
<meta name="twitter:url" content="http://blog.cloudflare.com/living-in-a-multi-cloud-world/" />
<meta name="twitter:image" content="http://blog.cloudflare.com/content/images/2017/11/Cloudflare_Multi_Cloud-1.png" />
<meta name="twitter:label1" content="Written by" />
<meta name="twitter:data1" content="Sergi Isasi" />
<meta name="twitter:label2" content="Filed under" />
<meta name="twitter:data2" content="Google Cloud, Cloud Computing, Internet Summit" />
<meta name="twitter:site" content="@cloudflare" />
<meta name="twitter:creator" content="@sgisasi" />
<meta property="og:image:width" content="2002" />
<meta property="og:image:height" content="934" />

<script type="application/ld+json">
{
"@context": "https://schema.org",
"@type": "Article",
"publisher": {
"@type": "Organization",
"name": "Cloudflare Blog",
"logo": {
"@type": "ImageObject",
"url": "http://blog.cloudflare.com/content/images/2016/09/logo-for-blog_thumb.png",
"width": 189,
"height": 47
}
},
"author": {
"@type": "Person",
"name": "Sergi Isasi",
"image": {
"@type": "ImageObject",
"url": "http://blog.cloudflare.com/content/images/2017/11/FullSizeRender_jpeg.png",
"width": 487,
"height": 487
},
"url": "http://blog.cloudflare.com/author/sergi/",
"sameAs": [
"https://twitter.com/sgisasi"
],
"description": "Product Management @ Cloudflare."
},
"headline": "Living In A Multi-Cloud World",
"url": "https://blog.cloudflare.com/living-in-a-multi-cloud-world/",
"datePublished": "2017-11-21T16:30:00.000Z",
"dateModified": "2017-11-21T16:35:36.000Z",
"image": {
"@type": "ImageObject",
"url": "http://blog.cloudflare.com/content/images/2017/11/Cloudflare_Multi_Cloud-1.png",
"width": 2002,
"height": 934
},
"keywords": "Google Cloud, Cloud Computing, Internet Summit",
"description": "At our recent Internet Summit, we hosted a discussion on A Cloud Without Handcuffs with Joe Beda, one of the creators of Kubernetes, and Brandon Phillips, the co-founder of CoreOS.",
"mainEntityOfPage": {
"@type": "WebPage",
"@id": "http://blog.cloudflare.com"
}
}
</script>

<meta name="generator" content="Ghost 0.11" />
<link rel="alternate" type="application/rss+xml" title="Cloudflare Blog" href="https://blog.cloudflare.com/rss/" />
</head>
<body class="post-template tag-google-cloud tag-cloud-computing tag-internet-summit">
<div class="wrapper reverse-sidebar">
<section class="primary-content" role="main">
<article class="post tag-google-cloud tag-cloud-computing tag-internet-summit">
<header class="post-header">
<h1 class="title">Living In A Multi-Cloud World</h1>
<div class="meta">
<time class="meta-date" datetime="2017-11-21">21 Nov 2017</time>
by <a href="/author/sergi/">Sergi Isasi</a>.
</div>
</header>
<div class="post-content">
<p>A few months ago at Cloudflare’s <a href="https://www.cloudflare.com/internet-summit/">Internet Summit</a>, we hosted a discussion on <a href="https://blog.cloudflare.com/a-cloud-without-handcuffs/">A Cloud Without Handcuffs</a> with Joe Beda, one of the creators of Kubernetes, and Brandon Phillips, the co-founder of CoreOS. The conversation touched on multiple areas, but it’s clear that more and more companies are recognizing the need to have some strategy around hosting their applications on multiple cloud providers.</p>

<p>Earlier this year, Mary Meeker published her annual <a href="http://www.kpcb.com/internet-trends">Internet Trends</a> report, which revealed that 22% of respondents viewed Cloud Vendor Lock-In as a top 3 concern, up from just 7% in 2012. This is in contrast to previous top concerns, Data Security and Cost & Savings, both of which dropped amongst those surveyed.</p>

<p><img src="/content/images/2017/11/Mary-Meeker-Internet-Trends-2017.png" alt="Internet Trends" /></p>

<p>At Cloudflare, our mission is to help build a better internet. To fulfill this mission, our customers need to have consistent access to the best technology and services, over time. This is especially the case with respect to storage and compute providers. It means not becoming locked in to any single provider, and taking advantage of multiple cloud computing vendors (such as Amazon Web Services or Google Cloud Platform) for the same end user services.</p>

<h3 id="thebenefitsofhavingmultiplecloudvendors">The Benefits of Having Multiple Cloud Vendors</h3>

<p>There are a number of potential challenges when selecting a single cloud provider. Though there may be scenarios where it makes sense to consolidate on a single vendor, we believe it is important that customers are aware of their choices and of the downsides of being locked in to a particular vendor. In short, know what trade-offs you are making should you decide to consolidate parts of your network, compute, and storage with a single cloud provider. While not comprehensive, here are a few trade-offs you may be making if you are locked in to one cloud.</p>

<h4 id="costefficiences">Cost Efficiencies</h4>

<p>For some companies, there may be cost savings involved in spreading traffic across multiple vendors. Some can take advantage of free or reduced-cost tiers at lower volumes. Vendors may offer reduced prices at times of day when their infrastructure is less utilized. Applications can have varying compute requirements amongst layers of the application: some may require faster, immediate processing while others may benefit from delayed processing at a lower cost.</p>

<h4 id="negotiationstrength">Negotiation Strength</h4>

<p>One of the most important reasons to consider deploying in multiple cloud providers is to minimize your reliance on a single vendor’s technology for your critical business processes. As you become more vertically integrated with any vendor, your negotiating position on pricing or favorable contract terms is diminished. Having production-ready code available on multiple providers leaves you with less technical debt should you need to change. If you go a step further and are already sending traffic to multiple providers, you have minimized the technical debt required to switch and can negotiate from a position of strength.</p>

<h4 id="businesscontinuityorhighavailability">Business Continuity or High Availability</h4>

<p>While the major cloud providers are generally reliable, there have been a few notable outages in recent years, the most significant in recent memory being Amazon’s <a href="https://aws.amazon.com/message/41926/">US-EAST S3</a> outage in February. Some organizations may have a policy specifying multiple providers for high availability, while others should consider it, where necessary and feasible, as a best practice. A multi-cloud strategy lowers the operational risk of a single vendor’s mistake causing a significant outage for a mission-critical application.</p>

<h4 id="experimentation">Experimentation</h4>

<p>One of the exciting things about having competition in the space is the level of innovation and feature velocity of each provider. Every year there are major announcements of new products or features that may significantly improve your organization's competitive advantage. Having test and production environments in multiple providers gives your engineers the ability to understand and experiment with a new capability in the context of your technology stack and data. You may even try these features for a portion of your traffic and get real-world data on any benefits realized.</p>

<h3 id="cloudflaresrole">Cloudflare’s Role</h3>

<p>Cloudflare is an independent third party in your multi-cloud strategy. Our goal is to minimize the layers of lock-in between you and a provider and to lower the effort of change. In particular, one area where we can help right away is to minimize the operational changes necessary at the network layer, similar to what Kubernetes can do at the storage and compute level. As a benefit of our network, you can also have a centralized point for security and operational control.</p>

<p><img src="/content/images/2017/11/Cloudflare_Multi_Cloud.png" alt="Cloudflare Multi Cloud" /></p>

<p>Cloudflare’s Load Balancing can easily be configured to act as your global application traffic aggregator and distribute your traffic amongst origins at as many clouds as you choose to utilize. Active layer 7 health checks continually probe your origins and can automatically move traffic in the case of network or application failure. All consolidated web traffic can be inspected and acted upon by Cloudflare’s best-of-breed <a href="https://www.cloudflare.com/security/">Security</a> services, providing a single control point and visibility across all application traffic, regardless of which cloud the origin may be on. You also have the benefit of Cloudflare’s <a href="https://www.cloudflare.com/network/">Global Anycast Network</a>, providing for better speed and higher availability regardless of which clouds your origins are hosted on.</p>

<h3 id="billforwardusingcloudflaretoimplementmulticloud">Billforward: Using Cloudflare to Implement Multi-Cloud</h3>

<p>Billforward is a San Francisco and London based startup on a mission to change the way people bill and charge their customers, providing a solution to the complexities of Quote-to-Cash. Their platform is built on a number of REST APIs that other developers call to bill and generate revenue for their own companies.</p>

<p>Billforward is using Cloudflare for its core customer-facing application to fail traffic over between Google Compute Engine and Amazon Web Services. Acting as a reverse proxy, Cloudflare receives all requests and decides which of Billforward’s two configured cloud origins to use based upon the availability of that origin in near real-time. This allows Billforward to completely manage the connections to and from two disparate cloud providers using Cloudflare’s UI or API. Billforward is in the process of migrating all of their customer-facing domains to a similar setup.</p>

<h4 id="configuration">Configuration</h4>

<p>Billforward has a single load balanced hostname with two available Pools. They’ve named the two Pools with “gce” and “aws” labels and each Pool has one Origin associated with it. All of the Pools are enabled and the entire LB/hostname is proxied through Cloudflare (as indicated by the orange cloud).</p>

<p><img src="/content/images/2017/11/Billforward_Config_UI.png" alt="Billforward Configuration UI" /></p>

<p>Cloudflare probes Billforward’s Origins once every minute from all of Cloudflare’s data centers around the world (a feature available to all Load Balancing Enterprise customers). If Billforward’s GCE Origin goes down, Cloudflare will quickly and automatically fail over to the AWS Origin with no action required from Billforward’s team.</p>
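
<p>For illustration, here is a minimal sketch of how a two-Pool setup like this could be driven through Cloudflare’s v4 API rather than the UI. The endpoint shapes, origin addresses, hostnames, and credentials below are assumptions for the example, not Billforward’s actual configuration.</p>

<pre><code class="language-javascript">// Hypothetical sketch: create one Pool per cloud provider, then a
// load balanced hostname that prefers the GCE Pool and fails over to AWS.
// Endpoint paths, field names, and values are assumptions for illustration.
const api = 'https://api.cloudflare.com/client/v4';
const headers = {
  'X-Auth-Email': 'ops@example.com', // placeholder credentials
  'X-Auth-Key': 'API_KEY_PLACEHOLDER',
  'Content-Type': 'application/json'
};

async function createPool(name, address) {
  const res = await fetch(api + '/user/load_balancers/pools', {
    method: 'POST',
    headers: headers,
    body: JSON.stringify({
      name: name, // e.g. "gce" or "aws"
      origins: [{ name: name + '-origin', address: address, enabled: true }]
    })
  });
  const json = await res.json();
  return json.result.id;
}

async function configure() {
  const gce = await createPool('gce', 'gce-origin.example.com');
  const aws = await createPool('aws', 'aws-origin.example.com');
  // Pool order expresses priority: traffic goes to "gce" while it is
  // healthy, and fails over to "aws" when health checks mark it down.
  await fetch(api + '/zones/ZONE_ID_PLACEHOLDER/load_balancers', {
    method: 'POST',
    headers: headers,
    body: JSON.stringify({
      name: 'app.example.com',
      default_pools: [gce, aws],
      fallback_pool: aws,
      proxied: true // the "orange cloud"
    })
  });
}

configure();
</code></pre>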

<p>Google Compute Engine was chosen as the primary provider for this application by virtue of cost. Martin Lee, Site Reliability Engineer at Billforward, says, “Essentially, GCE is cheaper for our general purpose computing needs but we're more experienced with deployments in AWS. This strategy allows us to switch back and forth at will and avoid being tied in to either platform.” It is likely that Billforward will change the priority as pricing models evolve.</p>

<blockquote>
<p>“It's a fairly fast moving world and features released by cloud providers can have a meaningful impact on performance and cost on a week by week basis - it helps to stay flexible,” says Martin. “We may also change priority based on features.”</p>
</blockquote>

<p>For orchestration of the compute and storage layers, Billforward uses <a href="https://www.docker.com/">Docker</a> containers managed through <a href="http://www.rancher.com/">Rancher</a>. They use distinct environments between cloud providers but are considering bridging an environment across cloud providers and using VPNs between them, which will enable them to move load between providers even more easily. “Our system is loosely coupled through a message queue,” adds Martin. “Having a container system across clouds means we can really take advantage of this - we can very easily move workloads across clouds without any danger of dropping tasks or ending up in an inconsistent state.”</p>

<h4 id="benefits">Benefits</h4>

<p>Billforward manages these connections at Cloudflare’s edge. Through this interface (or via the Cloudflare API), they can also manually move traffic from GCE to AWS by disabling the GCE Pool or by rearranging the Pool priority to make AWS the primary. These changes take effect near-instantly on the Cloudflare network and require no downtime to Billforward’s customer-facing application. This allows them to act on potentially advantageous pricing changes between the two cloud providers or move traffic to hit pricing tiers.</p>
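
<p>As a sketch of that kind of manual traffic move (continuing the example above, again with assumed endpoint shapes and a placeholder Pool ID), draining the GCE Pool is a single API call:</p>

<pre><code class="language-javascript">// Hypothetical sketch: disable the "gce" Pool so the load balancer
// serves all traffic from the next Pool in priority order ("aws").
// Re-enabling it later shifts traffic back with no downtime.
const gcePoolId = 'GCE_POOL_ID_PLACEHOLDER';

fetch(api + '/user/load_balancers/pools/' + gcePoolId, {
  method: 'PATCH',
  headers: headers, // same placeholder auth headers as above
  body: JSON.stringify({ enabled: false })
});
</code></pre>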

<p>In addition, Billforward is no longer “locked in” to either provider’s network; being able to move traffic without any downtime means they can make traffic changes independent of Amazon or Google. They can also integrate additional cloud providers any time they deem fit: adding Microsoft Azure, for example, as a third Origin would be as simple as creating a new Pool and adding it to the Load Balancer.</p>

<p>Billforward is a good example of a forward-thinking company that is taking advantage of technologies from multiple providers to best serve their business and customers, while not being reliant on a single vendor. For further detail on their setup using Cloudflare, please check their <a href="https://www.billforward.net/blog/being-multi-cloud-with-cloudflare/">blog</a>.</p>
</div>
<footer>
<small>
Tagged with <a href="/tag/google-cloud/">Google Cloud</a>, <a href="/tag/cloud-computing/">Cloud Computing</a>, <a href="/tag/internet-summit/">Internet Summit</a>
</small>
</footer>
</article>
</section>
</div>
</body>
</html>
@@ -0,0 +1,502 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />

<title>SCOTUS Wanders into Patent Troll Fight</title>
<meta name="description" content="" />
<meta name="HandheldFriendly" content="True">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="msvalidate.01" content="CF295E1604697F9CAD18B5A232E871F6" />

<link rel="shortcut icon" href="/assets/images/favicon.ico?v=b6cf3f99a6">
<link rel="apple-touch-icon-precomposed" sizes="57x57" href="/assets/images/apple-touch-icon-57x57-precomposed.png?v=b6cf3f99a6" />
<link rel="apple-touch-icon-precomposed" sizes="72x72" href="/assets/images/apple-touch-icon-72x72-precomposed.png?v=b6cf3f99a6" />
<link rel="apple-touch-icon-precomposed" sizes="114x114" href="/assets/images/apple-touch-icon-114x114-precomposed.png?v=b6cf3f99a6" />
<link rel="apple-touch-icon-precomposed" sizes="144x144" href="/assets/images/apple-touch-icon-144x144-precomposed.png?v=b6cf3f99a6" />

<link rel="stylesheet" type="text/css" href="/assets/css/screen.css?v=b6cf3f99a6" />
<!--[if lt IE 9]><link rel="stylesheet" type="text/css" href="/assets/css/ie.css?v=b6cf3f99a6" /><![endif]-->

<!--<link href="http://fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,600italic,700italic,400,700,300,600" rel="stylesheet" type="text/css">-->

<script>(function(G,o,O,g,l){G.GoogleAnalyticsObject=O;G[O]||(G[O]=function(){(G[O].q=G[O].q||[]).push(arguments)});G[O].l=+new Date;g=o.createElement('script'),l=o.scripts[0];g.src='//www.google-analytics.com/analytics.js';l.parentNode.insertBefore(g,l)}(this,document,'ga'));ga('create','UA-10218544-12', 'auto');ga('send','pageview')</script>

<link rel="canonical" href="http://blog.cloudflare.com/supreme-court-wanders-into-patent-troll-fight/" />
<meta name="referrer" content="no-referrer-when-downgrade" />
<link rel="amphtml" href="http://blog.cloudflare.com/supreme-court-wanders-into-patent-troll-fight/amp/" />

<meta property="og:site_name" content="Cloudflare Blog" />
<meta property="og:type" content="article" />
<meta property="og:title" content="SCOTUS Wanders into Patent Troll Fight" />
<meta property="og:description" content="Next Monday, the US Supreme Court will hear oral arguments in Oil States Energy Services, LLC vs. Greene’s Energy Group, LLC, which is a case to determine whether the Inter Partes Review (IPR) administrative process at the US Patent and Trademark Office (USPTO) used to determine the validity of" />
<meta property="og:url" content="http://blog.cloudflare.com/supreme-court-wanders-into-patent-troll-fight/" />
<meta property="og:image" content="http://blog.cloudflare.com/content/images/2017/11/Thomas_Rowlandson_-_The_Privy_Council_of_a_King_-_Google_Art_Project--1-.jpg" />
<meta property="article:published_time" content="2017-11-20T18:18:00.000Z" />
<meta property="article:modified_time" content="2017-11-20T22:51:13.000Z" />
<meta property="article:tag" content="Legal" />
<meta property="article:tag" content="Jengo" />
<meta property="article:tag" content="Patents" />

<meta property="article:publisher" content="https://www.facebook.com/Cloudflare" />
<meta name="twitter:card" content="summary_large_image" />
<meta name="twitter:title" content="SCOTUS Wanders into Patent Troll Fight" />
<meta name="twitter:description" content="Next Monday, the US Supreme Court will hear oral arguments in Oil States Energy Services, LLC vs. Greene’s Energy Group, LLC, which is a case to determine whether the Inter Partes Review (IPR) administrative process at the US Patent and Trademark Office (USPTO) used to determine the validity of" />
<meta name="twitter:url" content="http://blog.cloudflare.com/supreme-court-wanders-into-patent-troll-fight/" />
<meta name="twitter:image" content="http://blog.cloudflare.com/content/images/2017/11/Thomas_Rowlandson_-_The_Privy_Council_of_a_King_-_Google_Art_Project--1-.jpg" />
<meta name="twitter:label1" content="Written by" />
<meta name="twitter:data1" content="Edo Royker" />
<meta name="twitter:label2" content="Filed under" />
<meta name="twitter:data2" content="Legal, Jengo, Patents" />
<meta name="twitter:site" content="@cloudflare" />
<meta property="og:image:width" content="4468" />
<meta property="og:image:height" content="3183" />

<script type="application/ld+json">
{
"@context": "https://schema.org",
"@type": "Article",
"publisher": {
"@type": "Organization",
"name": "Cloudflare Blog",
"logo": {
"@type": "ImageObject",
"url": "http://blog.cloudflare.com/content/images/2016/09/logo-for-blog_thumb.png",
"width": 189,
"height": 47
}
},
"author": {
"@type": "Person",
"name": "Edo Royker",
"image": {
"@type": "ImageObject",
"url": "http://blog.cloudflare.com/content/images/2017/11/AAEAAQAAAAAAAAdiAAAAJDdiMzU0OWYxLTBmOTMtNGZhZi1hNDQ1LTBhNjJhZDdmMGRlZA.jpg",
"width": 200,
"height": 200
},
"url": "http://blog.cloudflare.com/author/edo-royker/",
"sameAs": []
},
"headline": "SCOTUS Wanders into Patent Troll Fight",
"url": "https://blog.cloudflare.com/supreme-court-wanders-into-patent-troll-fight/",
"datePublished": "2017-11-20T18:18:00.000Z",
"dateModified": "2017-11-20T22:51:13.000Z",
"image": {
"@type": "ImageObject",
"url": "http://blog.cloudflare.com/content/images/2017/11/Thomas_Rowlandson_-_The_Privy_Council_of_a_King_-_Google_Art_Project--1-.jpg",
"width": 4468,
"height": 3183
},
"keywords": "Legal, Jengo, Patents",
"description": "Next Monday, the US Supreme Court will hear oral arguments in Oil States Energy Services, LLC vs. Greene’s Energy Group, LLC, which is a case to determine whether the Inter Partes Review (IPR) administrative process at the US Patent and Trademark Office (USPTO) used to determine the validity of",
"mainEntityOfPage": {
"@type": "WebPage",
"@id": "http://blog.cloudflare.com"
}
}
</script>

<script type="text/javascript" src="/shared/ghost-url.min.js?v=b6cf3f99a6"></script>
<script type="text/javascript">
ghost.init({
clientId: "ghost-frontend",
clientSecret: "cf0df60d1ab4"
});
</script>
<meta name="generator" content="Ghost 0.11" />
<link rel="alternate" type="application/rss+xml" title="Cloudflare Blog" href="https://blog.cloudflare.com/rss/" />
<meta name="msvalidate.01" content="CF295E1604697F9CAD18B5A232E871F6" />
<meta class="swiftype" name="language" data-type="string" content="en" />
<script src="https://s3-us-west-1.amazonaws.com/cf-ghost-assets-hotfix/js/index.js"></script>
<script type="text/javascript" src="//cdn.bizible.com/scripts/bizible.js" async=""></script>
<script>
var trackRecruitingLink = function(role, url) {
ga('send', 'event', 'recruiting', 'jobscore-click', role, {
'transport': 'beacon',
'hitCallback': function(){document.location = url;}
});
}
</script>
<script type="text/javascript">
(function() {
var didInit = false;
function initMunchkin() {
if(didInit === false) {
didInit = true;
Munchkin.init('713-XSC-918');
}
}
var s = document.createElement('script');
s.type = 'text/javascript';
s.async = true;
s.src = '//munchkin.marketo.net/munchkin.js';
s.onreadystatechange = function() {
if (this.readyState == 'complete' || this.readyState == 'loaded') {
initMunchkin();
}
};
s.onload = initMunchkin;
document.getElementsByTagName('head')[0].appendChild(s);
})();
</script>
<script>
var HTMLAttrToAdd = document.querySelector("html");
HTMLAttrToAdd.setAttribute("lang", "en");
</script>
<style>
table {
background-color: transparent;
}
td {
padding: 5px 1em;
}
pre {
max-height: 500px;
overflow-y: scroll;
}
</style>
<link href="https://s3-us-west-1.amazonaws.com/cf-ghost-assets-hotfix/css/screen.css" rel="stylesheet">
<link href="https://cdnjs.cloudflare.com/ajax/libs/prism/1.8.1/themes/prism.min.css" rel="stylesheet">

<style>
.st-default-search-input {
font-family: Helvetica, Arial, "Lucida Grande", sans-serif;
font-size: 14px;
line-height: 16px;
font-weight: 400;
-moz-transition: opacity 0.2s;
-o-transition: opacity 0.2s;
-webkit-transition: opacity 0.2s;
transition: opacity 0.2s;
display: inline-block;
width: 190px;
height: 16px;
padding: 7px 11px 7px 28px;
border: 1px solid rgba(0, 0, 0, 0.25);
color: #444;
-moz-box-sizing: content-box;
box-sizing: content-box;
-moz-border-radius: 5px;
-webkit-border-radius: 5px;
border-radius: 5px;
background: #fff 8px 8px no-repeat url("data:image/png;base64, iVBORw0KGgoAAAANSUhEUgAAAA0AAAANCAYAAABy6%2BR8AAAACXBIWXMAAAsTAAALEwEAmpwYAAAAIGNIUk0AAG11AABzoAAA%2FN0AAINkAABw6AAA7GgAADA%2BAAAQkOTsmeoAAAESSURBVHjajNCxS9VRGMbxz71E4OwgoXPQxVEpXCI47%2BZqGP0LCoJO7UVD3QZzb3SwcHB7F3Uw3Zpd%2FAPCcJKG7Dj4u%2FK7Pwp94HDg5Xyf5z1Pr9YKImKANTzFXxzjU2ae6qhXaxURr%2FAFl9hHDy%2FwEK8z89sYVEp5gh84wMvMvGiSJ%2FEV85jNzLMR1McqfmN5BEBmnmMJFSvtpH7jdJiZv7q7Z%2BZPfMdcF6rN%2FT%2F1m2LGBkd4HhFT3dcRMY2FpskxaLNpayciHrWAGeziD7b%2BVfkithuTk8bkGa4wgWFmbrSTZOYeBvjc%2BucQj%2FEe6xHx4Taq1nrnKaW8K6XUUsrHWuvNevdRRLzFGwzvDbXAB9cDAHvhedDruuxSAAAAAElFTkSuQmCC")
}

.st-ui-close-button {
-moz-transition: none;
-o-transition: none;
-webkit-transition: none;
transition: none
}
</style>
</head>
<body class="post-template tag-legal tag-jengo tag-patents">
<div id="fb-root"></div>
<header id="header" class="header">
<div class="wrapper">
<a href="https://www.cloudflare.com" class="logo logo-header">Cloudflare</a>
<nav id="main-menu" class="header-navigation navigation" role="navigation">
<ul class="menu menu-header">
<li><a href="https://blog.cloudflare.com/">Blog home</a></li>
<li><a href="https://www.cloudflare.com/overview" tabindex="1">What we do</a></li>
<li><a href="https://www.cloudflare.com/support" tabindex="9">Support</a></li>
<li><a href="https://www.cloudflare.com/community" tabindex="9">Community</a></li>
<li><a href="https://www.cloudflare.com/login" tabindex="10">Login</a></li>
<li><a href="https://www.cloudflare.com/sign-up" class="btn btn-success" tabindex="11">Sign up</a></li>
</ul>
</nav>
</div>
</header>

<div class="wrapper reverse-sidebar">
<section class="primary-content" role="main">

<article class="post tag-legal tag-jengo tag-patents">

<header class="post-header">
<h1 class="title">The Supreme Court Wanders into the Patent Troll Fight</h1>
<div class="meta">
<time class="meta-date" datetime="2017-11-20">20 Nov 2017</time>
by <a href="/author/edo-royker/">Edo Royker</a>.
</div>
<div class="social">
<div class="g-plusone" data-size="medium" data-href="https://blog.cloudflare.com/supreme-court-wanders-into-patent-troll-fight/"></div>
<script type="IN/Share" data-url="https://blog.cloudflare.com/supreme-court-wanders-into-patent-troll-fight/" data-counter="right"></script>
<div class="fb-like" data-href="https://blog.cloudflare.com/supreme-court-wanders-into-patent-troll-fight/" data-layout="button_count" data-action="like" data-show-faces="false" data-share="false"></div>
<a href="https://twitter.com/share" class="twitter-share-button" data-url="https://blog.cloudflare.com/supreme-court-wanders-into-patent-troll-fight/" data-text="The Supreme Court Wanders into the Patent Troll Fight" data-via="cloudflare" data-related="cloudflare">Tweet</a>
</div>
</header>

<div class="post-content">
<p>Next Monday, the US Supreme Court will hear oral arguments in <em>Oil States Energy Services, LLC vs. Greene’s Energy Group, LLC</em>, a case that will determine whether the Inter Partes Review (IPR) administrative process that the US Patent and Trademark Office (USPTO) uses to determine the validity of patents is constitutional.</p>

<p>The constitutionality of the IPR process is one of the biggest legal issues facing innovative technology companies, as the availability of this process has greatly reduced the anticipated costs, and thereby lessened the threat, of patent troll litigation. As we discuss in this blog post, it is ironic that the outcome of a case that is of such great importance to the technology community today may hinge on what courts in Britain were and were not doing more than 200 years ago.</p>

<p><img src="/content/images/2017/11/Thomas_Rowlandson_-_The_Privy_Council_of_a_King_-_Google_Art_Project.jpg" alt="" title="" /><small>Thomas Rowlandson [Public domain], via <a href="https://commons.wikimedia.org/wiki/File%3AThomas_Rowlandson_-_The_Privy_Council_of_a_King_-_Google_Art_Project.jpg">Wikimedia Commons</a></small></p>

<p>As we have discussed in prior <a href="https://blog.cloudflare.com/project-jengo-challenges/">blog posts</a>, the stakes are high: if the Supreme Court finds IPR unconstitutional, then the entire system of administrative review by the USPTO — including IPR and ex parte processes — will be shuttered. This would be a mistake, as administrative recourse at the USPTO is one of the few ways to avoid the considerable costs and delays of federal court litigation, which can take years and run into the millions of dollars. Those heavy costs are often leveraged by patent trolls when they threaten litigation in the effort to procure easy and lucrative settlements from their targets.</p>

<h3 id="cloudflareispursuingourfightagainstpatenttrollsallthewaytothestepsofthesupremecourt">Cloudflare is Pursuing Our Fight Against Patent Trolls All the Way to the Steps of the Supreme Court</h3>

<p>Cloudflare joined Dell, Facebook, and a number of other companies, all practicing entities with large patent portfolios, in a <em>brief amici curiae</em> (or ‘friend of the court’ brief) in support of the IPR process, because it has a substantial positive impact on technological innovation in the United States. Amicus briefs allow parties who are interested in the outcome of a case, but are not parties to the immediate dispute before the court, to have input into the court’s deliberations.</p>

<p>As many of you are aware, we were sued by Blackbird Technologies, a notorious patent troll, earlier this year for patent infringement, and initiated <a href="https://blog.cloudflare.com/project-jengo/">Project Jengo</a> to crowdsource prior art searches and invalidate Blackbird’s patents. One of our strategies for quickly and efficiently invalidating Blackbird’s patents is to take advantage of the IPR process at the USPTO, which can be completed in about half the time and at one tenth of the cost of a federal court case, and to initiate ex parte proceedings against Blackbird’s other patents that are overly broad and invalid.</p>

<p>A full copy of the Amicus Brief we joined in the Oil States case is <a href="http://www.scotusblog.com/wp-content/uploads/2017/11/16-712-bsac-Dell.pdf">available here</a>, and a summary of the argument follows.</p>

<h3 id="oilstatesmakesitscase">Oil States Makes its Case</h3>

<p>Oil States is an oilfield services and drilling equipment manufacturing company. The USPTO invalidated one of its patents related to oil drilling technology in an IPR proceeding while Oil States had a lawsuit pending against one of its competitors claiming infringement of its patent. After it lost the IPR, Oil States lost an appeal in a lower federal court based on the findings of the IPR proceeding. The Supreme Court agreed to hear the case to determine whether once the USPTO issues a patent, an inventor has a constitutionally protected property right that — under <a href="http://www.heritage.org/constitution/#!/articles/3">Article III</a> of the U.S. Constitution (which outlines the powers of the judicial branch of the government), and the <a href="https://constitutioncenter.org/interactive-constitution/amendments/amendment-vii">7th Amendment</a> (which addresses the right to a jury trial in certain types of cases) — cannot be revoked without intervention by the court system.</p>

<p><img src="/content/images/2017/11/2770193028_68edc662a9_b.jpg" alt="" title="" /><small><a href="https://www.flickr.com/photos/paul_lowry/2770193028">Image</a> by <a href="https://creativecommons.org/licenses/by/2.0/">Paul Lowry</a></small></p>

<p>As the patent owner, Oil States argues that the IPR process violates the relevant provisions of the constitution by allowing an administrative body, the Patent Trial and Appeal Board (PTAB), a non-judicial forum, to decide a matter which was historically handled by the judiciary. This argument rests upon the premise that there was a historical analogue to cancellation of patent claims available in the judiciary. Since cancellation of patent claims was historically available in the judiciary, the cancellation of patent claims today must be consistent with that history and done exclusively by courts.</p>

<p>This argument is flawed on multiple counts, which are set forth in the “friend of the court” brief we joined.</p>

<h4 id="firstflawanadministrativeprocessevenanoriginalistcanlove">First Flaw: An Administrative Process Even an Originalist Can Love</h4>

<p>As the amicus brief we joined points out, patent revocation did not historically rest within the <em>exclusive</em> province of the common law and chancery courts, the historical equivalents in Britain to the judiciary in the United States. Rather, prior to the Founding of the United States, patent revocation rested entirely with the Crown of England’s Privy Council, a non-judicial body composed of advisors to the king or queen of England. It wasn’t until later that the Privy Council granted the chancery court (the judiciary branch) concurrent authority to revoke patents. Because a non-judicial body had the authority to revoke patents when the US Constitution was framed, the general principles of separation of powers and the right to trial in the Constitution do not require that patentability challenges be decided solely by courts.</p>

<h4 id="secondflawthejudicialrolewaslimited">Second Flaw: The Judicial Role was Limited</h4>

<p>Not only did British courts share the power to address patent rights historically, the part shared by the courts was significantly limited. Historically, the common-law and chancery courts only received a partial delegation of the Privy Council’s authority to invalidate patents. Courts only had the authority to invalidate patents for issues related to things like inequitable conduct (e.g., making false statements in the original patent application). The limited authority delegated to the English courts did not include the authority to seek claim <em>cancellation</em> based on elements intrinsic to the patent or patent application, like lack of novelty or obviousness as done under an IPR proceeding. Rather, such authority remained with the Privy Council, a non-court authority, which decided questions like whether the invention was really new. Thus, like the PTAB, the Privy Council was a non-judicial body charged with responsibility to assess patent validity based on criteria that included the novelty of the invention.</p>

<p>We think these arguments are compelling and provide very strong reasons why the Supreme Court should resist the request that such matters be resolved exclusively in federal courts. We hope that’s the position they do take because the real-world implications are significant.</p>

<h3 id="dontmesswithagoodthing">Don’t Mess with a Good Thing</h3>

<p>The IPR process is not only consistent with the US Constitution, but it also advances the Patent Clause’s objective of promoting the progress of science and useful arts. That is, the “quid pro quo of the patent system; the public must receive meaningful disclosure in exchange for being excluded from practicing the invention for a limited period of time” by patent rights. (<a href="http://caselaw.findlaw.com/us-federal-circuit/1330083.html">Enzo Biochem, Inc. v. Gen-probe Inc.</a>) Congress created the IPR process in the America Invents Act in 2011 to use administrative review to weed out poor-quality patents that did not satisfy this quid pro quo because they had not actually disclosed very much. Congress sought to provide quick and cost effective administrative procedures for challenging the validity of patent claims that did not disclose novel inventions, or that claimed to disclose substantially more innovation than they actually did, to improve patent quality and restore confidence in the presumption of validity. In other words, Congress created a system to specifically permit the efficient challenge of the zealous assertion of vague and overly broad patents.</p>

<p>As a recent study by the Congressional Research Service found, non-practicing entity (i.e., patent troll) patent litigation “activity cost defendants and licensees $29 billion in 2011, a 400 percent increase over $7 billion in 2005” and “the losses are mostly deadweight, with less than 25 percent flowing to innovation and at least that much going towards legal fees.” (<em>see</em> <a href="https://fas.org/sgp/crs/misc/R42668.pdf">Brian T. Yeh, Cong. Research Serv., R42668</a>) The IPR process enables innovative companies to navigate patent troll activity in an efficient manner and devote a greater proportion of their resources to research and development, rather than litigation or cost-of-litigation settlement fees for invalid patents.</p>

<p><img src="/content/images/2017/11/Troll-slip.jpg" alt="" title="" /><small>By EFF-Graphics (<a href="http://creativecommons.org/licenses/by/3.0/us/deed.en">Own work</a>), via <a href="https://commons.wikimedia.org/wiki/File%3ATroll-slip.jpg">Wikimedia Commons</a></small></p>

<p>Additionally, the IPR process reduces the total number and associated costs of patent disputes in a number of ways.</p>

<ul>
<li><p>Patent owners, especially patent trolls, are less likely to threaten litigation or file an infringement suit based on patent claims that they know or suspect to be invalid. In fact, patent owners who threaten or file suit merely to seek cost-of-litigation settlements have become far less prevalent because of the availability of the IPR process to reduce the cost of litigation.</p></li>
<li><p>Patent owners are less likely to initiate litigation out of concerns that the IPR proceedings may culminate in PTAB’s cancellation of all patent claims asserted in the infringement suit.</p></li>
<li><p>Where the PTAB does not cancel all asserted claims, statutory estoppel and the PTAB’s claim construction may serve to narrow the infringement issues to be resolved by the district court.</p></li>
</ul>

<p>Our hope is that the US Supreme Court justices take into full consideration the larger community of innovative companies that are helped by the IPR system in battling patent trolls, and do not limit their consideration to the implications on the parties to <em>Oil States</em> (neither of which is a non-practicing entity). As we have explained, not only does the IPR process enable innovative companies to focus their resources on technological innovation, instead of legal fees, but allowing the USPTO to administer IPR and ex parte proceedings is entirely consistent with the US Constitution.</p>

<p>While we await a decision in <em>Oil States</em>, expect to see Cloudflare initiate IPR and ex parte proceedings against Blackbird Technologies patents in the coming months.</p>

<p>We will make sure to keep you updated.</p>
</div>
<footer>
<small>
Tagged with <a href="/tag/legal/">Legal</a>, <a href="/tag/jengo/">Jengo</a>, <a href="/tag/patents/">Patents</a>
</small>
</footer>
<aside class="section learn-more">
<h5>Want to learn more about Cloudflare?</h5>
<p><a href="https://www.cloudflare.com" class="btn btn-success">Learn more</a></p>
</aside>

<aside class="section comments">
<h3>Comments</h3>
</aside>

<div id="disqus_thread"></div>
<script type="text/javascript">
var disqus_shortname = 'cloudflare';
(function() {
var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true;
dsq.src = '//' + disqus_shortname + '.disqus.com/embed.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
})();
</script>
<noscript>Please enable JavaScript to view the <a href="http://disqus.com/?ref_noscript">comments powered by Disqus.</a></noscript>
<a href="http://disqus.com" class="dsq-brlink">comments powered by <span class="logo-disqus">Disqus</span></a>

</article>

<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');
</script>
<script>(function(d, s, id) {
var js, fjs = d.getElementsByTagName(s)[0];
if (d.getElementById(id)) return;
js = d.createElement(s); js.id = id;
js.src = "//connect.facebook.net/en_US/all.js#xfbml=1&appId=596756540369391";
fjs.parentNode.insertBefore(js, fjs);
}(document, 'script', 'facebook-jssdk'));
</script>
<script src="//platform.linkedin.com/in.js" type="text/javascript">lang: en_US</script>
<script type="text/javascript">
(function() {
var po = document.createElement('script'); po.type = 'text/javascript'; po.async = true;
po.src = 'https://apis.google.com/js/platform.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(po, s);
})();
</script>

</section>

<aside class="sidebar">
<div class="widget">
<input type="text" placeholder="Search the blog" class="st-default-search-input"></input>
<script type="text/javascript">
(function(w,d,t,u,n,s,e){w['SwiftypeObject']=n;w[n]=w[n]||function(){
(w[n].q=w[n].q||[]).push(arguments);};s=d.createElement(t);
e=d.getElementsByTagName(t)[0];s.async=0;s.src=u;e.parentNode.insertBefore(s,e);
})(window,document,'script','//s.swiftypecdn.com/install/v2/st.js','_st');
_st('install','_KobMC_zsd_tDx_7NWiX','2.0.0');
</script>
</div>
<div class="widget">
<h4 class="widget-title">Cloudflare blog</h4>
<p style="margin-top: 20px">
<a href="https://www.cloudflare.com/enterprise-service-request" class="btn btn-success" tabindex="11" target="_blank">Contact our team</a>
</p>
<p>
<strong>US callers</strong><br/>
1 (888) 99-FLARE <br/>
<strong>UK callers</strong><br/>
+44 (0)20 3514 6970<br/>
<strong>International callers</strong><br/>
+1 (650) 319-8930 <BR/><BR/>
<a href="https://www.cloudflare.com/plans" target="_blank">Full feature list and plan types</a>
</p>
<p>Cloudflare provides performance and security for any website. More than 6 million websites use Cloudflare.</p>
<p>There is no hardware or software. Cloudflare works at the DNS level. It takes only 5 minutes to sign up. To learn more, please visit our website.</p>
</div>
<div class="widget">
<h4 class="widget-title">Cloudflare features</h4>
<ul class="menu menu-sidebar">
<li><a href="https://www.cloudflare.com/">Overview</a></li>
<li><a href="https://www.cloudflare.com/cdn/">CDN</a></li>
<li><a href="https://www.cloudflare.com/website-optimization/">Optimizer</a></li>
<li><a href="https://www.cloudflare.com/security/">Security</a></li>
<li><a href="https://www.cloudflare.com/analytics/">Analytics</a></li>
<li><a href="https://www.cloudflare.com/apps">Apps</a></li>
<li><a href="https://www.cloudflare.com/network/">Network map</a></li>
<li><a href="https://www.cloudflarestatus.com">System status</a></li>
</ul>
</div>
<div id="mc_embed_signup" class="widget">
<form action="https://cloudflare.us5.list-manage.com/subscribe/post?u=d80d4d74266c0c044b0bcd7ca&id=8dc0bf9dea" method="post" id="mc-embedded-subscribe-form" name="mc-embedded-subscribe-form" class="validate" target="_blank" novalidate>
<input type="email" value="" name="EMAIL" class="width-full required email" id="mce-EMAIL" placeholder="Enter your email address"/>
<div id="mce-responses" class="clearfix">
<div class="response" id="mce-error-response" style="display:none"></div>
<div class="response" id="mce-success-response" style="display:none"></div>
</div>
<div class="clearfix">
<button type="submit" name="subscribe" id="mc-embedded-subscribe" class="btn btn-primary width-full">Sign up for email updates</button>
</div>
</form>
</div>
</aside>
</div>

</body>
</html>
@@ -0,0 +1,74 @@
/*global jQuery */
/*jshint multistr:true browser:true */
/*!
 * FitVids 1.0.3
 *
 * Copyright 2013, Chris Coyier - http://css-tricks.com + Dave Rupert - http://daverupert.com
 * Credit to Thierry Koblentz - http://www.alistapart.com/articles/creating-intrinsic-ratios-for-video/
 * Released under the WTFPL license - http://sam.zoy.org/wtfpl/
 *
 * Date: Thu Sept 01 18:00:00 2011 -0500
 */

(function( $ ){

  "use strict";

  $.fn.fitVids = function( options ) {
    var settings = {
      customSelector: null
    };

    if(!document.getElementById('fit-vids-style')) {

      var div = document.createElement('div'),
          ref = document.getElementsByTagName('base')[0] || document.getElementsByTagName('script')[0],
          cssStyles = '­<style>.fluid-width-video-wrapper{width:100%;position:relative;padding:0;}.fluid-width-video-wrapper iframe,.fluid-width-video-wrapper object,.fluid-width-video-wrapper embed {position:absolute;top:0;left:0;width:100%;height:100%;}</style>';

      div.className = 'fit-vids-style';
      div.id = 'fit-vids-style';
      div.style.display = 'none';
      div.innerHTML = cssStyles;

      ref.parentNode.insertBefore(div,ref);

    }

    if ( options ) {
      $.extend( settings, options );
    }

    return this.each(function(){
      var selectors = [
        "iframe[src*='player.vimeo.com']",
        "iframe[src*='youtube.com']",
        "iframe[src*='youtube-nocookie.com']",
        "iframe[src*='kickstarter.com'][src*='video.html']",
        "object",
        "embed"
      ];

      if (settings.customSelector) {
        selectors.push(settings.customSelector);
      }

      var $allVideos = $(this).find(selectors.join(','));
      $allVideos = $allVideos.not("object object"); // SwfObj conflict patch

      $allVideos.each(function(){
        var $this = $(this);
        if (this.tagName.toLowerCase() === 'embed' && $this.parent('object').length || $this.parent('.fluid-width-video-wrapper').length) { return; }
        var height = ( this.tagName.toLowerCase() === 'object' || ($this.attr('height') && !isNaN(parseInt($this.attr('height'), 10))) ) ? parseInt($this.attr('height'), 10) : $this.height(),
            width = !isNaN(parseInt($this.attr('width'), 10)) ? parseInt($this.attr('width'), 10) : $this.width(),
            aspectRatio = height / width;
        if(!$this.attr('id')){
          var videoID = 'fitvid' + Math.floor(Math.random()*999999);
          $this.attr('id', videoID);
        }
        $this.wrap('<div class="fluid-width-video-wrapper"></div>').parent('.fluid-width-video-wrapper').css('padding-top', (aspectRatio * 100)+"%");
        $this.removeAttr('height').removeAttr('width');
      });
    });
  };
// Works with either jQuery or Zepto
})( window.jQuery || window.Zepto );
File diff suppressed because one or more lines are too long

@@ -0,0 +1,67 @@
package h2mux

import (
	"bytes"
	"io"
	"sync"
)

type SharedBuffer struct {
	cond   *sync.Cond
	buffer bytes.Buffer
	eof    bool
}

func NewSharedBuffer() *SharedBuffer {
	return &SharedBuffer{
		cond: sync.NewCond(&sync.Mutex{}),
	}
}

func (s *SharedBuffer) Read(p []byte) (n int, err error) {
	totalRead := 0
	s.cond.L.Lock()
	for totalRead == 0 {
		n, err = s.buffer.Read(p[totalRead:])
		totalRead += n
		if err == io.EOF {
			if s.eof {
				break
			}
			err = nil
			if n > 0 {
				break
			}
			s.cond.Wait()
		}
	}
	s.cond.L.Unlock()
	return totalRead, err
}

func (s *SharedBuffer) Write(p []byte) (n int, err error) {
	s.cond.L.Lock()
	defer s.cond.L.Unlock()
	if s.eof {
		return 0, io.EOF
	}
	n, err = s.buffer.Write(p)
	s.cond.Signal()
	return
}

func (s *SharedBuffer) Close() error {
	s.cond.L.Lock()
	defer s.cond.L.Unlock()
	if !s.eof {
		s.eof = true
		s.cond.Signal()
	}
	return nil
}

func (s *SharedBuffer) Closed() bool {
	s.cond.L.Lock()
	defer s.cond.L.Unlock()
	return s.eof
}
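
// A minimal usage sketch for SharedBuffer (illustrative only; the writer
// goroutine and the ioutil.ReadAll call below are assumptions, not part of
// this file): one goroutine writes while another performs blocking reads
// until Close makes drained reads return io.EOF.
//
//	buf := NewSharedBuffer()
//	go func() {
//		buf.Write([]byte("hello")) // wakes any reader blocked in Read
//		buf.Close()                // remaining data is drained, then io.EOF
//	}()
//	data, err := ioutil.ReadAll(buf) // blocks until data arrives, ends at EOF
//	_, _ = data, err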
@@ -0,0 +1,127 @@
package h2mux

import (
	"bytes"
	"io"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func AssertIOReturnIsGood(t *testing.T, expected int) func(int, error) {
	return func(actual int, err error) {
		if expected != actual {
			t.Fatalf("Expected %d bytes, got %d", expected, actual)
		}
		if err != nil {
			t.Fatalf("Unexpected error %s", err)
		}
	}
}

func TestSharedBuffer(t *testing.T) {
	b := NewSharedBuffer()
	testData := []byte("Hello world")
	AssertIOReturnIsGood(t, len(testData))(b.Write(testData))
	bytesRead := make([]byte, len(testData))
	AssertIOReturnIsGood(t, len(testData))(b.Read(bytesRead))
}

func TestSharedBufferBlockingRead(t *testing.T) {
	b := NewSharedBuffer()
	testData1 := []byte("Hello")
	testData2 := []byte(" world")
	result := make(chan []byte)
	go func() {
		bytesRead := make([]byte, len(testData1)+len(testData2))
		nRead, err := b.Read(bytesRead)
		AssertIOReturnIsGood(t, len(testData1))(nRead, err)
		result <- bytesRead[:nRead]
		nRead, err = b.Read(bytesRead)
		AssertIOReturnIsGood(t, len(testData2))(nRead, err)
		result <- bytesRead[:nRead]
	}()
	time.Sleep(time.Millisecond * 250)
	select {
	case <-result:
		t.Fatalf("read returned early")
	default:
	}
	AssertIOReturnIsGood(t, len(testData1))(b.Write([]byte(testData1)))
	select {
	case r := <-result:
		assert.Equal(t, testData1, r)
	case <-time.After(time.Second):
		t.Fatalf("read timed out")
	}
	AssertIOReturnIsGood(t, len(testData2))(b.Write([]byte(testData2)))
	select {
	case r := <-result:
		assert.Equal(t, testData2, r)
	case <-time.After(time.Second):
		t.Fatalf("read timed out")
	}
}

// This is quite slow under the race detector
func TestSharedBufferConcurrentReadWrite(t *testing.T) {
	b := NewSharedBuffer()
	var expectedResult, actualResult bytes.Buffer
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		block := make([]byte, 256)
		for i := range block {
			block[i] = byte(i)
		}
		for blockSize := 1; blockSize <= 256; blockSize++ {
			for i := 0; i < 256; i++ {
				expectedResult.Write(block[:blockSize])
				n, err := b.Write(block[:blockSize])
				if n != blockSize || err != nil {
					t.Fatalf("write error: %d %s", n, err)
				}
			}
		}
		wg.Done()
	}()
	go func() {
		block := make([]byte, 256)
		// Change block sizes in opposition to the write thread, to test blocking for new data.
		for blockSize := 256; blockSize > 0; blockSize-- {
			for i := 0; i < 256; i++ {
				n, err := io.ReadFull(b, block[:blockSize])
				if n != blockSize || err != nil {
					t.Fatalf("read error: %d %s", n, err)
				}
				actualResult.Write(block[:blockSize])
			}
		}
		wg.Done()
	}()
	wg.Wait()
	if bytes.Compare(expectedResult.Bytes(), actualResult.Bytes()) != 0 {
		t.Fatal("Result diverged")
	}
}

func TestSharedBufferClose(t *testing.T) {
	b := NewSharedBuffer()
	testData := []byte("Hello world")
	AssertIOReturnIsGood(t, len(testData))(b.Write(testData))
	err := b.Close()
	if err != nil {
		t.Fatalf("unexpected error from Close: %s", err)
	}
	bytesRead := make([]byte, len(testData))
	AssertIOReturnIsGood(t, len(testData))(b.Read(bytesRead))
	n, err := b.Read(bytesRead)
	if n != 0 {
		t.Fatalf("extra bytes received: %d", n)
	}
	if err != io.EOF {
		t.Fatalf("expected EOF, got %s", err)
	}
}
@@ -0,0 +1,34 @@
package h2mux

// Signal describes an event that can be waited on for at least one signal.
// Signalling the event while it is in the signalled state is a noop.
// When the waiter wakes up, the signal is set to unsignalled.
// It is a way for any number of writers to inform a reader (without blocking)
// that an event has happened.
type Signal struct {
	c chan struct{}
}

// NewSignal creates a new Signal.
func NewSignal() Signal {
	return Signal{c: make(chan struct{}, 1)}
}

// Signal signals the event.
func (s Signal) Signal() {
	// This channel is buffered, so the nonblocking send will always succeed if the buffer is empty.
	select {
	case s.c <- struct{}{}:
	default:
	}
}

// Wait for the event to be signalled.
func (s Signal) Wait() {
	<-s.c
}

// WaitChannel returns a channel that is readable after Signal is called.
func (s Signal) WaitChannel() <-chan struct{} {
	return s.c
}
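
// A minimal usage sketch for Signal (illustrative only; the time.After
// timeout is an assumption for demonstration): many writers may call Signal
// without ever blocking, while a single reader waits or selects.
//
//	sig := NewSignal()
//	go sig.Signal() // coalesces with any signal already pending
//	select {
//	case <-sig.WaitChannel():
//		// event observed; the signal is unsignalled again
//	case <-time.After(time.Second):
//		// timed out waiting ("time" import assumed)
//	}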
@@ -0,0 +1,47 @@
package h2mux

import (
	"sync"

	"golang.org/x/net/http2"
)

// StreamErrorMap is used to track stream errors. This is a separate structure from ActiveStreamMap because
// errors can be raised against non-existent or closed streams.
type StreamErrorMap struct {
	sync.RWMutex
	// errors tracks per-stream errors
	errors map[uint32]http2.ErrCode
	// hasError is signaled whenever an error is raised.
	hasError Signal
}

// NewStreamErrorMap creates a new StreamErrorMap.
func NewStreamErrorMap() *StreamErrorMap {
	return &StreamErrorMap{
		errors:   make(map[uint32]http2.ErrCode),
		hasError: NewSignal(),
	}
}

// RaiseError raises a stream error.
func (s *StreamErrorMap) RaiseError(streamID uint32, err http2.ErrCode) {
	s.Lock()
	s.errors[streamID] = err
	s.Unlock()
	s.hasError.Signal()
}

// GetSignalChan returns a channel that is signalled when an error is raised.
func (s *StreamErrorMap) GetSignalChan() <-chan struct{} {
	return s.hasError.WaitChannel()
}

// GetErrors retrieves all errors currently raised. This resets the currently-tracked errors.
func (s *StreamErrorMap) GetErrors() map[uint32]http2.ErrCode {
	s.Lock()
	errors := s.errors
	s.errors = make(map[uint32]http2.ErrCode)
	s.Unlock()
	return errors
}
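
// A minimal usage sketch for StreamErrorMap (illustrative only; the stream ID
// and error code below are arbitrary examples): writers raise errors from any
// goroutine, and one consumer waits on the signal channel, then drains them.
//
//	sem := NewStreamErrorMap()
//	go sem.RaiseError(5, http2.ErrCodeInternal)
//	<-sem.GetSignalChan() // wakes once at least one error is pending
//	for streamID, errCode := range sem.GetErrors() {
//		_ = streamID // handle each raised error; the map is reset afterwards
//		_ = errCode
//	}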
@@ -0,0 +1,197 @@
package hello

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"html/template"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"time"

	"github.com/gorilla/websocket"
	"github.com/sirupsen/logrus"

	"github.com/cloudflare/cloudflared/tlsconfig"
)

type templateData struct {
	ServerName string
	Request    *http.Request
	Body       string
}

type OriginUpTime struct {
	StartTime time.Time `json:"startTime"`
	UpTime    string    `json:"uptime"`
}

const defaultServerName = "the Argo Tunnel test server"
const indexTemplate = `
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=Edge">
<title>
Argo Tunnel Connection
</title>
<meta name="author" content="">
<meta name="description" content="Argo Tunnel Connection">
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
html{line-height:1.15;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}section{display:block}h1{font-size:2em;margin:.67em 0}a{background-color:transparent;-webkit-text-decoration-skip:objects}/* 1 */::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}/* 1 */a,body,dd,div,dl,dt,h1,h4,html,p,section{box-sizing:border-box}.bt{border-top-style:solid;border-top-width:1px}.bl{border-left-style:solid;border-left-width:1px}.b--orange{border-color:#f38020}.br1{border-radius:.125rem}.bw2{border-width:.25rem}.dib{display:inline-block}.sans-serif{font-family:open sans,-apple-system,BlinkMacSystemFont,avenir next,avenir,helvetica neue,helvetica,ubuntu,roboto,noto,segoe ui,arial,sans-serif}.code{font-family:Consolas,monaco,monospace}.b{font-weight:700}.fw3{font-weight:300}.fw4{font-weight:400}.fw5{font-weight:500}.fw6{font-weight:600}.lh-copy{line-height:1.5}.link{text-decoration:none}.link,.link:active,.link:focus,.link:hover,.link:link,.link:visited{transition:color .15s ease-in}.link:focus{outline:1px dotted currentColor}.mw-100{max-width:100%}.mw4{max-width:8rem}.mw7{max-width:48rem}.bg-light-gray{background-color:#f7f7f7}.link-hover:hover{background-color:#1f679e}.white{color:#fff}.bg-white{background-color:#fff}.bg-blue{background-color:#408bc9}.pb2{padding-bottom:.5rem}.pb6{padding-bottom:8rem}.pt3{padding-top:1rem}.pt5{padding-top:4rem}.pv2{padding-top:.5rem;padding-bottom:.5rem}.ph3{padding-left:1rem;padding-right:1rem}.ph4{padding-left:2rem;padding-right:2rem}.ml0{margin-left:0}.mb1{margin-bottom:.25rem}.mb2{margin-bottom:.5rem}.mb3{margin-bottom:1rem}.mt5{margin-top:4rem}.ttu{text-transform:uppercase}.f4{font-size:1.25rem}.f5{font-size:1rem}.f6{font-size:.875rem}.f7{font-size:.75rem}.measure{max-width:30em}.center{margin-left:auto}.center{margin-right:auto}@media screen and (min-width:30em){.f2-ns{font-size:2.25rem}}@media screen and (min-width:30em) and (max-width:60em){.f5-m{font-size:1rem}}@media screen and (min-width:60em){.f4-l{font-size:1.25rem}}
.st0{fill:#FFF}.st1{fill:#f48120}.st2{fill:#faad3f}.st3{fill:#404041}
</style>
</head>
<body class="sans-serif black">
<div class="bt bw2 b--orange bg-white pb6">
<div class="mw7 center ph4 pt3">
<svg id="Layer_2" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 109 40.5" class="mw4">
<path class="st0" d="M98.6 14.2L93 12.9l-1-.4-25.7.2v12.4l32.3.1z"/>
<path class="st1" d="M88.1 24c.3-1 .2-2-.3-2.6-.5-.6-1.2-1-2.1-1.1l-17.4-.2c-.1 0-.2-.1-.3-.1-.1-.1-.1-.2 0-.3.1-.2.2-.3.4-.3l17.5-.2c2.1-.1 4.3-1.8 5.1-3.8l1-2.6c0-.1.1-.2 0-.3-1.1-5.1-5.7-8.9-11.1-8.9-5 0-9.3 3.2-10.8 7.7-1-.7-2.2-1.1-3.6-1-2.4.2-4.3 2.2-4.6 4.6-.1.6 0 1.2.1 1.8-3.9.1-7.1 3.3-7.1 7.3 0 .4 0 .7.1 1.1 0 .2.2.3.3.3h32.1c.2 0 .4-.1.4-.3l.3-1.1z"/>
<path class="st2" d="M93.6 12.8h-.5c-.1 0-.2.1-.3.2l-.7 2.4c-.3 1-.2 2 .3 2.6.5.6 1.2 1 2.1 1.1l3.7.2c.1 0 .2.1.3.1.1.1.1.2 0 .3-.1.2-.2.3-.4.3l-3.8.2c-2.1.1-4.3 1.8-5.1 3.8l-.2.9c-.1.1 0 .3.2.3h13.2c.2 0 .3-.1.3-.3.2-.8.4-1.7.4-2.6 0-5.2-4.3-9.5-9.5-9.5"/>
<path class="st3" d="M104.4 30.8c-.5 0-.9-.4-.9-.9s.4-.9.9-.9.9.4.9.9-.4.9-.9.9m0-1.6c-.4 0-.7.3-.7.7 0 .4.3.7.7.7.4 0 .7-.3.7-.7 0-.4-.3-.7-.7-.7m.4 1.2h-.2l-.2-.3h-.2v.3h-.2v-.9h.5c.2 0 .3.1.3.3 0 .1-.1.2-.2.3l.2.3zm-.3-.5c.1 0 .1 0 .1-.1s-.1-.1-.1-.1h-.3v.3h.3zM14.8 29H17v6h3.8v1.9h-6zM23.1 32.9c0-2.3 1.8-4.1 4.3-4.1s4.2 1.8 4.2 4.1-1.8 4.1-4.3 4.1c-2.4 0-4.2-1.8-4.2-4.1m6.3 0c0-1.2-.8-2.2-2-2.2s-2 1-2 2.1.8 2.1 2 2.1c1.2.2 2-.8 2-2M34.3 33.4V29h2.2v4.4c0 1.1.6 1.7 1.5 1.7s1.5-.5 1.5-1.6V29h2.2v4.4c0 2.6-1.5 3.7-3.7 3.7-2.3-.1-3.7-1.2-3.7-3.7M45 29h3.1c2.8 0 4.5 1.6 4.5 3.9s-1.7 4-4.5 4h-3V29zm3.1 5.9c1.3 0 2.2-.7 2.2-2s-.9-2-2.2-2h-.9v4h.9zM55.7 29H62v1.9h-4.1v1.3h3.7V34h-3.7v2.9h-2.2zM65.1 29h2.2v6h3.8v1.9h-6zM76.8 28.9H79l3.4 8H80l-.6-1.4h-3.1l-.6 1.4h-2.3l3.4-8zm2 4.9l-.9-2.2-.9 2.2h1.8zM85.2 29h3.7c1.2 0 2 .3 2.6.9.5.5.7 1.1.7 1.8 0 1.2-.6 2-1.6 2.4l1.9 2.8H90l-1.6-2.4h-1v2.4h-2.2V29zm3.6 3.8c.7 0 1.2-.4 1.2-.9 0-.6-.5-.9-1.2-.9h-1.4v1.9h1.4zM95.3 29h6.4v1.8h-4.2V32h3.8v1.8h-3.8V35h4.3v1.9h-6.5zM10 33.9c-.3.7-1 1.2-1.8 1.2-1.2 0-2-1-2-2.1s.8-2.1 2-2.1c.9 0 1.6.6 1.9 1.3h2.3c-.4-1.9-2-3.3-4.2-3.3-2.4 0-4.3 1.8-4.3 4.1s1.8 4.1 4.2 4.1c2.1 0 3.7-1.4 4.2-3.2H10z"/>
</svg>
<h1 class="f4 f2-ns mt5 fw5">Congrats! You created your first tunnel!</h1>
<p class="f6 f5-m f4-l measure lh-copy fw3">
Argo Tunnel exposes locally running applications to the internet by
running an encrypted, virtual tunnel from your laptop or server to
Cloudflare's edge network.
</p>
<p class="b f5 mt5 fw6">Ready for the next step?</p>
<a
class="fw6 link white bg-blue ph4 pv2 br1 dib f5 link-hover"
style="border-bottom: 1px solid #1f679e"
href="https://developers.cloudflare.com/argo-tunnel/">
Get started here
</a>
<section>
<h4 class="f6 fw4 pt5 mb2">Request</h4>
<dl class="bl bw2 b--orange ph3 pt3 pb2 bg-light-gray f7 code overflow-x-auto mw-100">
<dd class="ml0 mb3 f5">Method: {{.Request.Method}}</dd>
<dd class="ml0 mb3 f5">Protocol: {{.Request.Proto}}</dd>
<dd class="ml0 mb3 f5">Request URL: {{.Request.URL}}</dd>
<dd class="ml0 mb3 f5">Transfer encoding: {{.Request.TransferEncoding}}</dd>
<dd class="ml0 mb3 f5">Host: {{.Request.Host}}</dd>
<dd class="ml0 mb3 f5">Remote address: {{.Request.RemoteAddr}}</dd>
<dd class="ml0 mb3 f5">Request URI: {{.Request.RequestURI}}</dd>
{{range $key, $value := .Request.Header}}
<dd class="ml0 mb3 f5">Header: {{$key}}, Value: {{$value}}</dd>
{{end}}
<dd class="ml0 mb3 f5">Body: {{.Body}}</dd>
</dl>
</section>
</div>
</div>
</body>
</html>
`
|
||||||
|
|
||||||
|
func StartHelloWorldServer(logger *logrus.Logger, listener net.Listener, shutdownC <-chan struct{}) error {
|
||||||
|
logger.Infof("Starting Hello World server at %s", listener.Addr())
|
||||||
|
serverName := defaultServerName
|
||||||
|
if hostname, err := os.Hostname(); err == nil {
|
||||||
|
serverName = hostname
|
||||||
|
}
|
||||||
|
|
||||||
|
upgrader := websocket.Upgrader{
|
||||||
|
ReadBufferSize: 1024,
|
||||||
|
WriteBufferSize: 1024,
|
||||||
|
}
|
||||||
|
|
||||||
|
httpServer := &http.Server{Addr: listener.Addr().String(), Handler: nil}
|
||||||
|
go func() {
|
||||||
|
<-shutdownC
|
||||||
|
httpServer.Close()
|
||||||
|
}()
|
||||||
|
|
||||||
|
http.HandleFunc("/uptime", uptimeHandler(time.Now()))
|
||||||
|
http.HandleFunc("/ws", websocketHandler(logger, upgrader))
|
||||||
|
http.HandleFunc("/", rootHandler(serverName))
|
||||||
|
err := httpServer.Serve(listener)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateTLSListener(address string) (net.Listener, error) {
|
||||||
|
certificate, err := tlsconfig.GetHelloCertificate()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the port in address is empty, a port number is automatically chosen
|
||||||
|
listener, err := tls.Listen(
|
||||||
|
"tcp",
|
||||||
|
address,
|
||||||
|
&tls.Config{Certificates: []tls.Certificate{certificate}})
|
||||||
|
|
||||||
|
return listener, err
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
func uptimeHandler(startTime time.Time) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// Note that if autoupdate is enabled, the uptime is reset when a new client
|
||||||
|
// release is available
|
||||||
|
resp := &OriginUpTime{StartTime: startTime, UpTime: time.Now().Sub(startTime).String()}
|
||||||
|
respJson, err := json.Marshal(resp)
|
||||||
|
if err != nil {
|
||||||
|
w.WriteHeader(http.StatusInternalServerError)
|
||||||
|
} else {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.Write(respJson)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This handler will echo message
|
||||||
|
func websocketHandler(logger *logrus.Logger, upgrader websocket.Upgrader) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
conn, err := upgrader.Upgrade(w, r, nil)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
for {
|
||||||
|
mt, message, err := conn.ReadMessage()
|
||||||
|
if err != nil {
|
||||||
|
logger.WithError(err).Error("websocket read message error")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := conn.WriteMessage(mt, message); err != nil {
|
||||||
|
logger.WithError(err).Error("websocket write message error")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func rootHandler(serverName string) http.HandlerFunc {
|
||||||
|
responseTemplate := template.Must(template.New("index").Parse(indexTemplate))
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var buffer bytes.Buffer
|
||||||
|
var body string
|
||||||
|
rawBody, err := ioutil.ReadAll(r.Body)
|
||||||
|
if err == nil {
|
||||||
|
body = string(rawBody)
|
||||||
|
} else {
|
||||||
|
body = ""
|
||||||
|
}
|
||||||
|
err = responseTemplate.Execute(&buffer, &templateData{
|
||||||
|
ServerName: serverName,
|
||||||
|
Request: r,
|
||||||
|
Body: body,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
w.WriteHeader(http.StatusInternalServerError)
|
||||||
|
fmt.Fprintf(w, "error: %v", err)
|
||||||
|
} else {
|
||||||
|
buffer.WriteTo(w)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
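
For orientation, a minimal sketch of wiring these pieces together; the listen address, signal handling, and exact import paths for the hello and log packages are illustrative assumptions, not part of this commit:

// Hypothetical harness (not in this commit): serve the hello world
// server over TLS until an interrupt arrives.
package main

import (
    "os"
    "os/signal"

    "github.com/cloudflare/cloudflared/hello" // import path is an assumption
    "github.com/cloudflare/cloudflared/log"   // import path is an assumption
)

func main() {
    logger := log.CreateLogger()
    listener, err := hello.CreateTLSListener("localhost:8443") // port is an assumption
    if err != nil {
        logger.WithError(err).Fatal("failed to create TLS listener")
    }
    defer listener.Close()

    shutdownC := make(chan struct{})
    go func() {
        c := make(chan os.Signal, 1)
        signal.Notify(c, os.Interrupt)
        <-c
        close(shutdownC)
    }()

    if err := hello.StartHelloWorldServer(logger, listener, shutdownC); err != nil {
        logger.WithError(err).Error("hello server exited with error")
    }
}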
@@ -0,0 +1,38 @@
package hello

import (
    "testing"
)

func TestCreateTLSListenerHostAndPortSuccess(t *testing.T) {
    listener, err := CreateTLSListener("localhost:1234")
    if err != nil {
        t.Fatal(err)
    }
    // Close is deferred only after the error check; a nil listener would panic.
    defer listener.Close()
    if listener.Addr().String() == "" {
        t.Fatal("Failed to find an available port")
    }
}

func TestCreateTLSListenerOnlyHostSuccess(t *testing.T) {
    listener, err := CreateTLSListener("localhost:")
    if err != nil {
        t.Fatal(err)
    }
    defer listener.Close()
    if listener.Addr().String() == "" {
        t.Fatal("Failed to find an available port")
    }
}

func TestCreateTLSListenerOnlyPortSuccess(t *testing.T) {
    listener, err := CreateTLSListener(":8888")
    if err != nil {
        t.Fatal(err)
    }
    defer listener.Close()
    if listener.Addr().String() == "" {
        t.Fatal("Failed to find an available port")
    }
}
@@ -0,0 +1,85 @@
// this forks the logrus json formatter to rename msg -> message as that's the
// expected field. Ideally the logger should make it easier for us.
package log

import (
    "encoding/json"
    "fmt"
    "runtime"
    "time"

    "github.com/mattn/go-colorable"
    "github.com/sirupsen/logrus"
)

var (
    DefaultTimestampFormat = time.RFC3339
)

type JSONFormatter struct {
    // TimestampFormat sets the format used for marshaling timestamps.
    TimestampFormat string
}

func CreateLogger() *logrus.Logger {
    logger := logrus.New()
    logger.Out = colorable.NewColorableStderr()
    logger.Formatter = &logrus.TextFormatter{ForceColors: runtime.GOOS == "windows"}
    return logger
}

func (f *JSONFormatter) Format(entry *logrus.Entry) ([]byte, error) {
    data := make(logrus.Fields, len(entry.Data)+3)
    for k, v := range entry.Data {
        switch v := v.(type) {
        case error:
            // Otherwise errors are ignored by `encoding/json`
            // https://github.com/sirupsen/logrus/issues/137
            data[k] = v.Error()
        default:
            data[k] = v
        }
    }
    prefixFieldClashes(data)

    timestampFormat := f.TimestampFormat
    if timestampFormat == "" {
        timestampFormat = DefaultTimestampFormat
    }

    data["time"] = entry.Time.Format(timestampFormat)
    data["message"] = entry.Message
    data["level"] = entry.Level.String()

    serialized, err := json.Marshal(data)
    if err != nil {
        return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
    }
    return append(serialized, '\n'), nil
}

// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping them. If this code weren't here, doing:
//
//   logrus.WithField("level", 1).Info("hello")
//
// would just silently drop the user-provided level. Instead, with this code
// it'll be logged as:
//
//   {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data logrus.Fields) {
    if t, ok := data["time"]; ok {
        data["fields.time"] = t
    }

    if m, ok := data["msg"]; ok {
        data["fields.msg"] = m
    }

    if l, ok := data["level"]; ok {
        data["fields.level"] = l
    }
}
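
To illustrate the rename and the clash handling, a brief hedged usage sketch (the field and message values are invented; assumes the logrus import above):

// Hypothetical usage (not part of this commit): emit JSON logs with
// the renamed "message" field.
logger := logrus.New()
logger.Formatter = &JSONFormatter{}
// A user field named "level" collides with the standard key, so it is
// prefixed rather than silently dropped:
logger.WithField("level", 1).Info("hello")
// => {"fields.level":1,"level":"info","message":"hello","time":"..."}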
@@ -0,0 +1,7 @@
package metrics

type HistogramConfig struct {
    BucketsStart float64
    BucketsWidth float64
    BucketsCount int
}
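
The three fields map naturally onto Prometheus linear buckets. A minimal sketch of that translation; the helper name is mine, not from this commit (assumes the prometheus client import):

// Hypothetical helper (not in this commit): build linear histogram
// buckets from a HistogramConfig.
func linearBuckets(c HistogramConfig) []float64 {
    // LinearBuckets(start, width, count) yields count buckets of equal width.
    return prometheus.LinearBuckets(c.BucketsStart, c.BucketsWidth, c.BucketsCount)
}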
@@ -0,0 +1,72 @@
package metrics

import (
    "net"
    "net/http"
    _ "net/http/pprof"
    "runtime"
    "sync"
    "time"

    "golang.org/x/net/context"
    "golang.org/x/net/trace"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "github.com/sirupsen/logrus"
)

const (
    shutdownTimeout = time.Second * 15
    startupTime     = time.Millisecond * 500
)

func ServeMetrics(l net.Listener, shutdownC <-chan struct{}, logger *logrus.Logger) (err error) {
    var wg sync.WaitGroup
    // Metrics port is privileged, so no need for further access control
    trace.AuthRequest = func(*http.Request) (bool, bool) { return true, true }
    // TODO: parameterize ReadTimeout and WriteTimeout. The maximum time we can
    // profile CPU usage depends on WriteTimeout
    server := &http.Server{
        ReadTimeout:  10 * time.Second,
        WriteTimeout: 10 * time.Second,
    }

    http.Handle("/metrics", promhttp.Handler())

    wg.Add(1)
    go func() {
        defer wg.Done()
        err = server.Serve(l)
    }()
    logger.WithField("addr", l.Addr()).Info("Starting metrics server")
    // server.Serve will hang if server.Shutdown is called before the server is
    // fully started up. So add artificial delay.
    time.Sleep(startupTime)

    <-shutdownC
    ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
    server.Shutdown(ctx)
    cancel()

    wg.Wait()
    if err == http.ErrServerClosed {
        logger.Info("Metrics server stopped")
        return nil
    }
    logger.WithError(err).Error("Metrics server quit with error")
    return err
}

func RegisterBuildInfo(buildTime string, version string) {
    buildInfo := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            // Don't namespace build_info, since we want it to be consistent across all Cloudflare services
            Name: "build_info",
            Help: "Build and version information",
        },
        []string{"goversion", "revision", "version"},
    )
    prometheus.MustRegister(buildInfo)
    buildInfo.WithLabelValues(runtime.Version(), buildTime, version).Set(1)
}
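
A hedged sketch of how ServeMetrics might be wired up; the listen address and the surrounding logger are assumptions for illustration, not part of this commit:

// Hypothetical wiring (not in this commit): expose /metrics on a
// local port until shutdown is signalled.
listener, err := net.Listen("tcp", "127.0.0.1:9090") // address is illustrative
if err != nil {
    logger.WithError(err).Fatal("failed to open metrics listener")
}
shutdownC := make(chan struct{})
go func() {
    // ServeMetrics blocks until shutdownC is closed (or the server fails).
    if err := ServeMetrics(listener, shutdownC, logger); err != nil {
        logger.WithError(err).Error("metrics server error")
    }
}()
// ... run the tunnel ...
close(shutdownC) // triggers graceful shutdown within shutdownTimeout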
@@ -0,0 +1,47 @@
package metrics

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

// Timer assumes the metric is partitioned by one label
type Timer struct {
    startTime   map[string]time.Time
    metrics     *prometheus.HistogramVec
    measureUnit time.Duration
    labelKey    string
}

func NewTimer(metrics *prometheus.HistogramVec, unit time.Duration, labelKey string) *Timer {
    return &Timer{
        startTime:   make(map[string]time.Time),
        measureUnit: unit,
        metrics:     metrics,
        labelKey:    labelKey,
    }
}

func (i *Timer) Start(labelVal string) {
    i.startTime[labelVal] = time.Now()
}

func (i *Timer) End(labelVal string) time.Duration {
    if start, ok := i.startTime[labelVal]; ok {
        return Latency(start, time.Now())
    }
    return 0
}

func (i *Timer) Observe(measurement time.Duration, labelVal string) {
    metricsLabels := prometheus.Labels{i.labelKey: labelVal}
    i.metrics.With(metricsLabels).Observe(float64(measurement / i.measureUnit))
}

func (i *Timer) EndAndObserve(labelVal string) {
    i.Observe(i.End(labelVal), labelVal)
}

func Latency(startTime, endTime time.Time) time.Duration {
    return endTime.Sub(startTime)
}
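
A brief hedged sketch of the intended call pattern; the histogram name and bucket layout are illustrative assumptions (assumes the prometheus and time imports above):

// Hypothetical usage (not in this commit): time an operation per
// connection and record it in milliseconds.
hist := prometheus.NewHistogramVec(
    prometheus.HistogramOpts{
        Name:    "operation_latency_ms", // name is an assumption
        Buckets: prometheus.LinearBuckets(0, 50, 20),
    },
    []string{"connection_id"},
)
prometheus.MustRegister(hist)

timer := NewTimer(hist, time.Millisecond, "connection_id")
timer.Start("0")
// ... do work ...
timer.EndAndObserve("0") // records the elapsed time for the "0" series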
@@ -0,0 +1,24 @@
package metrics

import (
    "testing"
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/stretchr/testify/assert"
)

func TestEnd(t *testing.T) {
    m := prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Namespace: "TestCallLatencyWithoutMeasurement",
            Name:      "Latency",
            Buckets:   prometheus.LinearBuckets(0, 50, 100),
        },
        []string{"key"},
    )
    timer := NewTimer(m, time.Millisecond, "key")
    assert.Equal(t, time.Duration(0), timer.End("dne"))
    timer.Start("test")
    assert.NotEqual(t, time.Duration(0), timer.End("test"))
}
@@ -0,0 +1,95 @@
package origin

import (
    "time"

    "golang.org/x/net/context"
)

// Redeclare time functions so they can be overridden in tests.
var (
    timeNow   = time.Now
    timeAfter = time.After
)

// BackoffHandler manages exponential backoff and limits the maximum number of retries.
// The base time period is 1 second, doubling with each retry.
// After initial success, a grace period can be set to reset the backoff timer if
// a connection is maintained successfully for a long enough period. The base grace period
// is 2 seconds, doubling with each retry.
type BackoffHandler struct {
    // MaxRetries sets the maximum number of retries to perform. The default value
    // of 0 disables retry completely.
    MaxRetries uint
    // RetryForever caps the exponential backoff period according to MaxRetries
    // but allows you to retry indefinitely.
    RetryForever bool
    // BaseTime sets the initial backoff period.
    BaseTime time.Duration

    retries       uint
    resetDeadline time.Time
}

func (b BackoffHandler) GetBackoffDuration(ctx context.Context) (time.Duration, bool) {
    // Follows the same logic as Backoff, but without mutating the receiver.
    // This select has to happen first to reflect the actual behaviour of the Backoff function.
    select {
    case <-ctx.Done():
        return time.Duration(0), false
    default:
    }
    if !b.resetDeadline.IsZero() && timeNow().After(b.resetDeadline) {
        // b.retries would be set to 0 at this point
        return time.Second, true
    }
    if b.retries >= b.MaxRetries && !b.RetryForever {
        return time.Duration(0), false
    }
    return time.Duration(b.GetBaseTime() * 1 << b.retries), true
}

// BackoffTimer returns a channel that sends the current time when the exponential backoff timeout expires.
// Returns nil if the maximum number of retries have been used.
func (b *BackoffHandler) BackoffTimer() <-chan time.Time {
    if !b.resetDeadline.IsZero() && timeNow().After(b.resetDeadline) {
        b.retries = 0
        b.resetDeadline = time.Time{}
    }
    if b.retries >= b.MaxRetries {
        if !b.RetryForever {
            return nil
        }
    } else {
        b.retries++
    }
    return timeAfter(time.Duration(b.GetBaseTime() * 1 << (b.retries - 1)))
}

// Backoff is used to wait according to exponential backoff. Returns false if the
// maximum number of retries have been used or if the underlying context has been cancelled.
func (b *BackoffHandler) Backoff(ctx context.Context) bool {
    c := b.BackoffTimer()
    if c == nil {
        return false
    }
    select {
    case <-c:
        return true
    case <-ctx.Done():
        return false
    }
}

// SetGracePeriod sets a grace period within which the backoff timer is maintained. After the grace
// period expires, the number of retries & backoff duration is reset.
func (b *BackoffHandler) SetGracePeriod() {
    b.resetDeadline = timeNow().Add(time.Duration(b.GetBaseTime() * 2 << b.retries))
}

func (b BackoffHandler) GetBaseTime() time.Duration {
    if b.BaseTime == 0 {
        return time.Second
    }
    return b.BaseTime
}
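
A hedged sketch of the intended call pattern, mirroring how the supervisor below uses the type; dial and serve are stand-ins for the caller's operations, not part of this commit:

// Hypothetical retry loop (not in this commit).
backoff := BackoffHandler{MaxRetries: 5, BaseTime: time.Second}
for {
    conn, err := dial(ctx) // dial is a stand-in
    if err == nil {
        // Connected: arm the grace period. If the connection survives
        // past it, the next failure restarts backoff from 1 second.
        backoff.SetGracePeriod()
        err = serve(conn) // stand-in: blocks until the connection drops
    }
    if !backoff.Backoff(ctx) {
        return err // retries exhausted or ctx cancelled
    }
}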
@@ -0,0 +1,148 @@
package origin

import (
    "testing"
    "time"

    "golang.org/x/net/context"
)

func immediateTimeAfter(time.Duration) <-chan time.Time {
    c := make(chan time.Time, 1)
    c <- time.Now()
    return c
}

func TestBackoffRetries(t *testing.T) {
    // make backoff return immediately
    timeAfter = immediateTimeAfter
    ctx := context.Background()
    backoff := BackoffHandler{MaxRetries: 3}
    if !backoff.Backoff(ctx) {
        t.Fatalf("backoff failed immediately")
    }
    if !backoff.Backoff(ctx) {
        t.Fatalf("backoff failed after 1 retry")
    }
    if !backoff.Backoff(ctx) {
        t.Fatalf("backoff failed after 2 retries")
    }
    if backoff.Backoff(ctx) {
        t.Fatalf("backoff allowed after 3 (max) retries")
    }
}

func TestBackoffCancel(t *testing.T) {
    // prevent backoff from returning normally
    timeAfter = func(time.Duration) <-chan time.Time { return make(chan time.Time) }
    ctx, cancelFunc := context.WithCancel(context.Background())
    backoff := BackoffHandler{MaxRetries: 3}
    cancelFunc()
    if backoff.Backoff(ctx) {
        t.Fatalf("backoff allowed after cancel")
    }
    if _, ok := backoff.GetBackoffDuration(ctx); ok {
        t.Fatalf("backoff allowed after cancel")
    }
}

func TestBackoffGracePeriod(t *testing.T) {
    currentTime := time.Now()
    // make timeNow return whatever we like
    timeNow = func() time.Time { return currentTime }
    // make backoff return immediately
    timeAfter = immediateTimeAfter
    ctx := context.Background()
    backoff := BackoffHandler{MaxRetries: 1}
    if !backoff.Backoff(ctx) {
        t.Fatalf("backoff failed immediately")
    }
    // the next call to Backoff would fail unless it's after the grace period
    backoff.SetGracePeriod()
    // advance time to after the grace period (~4 seconds) and see what happens
    currentTime = currentTime.Add(time.Second * 5)
    if !backoff.Backoff(ctx) {
        t.Fatalf("backoff failed after the grace period expired")
    }
    // confirm we ignore grace period after backoff
    if backoff.Backoff(ctx) {
        t.Fatalf("backoff allowed after 1 (max) retry")
    }
}

func TestGetBackoffDurationRetries(t *testing.T) {
    // make backoff return immediately
    timeAfter = immediateTimeAfter
    ctx := context.Background()
    backoff := BackoffHandler{MaxRetries: 3}
    if _, ok := backoff.GetBackoffDuration(ctx); !ok {
        t.Fatalf("backoff failed immediately")
    }
    backoff.Backoff(ctx) // noop
    if _, ok := backoff.GetBackoffDuration(ctx); !ok {
        t.Fatalf("backoff failed after 1 retry")
    }
    backoff.Backoff(ctx) // noop
    if _, ok := backoff.GetBackoffDuration(ctx); !ok {
        t.Fatalf("backoff failed after 2 retries")
    }
    backoff.Backoff(ctx) // noop
    if _, ok := backoff.GetBackoffDuration(ctx); ok {
        t.Fatalf("backoff allowed after 3 (max) retries")
    }
    if backoff.Backoff(ctx) {
        t.Fatalf("backoff allowed after 3 (max) retries")
    }
}

func TestGetBackoffDuration(t *testing.T) {
    // make backoff return immediately
    timeAfter = immediateTimeAfter
    ctx := context.Background()
    backoff := BackoffHandler{MaxRetries: 3}
    if duration, ok := backoff.GetBackoffDuration(ctx); !ok || duration != time.Second {
        t.Fatalf("backoff didn't return 1 second on first retry")
    }
    backoff.Backoff(ctx) // noop
    if duration, ok := backoff.GetBackoffDuration(ctx); !ok || duration != time.Second*2 {
        t.Fatalf("backoff didn't return 2 seconds on second retry")
    }
    backoff.Backoff(ctx) // noop
    if duration, ok := backoff.GetBackoffDuration(ctx); !ok || duration != time.Second*4 {
        t.Fatalf("backoff didn't return 4 seconds on third retry")
    }
    backoff.Backoff(ctx) // noop
    if duration, ok := backoff.GetBackoffDuration(ctx); ok || duration != 0 {
        t.Fatalf("backoff didn't return 0 seconds on fourth retry (exceeding limit)")
    }
}

func TestBackoffRetryForever(t *testing.T) {
    // make backoff return immediately
    timeAfter = immediateTimeAfter
    ctx := context.Background()
    backoff := BackoffHandler{MaxRetries: 3, RetryForever: true}
    if duration, ok := backoff.GetBackoffDuration(ctx); !ok || duration != time.Second {
        t.Fatalf("backoff didn't return 1 second on first retry")
    }
    backoff.Backoff(ctx) // noop
    if duration, ok := backoff.GetBackoffDuration(ctx); !ok || duration != time.Second*2 {
        t.Fatalf("backoff didn't return 2 seconds on second retry")
    }
    backoff.Backoff(ctx) // noop
    if duration, ok := backoff.GetBackoffDuration(ctx); !ok || duration != time.Second*4 {
        t.Fatalf("backoff didn't return 4 seconds on third retry")
    }
    if !backoff.Backoff(ctx) {
        t.Fatalf("backoff refused on fourth retry despite RetryForever")
    }
    if duration, ok := backoff.GetBackoffDuration(ctx); !ok || duration != time.Second*8 {
        t.Fatalf("backoff returned %v instead of 8 seconds on fourth retry", duration)
    }
    if !backoff.Backoff(ctx) {
        t.Fatalf("backoff refused on fifth retry despite RetryForever")
    }
    if duration, ok := backoff.GetBackoffDuration(ctx); !ok || duration != time.Second*8 {
        t.Fatalf("backoff returned %v instead of 8 seconds on fifth retry", duration)
    }
}
@@ -0,0 +1,19 @@
package origin

import (
    "runtime"
)

type BuildInfo struct {
    GoOS      string `json:"go_os"`
    GoVersion string `json:"go_version"`
    GoArch    string `json:"go_arch"`
}

func GetBuildInfo() *BuildInfo {
    return &BuildInfo{
        GoOS:      runtime.GOOS,
        GoVersion: runtime.Version(),
        GoArch:    runtime.GOARCH,
    }
}
@@ -0,0 +1,82 @@
package origin

import (
    "fmt"
    "net"
)

const (
    // Used to discover HA Warp servers
    srvService = "warp"
    srvProto   = "tcp"
    srvName    = "cloudflarewarp.com"
)

func ResolveEdgeIPs(addresses []string) ([]*net.TCPAddr, error) {
    if len(addresses) > 0 {
        var tcpAddrs []*net.TCPAddr
        for _, address := range addresses {
            // Addresses specified (for testing, usually)
            tcpAddr, err := net.ResolveTCPAddr("tcp", address)
            if err != nil {
                return nil, err
            }
            tcpAddrs = append(tcpAddrs, tcpAddr)
        }
        return tcpAddrs, nil
    }
    // HA service discovery lookup
    _, addrs, err := net.LookupSRV(srvService, srvProto, srvName)
    if err != nil {
        return nil, err
    }
    var resolvedIPsPerCNAME [][]*net.TCPAddr
    var lookupErr error
    for _, addr := range addrs {
        ips, err := ResolveSRVToTCP(addr)
        if err != nil || len(ips) == 0 {
            // don't return early, we might be able to resolve other addresses
            lookupErr = err
            continue
        }
        resolvedIPsPerCNAME = append(resolvedIPsPerCNAME, ips)
    }
    ips := FlattenServiceIPs(resolvedIPsPerCNAME)
    if lookupErr == nil && len(ips) == 0 {
        return nil, fmt.Errorf("Unknown service discovery error")
    }
    return ips, lookupErr
}

func ResolveSRVToTCP(srv *net.SRV) ([]*net.TCPAddr, error) {
    ips, err := net.LookupIP(srv.Target)
    if err != nil {
        return nil, err
    }
    addrs := make([]*net.TCPAddr, len(ips))
    for i, ip := range ips {
        addrs[i] = &net.TCPAddr{IP: ip, Port: int(srv.Port)}
    }
    return addrs, nil
}

// FlattenServiceIPs transposes and flattens the input slices such that the
// first element of the n inner slices are the first n elements of the result.
func FlattenServiceIPs(ipsByService [][]*net.TCPAddr) []*net.TCPAddr {
    var result []*net.TCPAddr
    for len(ipsByService) > 0 {
        filtered := ipsByService[:0]
        for _, ips := range ipsByService {
            if len(ips) == 0 {
                // sanity check
                continue
            }
            result = append(result, ips[0])
            if len(ips) > 1 {
                filtered = append(filtered, ips[1:])
            }
        }
        ipsByService = filtered
    }
    return result
}
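
A hedged sketch of the two resolution paths; the explicit address is an example value, not a real edge endpoint from this commit:

// Hypothetical call (not in this commit): explicit addresses skip SRV
// discovery; an empty slice triggers the warp SRV lookup instead.
addrs, err := ResolveEdgeIPs([]string{"198.51.100.1:7844"}) // example value
if err != nil {
    log.Fatal(err)
}
for _, addr := range addrs {
    fmt.Println(addr)
}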
@@ -0,0 +1,45 @@
package origin

import (
    "net"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestFlattenServiceIPs(t *testing.T) {
    result := FlattenServiceIPs([][]*net.TCPAddr{
        []*net.TCPAddr{
            &net.TCPAddr{Port: 1},
            &net.TCPAddr{Port: 2},
            &net.TCPAddr{Port: 3},
            &net.TCPAddr{Port: 4},
        },
        []*net.TCPAddr{
            &net.TCPAddr{Port: 10},
            &net.TCPAddr{Port: 12},
            &net.TCPAddr{Port: 13},
        },
        []*net.TCPAddr{
            &net.TCPAddr{Port: 21},
            &net.TCPAddr{Port: 22},
            &net.TCPAddr{Port: 23},
            &net.TCPAddr{Port: 24},
            &net.TCPAddr{Port: 25},
        },
    })
    assert.EqualValues(t, []*net.TCPAddr{
        &net.TCPAddr{Port: 1},
        &net.TCPAddr{Port: 10},
        &net.TCPAddr{Port: 21},
        &net.TCPAddr{Port: 2},
        &net.TCPAddr{Port: 12},
        &net.TCPAddr{Port: 22},
        &net.TCPAddr{Port: 3},
        &net.TCPAddr{Port: 13},
        &net.TCPAddr{Port: 23},
        &net.TCPAddr{Port: 4},
        &net.TCPAddr{Port: 24},
        &net.TCPAddr{Port: 25},
    }, result)
}
@@ -0,0 +1,421 @@
package origin

import (
    "sync"
    "time"

    "github.com/cloudflare/cloudflared/h2mux"

    "github.com/prometheus/client_golang/prometheus"
)

type muxerMetrics struct {
    rtt              *prometheus.GaugeVec
    rttMin           *prometheus.GaugeVec
    rttMax           *prometheus.GaugeVec
    receiveWindowAve *prometheus.GaugeVec
    sendWindowAve    *prometheus.GaugeVec
    receiveWindowMin *prometheus.GaugeVec
    receiveWindowMax *prometheus.GaugeVec
    sendWindowMin    *prometheus.GaugeVec
    sendWindowMax    *prometheus.GaugeVec
    inBoundRateCurr  *prometheus.GaugeVec
    inBoundRateMin   *prometheus.GaugeVec
    inBoundRateMax   *prometheus.GaugeVec
    outBoundRateCurr *prometheus.GaugeVec
    outBoundRateMin  *prometheus.GaugeVec
    outBoundRateMax  *prometheus.GaugeVec
    compBytesBefore  *prometheus.GaugeVec
    compBytesAfter   *prometheus.GaugeVec
    compRateAve      *prometheus.GaugeVec
}

type TunnelMetrics struct {
    haConnections     prometheus.Gauge
    totalRequests     prometheus.Counter
    requestsPerTunnel *prometheus.CounterVec
    // concurrentRequestsLock is a mutex for concurrentRequests and maxConcurrentRequests
    concurrentRequestsLock      sync.Mutex
    concurrentRequestsPerTunnel *prometheus.GaugeVec
    // concurrentRequests records the count of concurrent requests for each tunnel
    concurrentRequests             map[string]uint64
    maxConcurrentRequestsPerTunnel *prometheus.GaugeVec
    // maxConcurrentRequests records the max count of concurrent requests for each tunnel
    maxConcurrentRequests map[string]uint64
    timerRetries          prometheus.Gauge
    responseByCode        *prometheus.CounterVec
    responseCodePerTunnel *prometheus.CounterVec
    serverLocations       *prometheus.GaugeVec
    // locationLock is a mutex for oldServerLocations
    locationLock sync.Mutex
    // oldServerLocations stores the last server the tunnel was connected to
    oldServerLocations map[string]string

    muxerMetrics *muxerMetrics
}

func newMuxerMetrics() *muxerMetrics {
    rtt := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "rtt",
            Help: "Round-trip time in milliseconds",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(rtt)

    rttMin := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "rtt_min",
            Help: "Shortest round-trip time in milliseconds",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(rttMin)

    rttMax := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "rtt_max",
            Help: "Longest round-trip time in milliseconds",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(rttMax)

    receiveWindowAve := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "receive_window_ave",
            Help: "Average receive window size in bytes",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(receiveWindowAve)

    sendWindowAve := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "send_window_ave",
            Help: "Average send window size in bytes",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(sendWindowAve)

    receiveWindowMin := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "receive_window_min",
            Help: "Smallest receive window size in bytes",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(receiveWindowMin)

    receiveWindowMax := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "receive_window_max",
            Help: "Largest receive window size in bytes",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(receiveWindowMax)

    sendWindowMin := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "send_window_min",
            Help: "Smallest send window size in bytes",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(sendWindowMin)

    sendWindowMax := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "send_window_max",
            Help: "Largest send window size in bytes",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(sendWindowMax)

    inBoundRateCurr := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "inbound_bytes_per_sec_curr",
            Help: "Current inbound bytes per second, 0 if there is no incoming connection",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(inBoundRateCurr)

    inBoundRateMin := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "inbound_bytes_per_sec_min",
            Help: "Minimum non-zero inbound bytes per second",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(inBoundRateMin)

    inBoundRateMax := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "inbound_bytes_per_sec_max",
            Help: "Maximum inbound bytes per second",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(inBoundRateMax)

    outBoundRateCurr := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "outbound_bytes_per_sec_curr",
            Help: "Current outbound bytes per second, 0 if there is no outgoing traffic",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(outBoundRateCurr)

    outBoundRateMin := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "outbound_bytes_per_sec_min",
            Help: "Minimum non-zero outbound bytes per second",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(outBoundRateMin)

    outBoundRateMax := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "outbound_bytes_per_sec_max",
            Help: "Maximum outbound bytes per second",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(outBoundRateMax)

    compBytesBefore := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "comp_bytes_before",
            Help: "Bytes sent via cross-stream compression, pre compression",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(compBytesBefore)

    compBytesAfter := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "comp_bytes_after",
            Help: "Bytes sent via cross-stream compression, post compression",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(compBytesAfter)

    compRateAve := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "comp_rate_ave",
            Help: "Average outbound cross-stream compression ratio",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(compRateAve)

    return &muxerMetrics{
        rtt:              rtt,
        rttMin:           rttMin,
        rttMax:           rttMax,
        receiveWindowAve: receiveWindowAve,
        sendWindowAve:    sendWindowAve,
        receiveWindowMin: receiveWindowMin,
        receiveWindowMax: receiveWindowMax,
        sendWindowMin:    sendWindowMin,
        sendWindowMax:    sendWindowMax,
        inBoundRateCurr:  inBoundRateCurr,
        inBoundRateMin:   inBoundRateMin,
        inBoundRateMax:   inBoundRateMax,
        outBoundRateCurr: outBoundRateCurr,
        outBoundRateMin:  outBoundRateMin,
        outBoundRateMax:  outBoundRateMax,
        compBytesBefore:  compBytesBefore,
        compBytesAfter:   compBytesAfter,
        compRateAve:      compRateAve,
    }
}

func (m *muxerMetrics) update(connectionID string, metrics *h2mux.MuxerMetrics) {
    m.rtt.WithLabelValues(connectionID).Set(convertRTTMilliSec(metrics.RTT))
    m.rttMin.WithLabelValues(connectionID).Set(convertRTTMilliSec(metrics.RTTMin))
    m.rttMax.WithLabelValues(connectionID).Set(convertRTTMilliSec(metrics.RTTMax))
    m.receiveWindowAve.WithLabelValues(connectionID).Set(metrics.ReceiveWindowAve)
    m.sendWindowAve.WithLabelValues(connectionID).Set(metrics.SendWindowAve)
    m.receiveWindowMin.WithLabelValues(connectionID).Set(float64(metrics.ReceiveWindowMin))
    m.receiveWindowMax.WithLabelValues(connectionID).Set(float64(metrics.ReceiveWindowMax))
    m.sendWindowMin.WithLabelValues(connectionID).Set(float64(metrics.SendWindowMin))
    m.sendWindowMax.WithLabelValues(connectionID).Set(float64(metrics.SendWindowMax))
    m.inBoundRateCurr.WithLabelValues(connectionID).Set(float64(metrics.InBoundRateCurr))
    m.inBoundRateMin.WithLabelValues(connectionID).Set(float64(metrics.InBoundRateMin))
    m.inBoundRateMax.WithLabelValues(connectionID).Set(float64(metrics.InBoundRateMax))
    m.outBoundRateCurr.WithLabelValues(connectionID).Set(float64(metrics.OutBoundRateCurr))
    m.outBoundRateMin.WithLabelValues(connectionID).Set(float64(metrics.OutBoundRateMin))
    m.outBoundRateMax.WithLabelValues(connectionID).Set(float64(metrics.OutBoundRateMax))
    m.compBytesBefore.WithLabelValues(connectionID).Set(float64(metrics.CompBytesBefore.Value()))
    m.compBytesAfter.WithLabelValues(connectionID).Set(float64(metrics.CompBytesAfter.Value()))
    m.compRateAve.WithLabelValues(connectionID).Set(float64(metrics.CompRateAve()))
}

func convertRTTMilliSec(t time.Duration) float64 {
    return float64(t / time.Millisecond)
}

// Metrics that can be collected without asking the edge
func NewTunnelMetrics() *TunnelMetrics {
    haConnections := prometheus.NewGauge(
        prometheus.GaugeOpts{
            Name: "ha_connections",
            Help: "Number of active HA connections",
        })
    prometheus.MustRegister(haConnections)

    totalRequests := prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "total_requests",
            Help: "Amount of requests proxied through all the tunnels",
        })
    prometheus.MustRegister(totalRequests)

    requestsPerTunnel := prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "requests_per_tunnel",
            Help: "Amount of requests proxied through each tunnel",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(requestsPerTunnel)

    concurrentRequestsPerTunnel := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "concurrent_requests_per_tunnel",
            Help: "Concurrent requests proxied through each tunnel",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(concurrentRequestsPerTunnel)

    maxConcurrentRequestsPerTunnel := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "max_concurrent_requests_per_tunnel",
            Help: "Largest number of concurrent requests proxied through each tunnel so far",
        },
        []string{"connection_id"},
    )
    prometheus.MustRegister(maxConcurrentRequestsPerTunnel)

    timerRetries := prometheus.NewGauge(
        prometheus.GaugeOpts{
            Name: "timer_retries",
            Help: "Count of unacknowledged heartbeats",
        })
    prometheus.MustRegister(timerRetries)

    responseByCode := prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "response_by_code",
            Help: "Count of responses by HTTP status code",
        },
        []string{"status_code"},
    )
    prometheus.MustRegister(responseByCode)

    responseCodePerTunnel := prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "response_code_per_tunnel",
            Help: "Count of responses by HTTP status code for each tunnel",
        },
        []string{"connection_id", "status_code"},
    )
    prometheus.MustRegister(responseCodePerTunnel)

    serverLocations := prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "server_locations",
            Help: "Where each tunnel is connected to. 1 means current location, 0 means previous locations.",
        },
        []string{"connection_id", "location"},
    )
    prometheus.MustRegister(serverLocations)

    return &TunnelMetrics{
        haConnections:                  haConnections,
        totalRequests:                  totalRequests,
        requestsPerTunnel:              requestsPerTunnel,
        concurrentRequestsPerTunnel:    concurrentRequestsPerTunnel,
        concurrentRequests:             make(map[string]uint64),
        maxConcurrentRequestsPerTunnel: maxConcurrentRequestsPerTunnel,
        maxConcurrentRequests:          make(map[string]uint64),
        timerRetries:                   timerRetries,
        responseByCode:                 responseByCode,
        responseCodePerTunnel:          responseCodePerTunnel,
        serverLocations:                serverLocations,
        oldServerLocations:             make(map[string]string),
        muxerMetrics:                   newMuxerMetrics(),
    }
}

func (t *TunnelMetrics) incrementHaConnections() {
    t.haConnections.Inc()
}

func (t *TunnelMetrics) decrementHaConnections() {
    t.haConnections.Dec()
}

func (t *TunnelMetrics) updateMuxerMetrics(connectionID string, metrics *h2mux.MuxerMetrics) {
    t.muxerMetrics.update(connectionID, metrics)
}

func (t *TunnelMetrics) incrementRequests(connectionID string) {
    t.concurrentRequestsLock.Lock()
    var concurrentRequests uint64
    var ok bool
    if concurrentRequests, ok = t.concurrentRequests[connectionID]; ok {
        t.concurrentRequests[connectionID] += 1
        concurrentRequests++
    } else {
        t.concurrentRequests[connectionID] = 1
        concurrentRequests = 1
    }
    if maxConcurrentRequests, ok := t.maxConcurrentRequests[connectionID]; (ok && maxConcurrentRequests < concurrentRequests) || !ok {
        t.maxConcurrentRequests[connectionID] = concurrentRequests
        t.maxConcurrentRequestsPerTunnel.WithLabelValues(connectionID).Set(float64(concurrentRequests))
    }
    t.concurrentRequestsLock.Unlock()

    t.totalRequests.Inc()
    t.requestsPerTunnel.WithLabelValues(connectionID).Inc()
    t.concurrentRequestsPerTunnel.WithLabelValues(connectionID).Inc()
}

func (t *TunnelMetrics) decrementConcurrentRequests(connectionID string) {
    t.concurrentRequestsLock.Lock()
    if _, ok := t.concurrentRequests[connectionID]; ok {
        t.concurrentRequests[connectionID] -= 1
    }
    t.concurrentRequestsLock.Unlock()

    t.concurrentRequestsPerTunnel.WithLabelValues(connectionID).Dec()
}

func (t *TunnelMetrics) incrementResponses(connectionID, code string) {
    t.responseByCode.WithLabelValues(code).Inc()
    t.responseCodePerTunnel.WithLabelValues(connectionID, code).Inc()
}

func (t *TunnelMetrics) registerServerLocation(connectionID, loc string) {
    t.locationLock.Lock()
    defer t.locationLock.Unlock()
    if oldLoc, ok := t.oldServerLocations[connectionID]; ok && oldLoc == loc {
        return
    } else if ok {
        t.serverLocations.WithLabelValues(connectionID, oldLoc).Dec()
    }
    t.serverLocations.WithLabelValues(connectionID, loc).Inc()
    t.oldServerLocations[connectionID] = loc
}
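
A hedged sketch of how a proxy loop might bracket each request with these counters; the connection ID and status code are invented values:

// Hypothetical request accounting (not in this commit). Note that
// NewTunnelMetrics registers its collectors globally, so it can only
// be called once per process.
metrics := NewTunnelMetrics()
connID := "0"
metrics.incrementRequests(connID)
// ... proxy the request, obtain the response ...
metrics.incrementResponses(connID, "200")
metrics.decrementConcurrentRequests(connID) // keeps the gauge balanced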
@@ -0,0 +1,121 @@
package origin

import (
    "strconv"
    "sync"
    "testing"

    "github.com/stretchr/testify/assert"
)

// can only be called once
var m = NewTunnelMetrics()

func TestConcurrentRequestsSingleTunnel(t *testing.T) {
    routines := 20
    var wg sync.WaitGroup
    wg.Add(routines)
    for i := 0; i < routines; i++ {
        go func() {
            m.incrementRequests("0")
            wg.Done()
        }()
    }
    wg.Wait()
    assert.Len(t, m.concurrentRequests, 1)
    assert.Equal(t, uint64(routines), m.concurrentRequests["0"])
    assert.Len(t, m.maxConcurrentRequests, 1)
    assert.Equal(t, uint64(routines), m.maxConcurrentRequests["0"])

    wg.Add(routines / 2)
    for i := 0; i < routines/2; i++ {
        go func() {
            m.decrementConcurrentRequests("0")
            wg.Done()
        }()
    }
    wg.Wait()
    assert.Equal(t, uint64(routines-routines/2), m.concurrentRequests["0"])
    assert.Equal(t, uint64(routines), m.maxConcurrentRequests["0"])
}

func TestConcurrentRequestsMultiTunnel(t *testing.T) {
    m.concurrentRequests = make(map[string]uint64)
    m.maxConcurrentRequests = make(map[string]uint64)
    tunnels := 20
    var wg sync.WaitGroup
    wg.Add(tunnels)
    for i := 0; i < tunnels; i++ {
        go func(i int) {
            // if we have j < i, then tunnel 0 won't have a chance to call incrementRequests
            for j := 0; j < i+1; j++ {
                id := strconv.Itoa(i)
                m.incrementRequests(id)
            }
            wg.Done()
        }(i)
    }
    wg.Wait()

    assert.Len(t, m.concurrentRequests, tunnels)
    assert.Len(t, m.maxConcurrentRequests, tunnels)
    for i := 0; i < tunnels; i++ {
        id := strconv.Itoa(i)
        assert.Equal(t, uint64(i+1), m.concurrentRequests[id])
        assert.Equal(t, uint64(i+1), m.maxConcurrentRequests[id])
    }

    wg.Add(tunnels)
    for i := 0; i < tunnels; i++ {
        go func(i int) {
            for j := 0; j < i+1; j++ {
                id := strconv.Itoa(i)
                m.decrementConcurrentRequests(id)
            }
            wg.Done()
        }(i)
    }
    wg.Wait()

    assert.Len(t, m.concurrentRequests, tunnels)
    assert.Len(t, m.maxConcurrentRequests, tunnels)
    for i := 0; i < tunnels; i++ {
        id := strconv.Itoa(i)
        assert.Equal(t, uint64(0), m.concurrentRequests[id])
        assert.Equal(t, uint64(i+1), m.maxConcurrentRequests[id])
    }
}

func TestRegisterServerLocation(t *testing.T) {
    tunnels := 20
    var wg sync.WaitGroup
    wg.Add(tunnels)
    for i := 0; i < tunnels; i++ {
        go func(i int) {
            id := strconv.Itoa(i)
            m.registerServerLocation(id, "LHR")
            wg.Done()
        }(i)
    }
    wg.Wait()
    for i := 0; i < tunnels; i++ {
        id := strconv.Itoa(i)
        assert.Equal(t, "LHR", m.oldServerLocations[id])
    }

    wg.Add(tunnels)
    for i := 0; i < tunnels; i++ {
        go func(i int) {
            id := strconv.Itoa(i)
            m.registerServerLocation(id, "AUS")
            wg.Done()
        }(i)
    }
    wg.Wait()
    for i := 0; i < tunnels; i++ {
        id := strconv.Itoa(i)
        assert.Equal(t, "AUS", m.oldServerLocations[id])
    }
}
@@ -0,0 +1,234 @@
|
||||||
|
package origin
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Waiting time before retrying a failed tunnel connection
|
||||||
|
tunnelRetryDuration = time.Second * 10
|
||||||
|
// SRV record resolution TTL
|
||||||
|
resolveTTL = time.Hour
|
||||||
|
// Interval between registering new tunnels
|
||||||
|
registrationInterval = time.Second
|
||||||
|
)
|
||||||
|
|
||||||
|
type Supervisor struct {
|
||||||
|
config *TunnelConfig
|
||||||
|
edgeIPs []*net.TCPAddr
|
||||||
|
// nextUnusedEdgeIP is the index of the next addr k edgeIPs to try
|
||||||
|
nextUnusedEdgeIP int
|
||||||
|
lastResolve time.Time
|
||||||
|
resolverC chan resolveResult
|
||||||
|
tunnelErrors chan tunnelError
|
||||||
|
tunnelsConnecting map[int]chan struct{}
|
||||||
|
// nextConnectedIndex and nextConnectedSignal are used to wait for all
|
||||||
|
// currently-connecting tunnels to finish connecting so we can reset backoff timer
|
||||||
|
nextConnectedIndex int
|
||||||
|
nextConnectedSignal chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type resolveResult struct {
|
||||||
|
edgeIPs []*net.TCPAddr
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
type tunnelError struct {
|
||||||
|
index int
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSupervisor(config *TunnelConfig) *Supervisor {
|
||||||
|
return &Supervisor{
|
||||||
|
config: config,
|
||||||
|
tunnelErrors: make(chan tunnelError),
|
||||||
|
tunnelsConnecting: map[int]chan struct{}{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Supervisor) Run(ctx context.Context, connectedSignal chan struct{}) error {
|
||||||
|
logger := s.config.Logger
|
||||||
|
if err := s.initialize(ctx, connectedSignal); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var tunnelsWaiting []int
|
||||||
|
backoff := BackoffHandler{MaxRetries: s.config.Retries, BaseTime: tunnelRetryDuration, RetryForever: true}
|
||||||
|
var backoffTimer <-chan time.Time
|
||||||
|
tunnelsActive := s.config.HAConnections
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
// Context cancelled
|
||||||
|
case <-ctx.Done():
|
||||||
|
for tunnelsActive > 0 {
|
||||||
|
<-s.tunnelErrors
|
||||||
|
tunnelsActive--
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
// startTunnel returned with error
|
||||||
|
// (note that this may also be caused by context cancellation)
|
||||||
|
case tunnelError := <-s.tunnelErrors:
|
||||||
|
tunnelsActive--
|
||||||
|
if tunnelError.err != nil {
|
||||||
|
logger.WithError(tunnelError.err).Warn("Tunnel disconnected due to error")
|
||||||
|
tunnelsWaiting = append(tunnelsWaiting, tunnelError.index)
|
||||||
|
s.waitForNextTunnel(tunnelError.index)
|
||||||
|
|
||||||
|
if backoffTimer == nil {
|
||||||
|
backoffTimer = backoff.BackoffTimer()
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the error is a dial error, the problem is likely to be network related
|
||||||
|
// try another addr before refreshing since we are likely to get back the
|
||||||
|
// same IPs in the same order. Same problem with duplicate connection error.
|
||||||
|
if s.unusedIPs() {
|
||||||
|
s.replaceEdgeIP(tunnelError.index)
|
||||||
|
} else {
|
||||||
|
s.refreshEdgeIPs()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Backoff was set and its timer expired
|
||||||
|
case <-backoffTimer:
|
||||||
|
backoffTimer = nil
|
||||||
|
for _, index := range tunnelsWaiting {
|
||||||
|
go s.startTunnel(ctx, index, s.newConnectedTunnelSignal(index))
|
||||||
|
}
|
||||||
|
tunnelsActive += len(tunnelsWaiting)
|
||||||
|
tunnelsWaiting = nil
|
||||||
|
// Tunnel successfully connected
|
||||||
|
case <-s.nextConnectedSignal:
|
||||||
|
if !s.waitForNextTunnel(s.nextConnectedIndex) && len(tunnelsWaiting) == 0 {
|
||||||
|
// No more tunnels outstanding, clear backoff timer
|
||||||
|
backoff.SetGracePeriod()
|
||||||
|
}
|
||||||
|
// DNS resolution returned
|
||||||
|
case result := <-s.resolverC:
|
||||||
|
s.lastResolve = time.Now()
|
||||||
|
s.resolverC = nil
|
||||||
|
if result.err == nil {
|
||||||
|
logger.Debug("Service discovery refresh complete")
|
||||||
|
s.edgeIPs = result.edgeIPs
|
||||||
|
} else {
|
||||||
|
logger.WithError(result.err).Error("Service discovery error")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Supervisor) initialize(ctx context.Context, connectedSignal chan struct{}) error {
|
||||||
|
logger := s.config.Logger
|
||||||
|
edgeIPs, err := ResolveEdgeIPs(s.config.EdgeAddrs)
|
||||||
|
if err != nil {
|
||||||
|
logger.Infof("ResolveEdgeIPs err")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s.edgeIPs = edgeIPs
|
||||||
|
if s.config.HAConnections > len(edgeIPs) {
|
||||||
|
logger.Warnf("You requested %d HA connections but I can give you at most %d.", s.config.HAConnections, len(edgeIPs))
|
||||||
|
s.config.HAConnections = len(edgeIPs)
|
||||||
|
}
|
||||||
|
s.lastResolve = time.Now()
|
||||||
|
// check entitlement and version too old error before attempting to register more tunnels
|
||||||
|
s.nextUnusedEdgeIP = s.config.HAConnections
|
||||||
|
go s.startFirstTunnel(ctx, connectedSignal)
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
<-s.tunnelErrors
|
||||||
|
// Error can't be nil. A nil error signals that initialization succeed
|
||||||
|
return fmt.Errorf("context was canceled")
|
||||||
|
case tunnelError := <-s.tunnelErrors:
|
||||||
|
return tunnelError.err
|
||||||
|
case <-connectedSignal:
|
||||||
|
}
|
||||||
|
// At least one successful connection, so start the rest
|
||||||
|
for i := 1; i < s.config.HAConnections; i++ {
|
||||||
|
go s.startTunnel(ctx, i, make(chan struct{}))
|
||||||
|
time.Sleep(registrationInterval)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
// startFirstTunnel starts the first tunnel connection. The resulting error will be sent on
// s.tunnelErrors. It will send a signal via connectedSignal if registration succeeds.
func (s *Supervisor) startFirstTunnel(ctx context.Context, connectedSignal chan struct{}) {
	err := ServeTunnelLoop(ctx, s.config, s.getEdgeIP(0), 0, connectedSignal)
	defer func() {
		s.tunnelErrors <- tunnelError{index: 0, err: err}
	}()

	for s.unusedIPs() {
		select {
		case <-ctx.Done():
			return
		default:
		}
		switch err.(type) {
		case nil:
			return
		// Try the next address if it was a dialError (network problem) or
		// dupConnRegisterTunnelError
		case dialError, dupConnRegisterTunnelError:
			s.replaceEdgeIP(0)
		default:
			return
		}
		err = ServeTunnelLoop(ctx, s.config, s.getEdgeIP(0), 0, connectedSignal)
	}
}

// startTunnel starts a new tunnel connection. The resulting error will be sent on
// s.tunnelErrors.
func (s *Supervisor) startTunnel(ctx context.Context, index int, connectedSignal chan struct{}) {
	err := ServeTunnelLoop(ctx, s.config, s.getEdgeIP(index), uint8(index), connectedSignal)
	s.tunnelErrors <- tunnelError{index: index, err: err}
}

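// newConnectedTunnelSignal makes a fresh signal channel for the tunnel at the given
// index and marks that tunnel as the next one whose connection the supervisor awaits.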
func (s *Supervisor) newConnectedTunnelSignal(index int) chan struct{} {
	signal := make(chan struct{})
	s.tunnelsConnecting[index] = signal
	s.nextConnectedSignal = signal
	s.nextConnectedIndex = index
	return signal
}

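// waitForNextTunnel stops tracking the given index and, if other tunnels are still
// connecting, picks one of them to await next. It reports whether one was found.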
func (s *Supervisor) waitForNextTunnel(index int) bool {
	delete(s.tunnelsConnecting, index)
	s.nextConnectedSignal = nil
	for k, v := range s.tunnelsConnecting {
		s.nextConnectedIndex = k
		s.nextConnectedSignal = v
		return true
	}
	return false
}

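// getEdgeIP returns the edge address for the given connection index, wrapping around
// the resolved address list.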
func (s *Supervisor) getEdgeIP(index int) *net.TCPAddr {
	return s.edgeIPs[index%len(s.edgeIPs)]
}

func (s *Supervisor) refreshEdgeIPs() {
	if s.resolverC != nil {
		return
	}
	if time.Since(s.lastResolve) < resolveTTL {
		return
	}
	s.resolverC = make(chan resolveResult)
	go func() {
		edgeIPs, err := ResolveEdgeIPs(s.config.EdgeAddrs)
		s.resolverC <- resolveResult{edgeIPs: edgeIPs, err: err}
	}()
}

func (s *Supervisor) unusedIPs() bool {
	return s.nextUnusedEdgeIP < len(s.edgeIPs)
}

func (s *Supervisor) replaceEdgeIP(badIPIndex int) {
	s.edgeIPs[badIPIndex] = s.edgeIPs[s.nextUnusedEdgeIP]
	s.nextUnusedEdgeIP++
}
@@ -0,0 +1,629 @@
package origin

import (
	"bufio"
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/sync/errgroup"

	"github.com/cloudflare/cloudflared/h2mux"
	"github.com/cloudflare/cloudflared/tunnelrpc"
	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
	"github.com/cloudflare/cloudflared/validation"
	"github.com/cloudflare/cloudflared/websocket"

	raven "github.com/getsentry/raven-go"
	"github.com/pkg/errors"
	_ "github.com/prometheus/client_golang/prometheus"
	log "github.com/sirupsen/logrus"
	rpc "zombiezen.com/go/capnproto2/rpc"
)

const (
	dialTimeout              = 15 * time.Second
	lbProbeUserAgentPrefix   = "Mozilla/5.0 (compatible; Cloudflare-Traffic-Manager/1.0; +https://www.cloudflare.com/traffic-manager/;"
	TagHeaderNamePrefix      = "Cf-Warp-Tag-"
	DuplicateConnectionError = "EDUPCONN"
)

type TunnelConfig struct {
	EdgeAddrs          []string
	OriginUrl          string
	Hostname           string
	OriginCert         []byte
	TlsConfig          *tls.Config
	ClientTlsConfig    *tls.Config
	Retries            uint
	HeartbeatInterval  time.Duration
	MaxHeartbeats      uint64
	ClientID           string
	BuildInfo          *BuildInfo
	ReportedVersion    string
	LBPool             string
	Tags               []tunnelpogs.Tag
	HAConnections      int
	HTTPTransport      http.RoundTripper
	Metrics            *TunnelMetrics
	MetricsUpdateFreq  time.Duration
	ProtocolLogger     *log.Logger
	Logger             *log.Logger
	IsAutoupdated      bool
	GracePeriod        time.Duration
	RunFromTerminal    bool
	NoChunkedEncoding  bool
	WSGI               bool
	CompressionQuality uint64
}

type dialError struct {
	cause error
}

func (e dialError) Error() string {
	return e.cause.Error()
}

type dupConnRegisterTunnelError struct{}

func (e dupConnRegisterTunnelError) Error() string {
	return "already connected to this server"
}

type muxerShutdownError struct{}

func (e muxerShutdownError) Error() string {
	return "muxer shutdown"
}

// RegisterTunnel error from server
type serverRegisterTunnelError struct {
	cause     error
	permanent bool
}

func (e serverRegisterTunnelError) Error() string {
	return e.cause.Error()
}

// RegisterTunnel error from client
type clientRegisterTunnelError struct {
	cause error
}

func (e clientRegisterTunnelError) Error() string {
	return e.cause.Error()
}

func (c *TunnelConfig) RegistrationOptions(connectionID uint8, OriginLocalIP string) *tunnelpogs.RegistrationOptions {
	policy := tunnelrpc.ExistingTunnelPolicy_balance
	if c.HAConnections <= 1 && c.LBPool == "" {
		policy = tunnelrpc.ExistingTunnelPolicy_disconnect
	}
	return &tunnelpogs.RegistrationOptions{
		ClientID:             c.ClientID,
		Version:              c.ReportedVersion,
		OS:                   fmt.Sprintf("%s_%s", c.BuildInfo.GoOS, c.BuildInfo.GoArch),
		ExistingTunnelPolicy: policy,
		PoolName:             c.LBPool,
		Tags:                 c.Tags,
		ConnectionID:         connectionID,
		OriginLocalIP:        OriginLocalIP,
		IsAutoupdated:        c.IsAutoupdated,
		RunFromTerminal:      c.RunFromTerminal,
		CompressionQuality:   c.CompressionQuality,
	}
}

func StartTunnelDaemon(config *TunnelConfig, shutdownC <-chan struct{}, connectedSignal chan struct{}) error {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-shutdownC
		cancel()
	}()
	// If the user specified zero, one, or negative HAConnections, treat it as a request
	// for a single connection
	if config.HAConnections > 1 {
		return NewSupervisor(config).Run(ctx, connectedSignal)
	} else {
		addrs, err := ResolveEdgeIPs(config.EdgeAddrs)
		if err != nil {
			return err
		}
		return ServeTunnelLoop(ctx, config, addrs[0], 0, connectedSignal)
	}
}

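// Usage sketch (hypothetical caller, for illustration only; the real caller lives in
// the cloudflared main package):
//
//	shutdownC := make(chan struct{})
//	connectedSignal := make(chan struct{})
//	go func() {
//		<-connectedSignal
//		config.Logger.Info("tunnel connected")
//	}()
//	if err := StartTunnelDaemon(config, shutdownC, connectedSignal); err != nil {
//		config.Logger.WithError(err).Fatal("tunnel daemon exited")
//	}
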
func ServeTunnelLoop(ctx context.Context,
	config *TunnelConfig,
	addr *net.TCPAddr,
	connectionID uint8,
	connectedSignal chan struct{},
) error {
	logger := config.Logger
	config.Metrics.incrementHaConnections()
	defer config.Metrics.decrementHaConnections()
	backoff := BackoffHandler{MaxRetries: config.Retries}
	// Used to close connectedSignal no more than once
	connectedFuse := h2mux.NewBooleanFuse()
	go func() {
		if connectedFuse.Await() {
			close(connectedSignal)
		}
	}()
	// Ensure the above goroutine will terminate if we return without connecting
	defer connectedFuse.Fuse(false)
	for {
		err, recoverable := ServeTunnel(ctx, config, addr, connectionID, connectedFuse, &backoff)
		if recoverable {
			if duration, ok := backoff.GetBackoffDuration(ctx); ok {
				logger.Infof("Retrying in %s", duration)
				backoff.Backoff(ctx)
				continue
			}
		}
		return err
	}
}

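// Note on connectedFuse above: judging from its usage here, h2mux.BooleanFuse latches
// on its first Fuse call and Await blocks until then (assumed semantics, sketch only):
//
//	fuse := h2mux.NewBooleanFuse()
//	go func() {
//		if fuse.Await() { // blocks until the first Fuse(...) call; returns its value
//			close(connectedSignal)
//		}
//	}()
//	fuse.Fuse(true)  // on success: Await returns true exactly once
//	fuse.Fuse(false) // deferred: a no-op if the fuse was already set
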
func ServeTunnel(
	ctx context.Context,
	config *TunnelConfig,
	addr *net.TCPAddr,
	connectionID uint8,
	connectedFuse *h2mux.BooleanFuse,
	backoff *BackoffHandler,
) (err error, recoverable bool) {
	// Treat panics as recoverable errors
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			err, ok = r.(error)
			if !ok {
				err = fmt.Errorf("ServeTunnel: %v", r)
			}
			recoverable = true
		}
	}()

	connectionTag := uint8ToString(connectionID)
	logger := config.Logger.WithField("connectionID", connectionTag)

	// Additional tags to send, besides the hostname, which is set in the cloudflared main package
	tags := make(map[string]string)
	tags["ha"] = connectionTag

	// Returns errors from parsing the origin URL or handshake errors
	handler, originLocalIP, err := NewTunnelHandler(ctx, config, addr.String(), connectionID)
	if err != nil {
		errLog := config.Logger.WithError(err)
		switch err.(type) {
		case dialError:
			errLog.Error("Unable to dial edge")
		case h2mux.MuxerHandshakeError:
			errLog.Error("Handshake failed with edge server")
		default:
			errLog.Error("Tunnel creation failure")
			return err, false
		}
		return err, true
	}

	errGroup, serveCtx := errgroup.WithContext(ctx)

	errGroup.Go(func() error {
		err := RegisterTunnel(serveCtx, handler.muxer, config, connectionID, originLocalIP)
		if err == nil {
			connectedFuse.Fuse(true)
			backoff.SetGracePeriod()
		}
		return err
	})

	errGroup.Go(func() error {
		updateMetricsTickC := time.Tick(config.MetricsUpdateFreq)
		for {
			select {
			case <-serveCtx.Done():
				// UnregisterTunnel blocks until the RPC call returns
				err := UnregisterTunnel(handler.muxer, config.GracePeriod, config.Logger)
				handler.muxer.Shutdown()
				return err
			case <-updateMetricsTickC:
				handler.UpdateMetrics(connectionTag)
			}
		}
	})

	errGroup.Go(func() error {
		// All routines should stop when the muxer finishes serving. When the muxer shuts down
		// gracefully, it doesn't return an error, so we return muxerShutdownError
		// here to notify the other routines to stop
		err := handler.muxer.Serve(serveCtx)
		if err == nil {
			return muxerShutdownError{}
		}
		return err
	})

	err = errGroup.Wait()
	if err != nil {
		switch castedErr := err.(type) {
		case dupConnRegisterTunnelError:
			logger.Info("Already connected to this server, selecting a different one")
			return err, true
		case serverRegisterTunnelError:
			logger.WithError(castedErr.cause).Error("Register tunnel error from server side")
			// Don't send registration errors returned from the server to Sentry; they are
			// logged on the server side
			return castedErr.cause, !castedErr.permanent
		case clientRegisterTunnelError:
			logger.WithError(castedErr.cause).Error("Register tunnel error on client side")
			raven.CaptureError(castedErr.cause, tags)
			return err, true
		case muxerShutdownError:
			logger.Info("Muxer shutdown")
			return err, true
		default:
			logger.WithError(err).Error("Serve tunnel error")
			raven.CaptureError(err, tags)
			return err, true
		}
	}
	return nil, true
}

func IsRPCStreamResponse(headers []h2mux.Header) bool {
	if len(headers) != 1 {
		return false
	}
	if headers[0].Name != ":status" || headers[0].Value != "200" {
		return false
	}
	return true
}

func RegisterTunnel(ctx context.Context, muxer *h2mux.Muxer, config *TunnelConfig, connectionID uint8, originLocalIP string) error {
	config.Logger.Debug("initiating RPC stream to register")
	stream, err := muxer.OpenStream([]h2mux.Header{
		{Name: ":method", Value: "RPC"},
		{Name: ":scheme", Value: "capnp"},
		{Name: ":path", Value: "*"},
	}, nil)
	if err != nil {
		// RPC stream open error
		return clientRegisterTunnelError{cause: err}
	}
	if !IsRPCStreamResponse(stream.Headers) {
		// stream response error
		return clientRegisterTunnelError{cause: fmt.Errorf("unexpected response to RPC stream open")}
	}
	conn := rpc.NewConn(
		tunnelrpc.NewTransportLogger(config.Logger.WithField("subsystem", "rpc-register"), rpc.StreamTransport(stream)),
		tunnelrpc.ConnLog(config.Logger.WithField("subsystem", "rpc-transport")),
	)
	defer conn.Close()
	ts := tunnelpogs.TunnelServer_PogsClient{Client: conn.Bootstrap(ctx)}
	// Request server info without blocking tunnel registration; must use capnp library directly.
	tsClient := tunnelrpc.TunnelServer{Client: ts.Client}
	serverInfoPromise := tsClient.GetServerInfo(ctx, func(tunnelrpc.TunnelServer_getServerInfo_Params) error {
		return nil
	})
	registration, err := ts.RegisterTunnel(
		ctx,
		config.OriginCert,
		config.Hostname,
		config.RegistrationOptions(connectionID, originLocalIP),
	)
	LogServerInfo(serverInfoPromise.Result(), connectionID, config.Metrics, config.Logger)
	if err != nil {
		// RegisterTunnel RPC failure
		return clientRegisterTunnelError{cause: err}
	}
	for _, logLine := range registration.LogLines {
		config.Logger.Info(logLine)
	}
	if registration.Err == DuplicateConnectionError {
		return dupConnRegisterTunnelError{}
	} else if registration.Err != "" {
		return serverRegisterTunnelError{
			cause:     fmt.Errorf("Server error: %s", registration.Err),
			permanent: registration.PermanentFailure,
		}
	}

	config.Logger.Info("Tunnel ID: " + registration.TunnelID)
	config.Logger.Info("Route propagating, it may take up to 1 minute for your new route to become functional")
	return nil
}

func UnregisterTunnel(muxer *h2mux.Muxer, gracePeriod time.Duration, logger *log.Logger) error {
	logger.Debug("initiating RPC stream to unregister")
	stream, err := muxer.OpenStream([]h2mux.Header{
		{Name: ":method", Value: "RPC"},
		{Name: ":scheme", Value: "capnp"},
		{Name: ":path", Value: "*"},
	}, nil)
	if err != nil {
		// RPC stream open error
		return err
	}
	if !IsRPCStreamResponse(stream.Headers) {
		// stream response error
		return fmt.Errorf("unexpected response to RPC stream open")
	}
	ctx := context.Background()
	conn := rpc.NewConn(
		tunnelrpc.NewTransportLogger(logger.WithField("subsystem", "rpc-unregister"), rpc.StreamTransport(stream)),
		tunnelrpc.ConnLog(logger.WithField("subsystem", "rpc-transport")),
	)
	defer conn.Close()
	ts := tunnelpogs.TunnelServer_PogsClient{Client: conn.Bootstrap(ctx)}
	// gracePeriod is encoded in int64 using capnproto
	return ts.UnregisterTunnel(ctx, gracePeriod.Nanoseconds())
}

func LogServerInfo(
	promise tunnelrpc.ServerInfo_Promise,
	connectionID uint8,
	metrics *TunnelMetrics,
	logger *log.Logger,
) {
	serverInfoMessage, err := promise.Struct()
	if err != nil {
		logger.WithError(err).Warn("Failed to retrieve server information")
		return
	}
	serverInfo, err := tunnelpogs.UnmarshalServerInfo(serverInfoMessage)
	if err != nil {
		logger.WithError(err).Warn("Failed to retrieve server information")
		return
	}
	logger.Infof("Connected to %s", serverInfo.LocationName)
	metrics.registerServerLocation(uint8ToString(connectionID), serverInfo.LocationName)
}

func H2RequestHeadersToH1Request(h2 []h2mux.Header, h1 *http.Request) error {
	for _, header := range h2 {
		switch header.Name {
		case ":method":
			h1.Method = header.Value
		case ":scheme":
		case ":authority":
			// Otherwise the host header will be based on the origin URL
			h1.Host = header.Value
		case ":path":
			u, err := url.Parse(header.Value)
			if err != nil {
				return fmt.Errorf("unparseable path")
			}
			resolved := h1.URL.ResolveReference(u)
			// prevent escaping base URL
			if !strings.HasPrefix(resolved.String(), h1.URL.String()) {
				return fmt.Errorf("invalid path")
			}
			h1.URL = resolved
		default:
			h1.Header.Add(http.CanonicalHeaderKey(header.Name), header.Value)
		}
	}
	return nil
}

func H1ResponseToH2Response(h1 *http.Response) (h2 []h2mux.Header) {
	h2 = []h2mux.Header{{Name: ":status", Value: fmt.Sprintf("%d", h1.StatusCode)}}
	for headerName, headerValues := range h1.Header {
		for _, headerValue := range headerValues {
			h2 = append(h2, h2mux.Header{Name: strings.ToLower(headerName), Value: headerValue})
		}
	}
	return
}

func FindCfRayHeader(h1 *http.Request) string {
	return h1.Header.Get("Cf-Ray")
}

type TunnelHandler struct {
	originUrl  string
	muxer      *h2mux.Muxer
	httpClient http.RoundTripper
	tlsConfig  *tls.Config
	tags       []tunnelpogs.Tag
	metrics    *TunnelMetrics
	// connectionID is only used by metrics, and prometheus requires labels to be strings
	connectionID      string
	logger            *log.Logger
	noChunkedEncoding bool
}

var dialer = net.Dialer{DualStack: true}

// NewTunnelHandler returns a TunnelHandler, the origin LAN IP, and an error
func NewTunnelHandler(ctx context.Context,
	config *TunnelConfig,
	addr string,
	connectionID uint8,
) (*TunnelHandler, string, error) {
	originURL, err := validation.ValidateUrl(config.OriginUrl)
	if err != nil {
		return nil, "", fmt.Errorf("unable to parse origin URL %#v", config.OriginUrl)
	}
	h := &TunnelHandler{
		originUrl:         originURL,
		httpClient:        config.HTTPTransport,
		tlsConfig:         config.ClientTlsConfig,
		tags:              config.Tags,
		metrics:           config.Metrics,
		connectionID:      uint8ToString(connectionID),
		logger:            config.Logger,
		noChunkedEncoding: config.NoChunkedEncoding,
	}
	if h.httpClient == nil {
		h.httpClient = http.DefaultTransport
	}
	// Inherit from parent context so we can cancel (Ctrl-C) while dialing
	dialCtx, dialCancel := context.WithTimeout(ctx, dialTimeout)
	// TUN-92: enforce a timeout on dial and handshake (as tls.Dial does not support one)
	plaintextEdgeConn, err := dialer.DialContext(dialCtx, "tcp", addr)
	dialCancel()
	if err != nil {
		return nil, "", dialError{cause: errors.Wrap(err, "DialContext error")}
	}
	edgeConn := tls.Client(plaintextEdgeConn, config.TlsConfig)
	edgeConn.SetDeadline(time.Now().Add(dialTimeout))
	err = edgeConn.Handshake()
	if err != nil {
		return nil, "", dialError{cause: errors.Wrap(err, "Handshake with edge error")}
	}
	// clear the deadline on the conn; h2mux has its own timeouts
	edgeConn.SetDeadline(time.Time{})
	// Establish a muxed connection with the edge
	// Client mux handshake with agent server
	h.muxer, err = h2mux.Handshake(edgeConn, edgeConn, h2mux.MuxerConfig{
		Timeout:            5 * time.Second,
		Handler:            h,
		IsClient:           true,
		HeartbeatInterval:  config.HeartbeatInterval,
		MaxHeartbeats:      config.MaxHeartbeats,
		Logger:             config.ProtocolLogger.WithFields(log.Fields{}),
		CompressionQuality: h2mux.CompressionSetting(config.CompressionQuality),
	})
	if err != nil {
		return h, "", errors.Wrap(err, "h2mux handshake with edge error")
	}
	return h, edgeConn.LocalAddr().String(), err
}

func (h *TunnelHandler) AppendTagHeaders(r *http.Request) {
	for _, tag := range h.tags {
		r.Header.Add(TagHeaderNamePrefix+tag.Name, tag.Value)
	}
}

func (h *TunnelHandler) ServeStream(stream *h2mux.MuxedStream) error {
	h.metrics.incrementRequests(h.connectionID)
	req, err := http.NewRequest("GET", h.originUrl, h2mux.MuxedStreamReader{MuxedStream: stream})
	if err != nil {
		h.logger.WithError(err).Panic("Unexpected error from http.NewRequest")
	}
	err = H2RequestHeadersToH1Request(stream.Headers, req)
	if err != nil {
		h.logger.WithError(err).Error("invalid request received")
	}
	h.AppendTagHeaders(req)
	cfRay := FindCfRayHeader(req)
	lbProbe := isLBProbeRequest(req)
	h.logRequest(req, cfRay, lbProbe)
	if websocket.IsWebSocketUpgrade(req) {
		conn, response, err := websocket.ClientConnect(req, h.tlsConfig)
		if err != nil {
			h.logError(stream, err)
		} else {
			stream.WriteHeaders(H1ResponseToH2Response(response))
			defer conn.Close()
			// Copy to/from the stream to the underlying connection. Use the underlying
			// connection because cloudflared doesn't operate on the messages themselves
			websocket.Stream(conn.UnderlyingConn(), stream)
			h.metrics.incrementResponses(h.connectionID, "200")
			h.logResponse(response, cfRay, lbProbe)
		}
	} else {
		// Support for WSGI servers by switching transfer encoding from chunked to gzip/deflate
		if h.noChunkedEncoding {
			req.TransferEncoding = []string{"gzip", "deflate"}
			cLength, err := strconv.Atoi(req.Header.Get("Content-Length"))
			if err == nil {
				req.ContentLength = int64(cLength)
			}
		}

		response, err := h.httpClient.RoundTrip(req)

		if err != nil {
			h.logError(stream, err)
		} else {
			defer response.Body.Close()
			stream.WriteHeaders(H1ResponseToH2Response(response))
			if h.isEventStream(response) {
				h.writeEventStream(stream, response.Body)
			} else {
				// Use CopyBuffer, because Copy only allocates a 32KiB buffer, and cross-stream
				// compression generates its dictionary on the first write
				io.CopyBuffer(stream, response.Body, make([]byte, 512*1024))
			}

			h.metrics.incrementResponses(h.connectionID, "200")
			h.logResponse(response, cfRay, lbProbe)
		}
	}
	h.metrics.decrementConcurrentRequests(h.connectionID)
	return nil
}

func (h *TunnelHandler) writeEventStream(stream *h2mux.MuxedStream, responseBody io.ReadCloser) {
	reader := bufio.NewReader(responseBody)
	for {
		line, err := reader.ReadBytes('\n')
		if err != nil {
			break
		}
		stream.Write(line)
	}
}

func (h *TunnelHandler) isEventStream(response *http.Response) bool {
	if response.Header.Get("content-type") == "text/event-stream" {
		h.logger.Debug("Detected Server-Sent Events from Origin")
		return true
	}
	return false
}

func (h *TunnelHandler) logError(stream *h2mux.MuxedStream, err error) {
	h.logger.WithError(err).Error("HTTP request error")
	stream.WriteHeaders([]h2mux.Header{{Name: ":status", Value: "502"}})
	stream.Write([]byte("502 Bad Gateway"))
	h.metrics.incrementResponses(h.connectionID, "502")
}

func (h *TunnelHandler) logRequest(req *http.Request, cfRay string, lbProbe bool) {
	if cfRay != "" {
		h.logger.WithField("CF-RAY", cfRay).Debugf("%s %s %s", req.Method, req.URL, req.Proto)
	} else if lbProbe {
		h.logger.Debugf("Load Balancer health check %s %s %s", req.Method, req.URL, req.Proto)
	} else {
		h.logger.Warnf("All requests should have a CF-RAY header. Please open a support ticket with Cloudflare. %s %s %s", req.Method, req.URL, req.Proto)
	}
	h.logger.Debugf("Request Headers %+v", req.Header)
}

func (h *TunnelHandler) logResponse(r *http.Response, cfRay string, lbProbe bool) {
	if cfRay != "" {
		h.logger.WithField("CF-RAY", cfRay).Debugf("%s", r.Status)
	} else if lbProbe {
		h.logger.Debugf("Response to Load Balancer health check %s", r.Status)
	} else {
		h.logger.Infof("%s", r.Status)
	}
	h.logger.Debugf("Response Headers %+v", r.Header)
}

func (h *TunnelHandler) UpdateMetrics(connectionID string) {
	h.metrics.updateMuxerMetrics(connectionID, h.muxer.Metrics())
}

func uint8ToString(input uint8) string {
	return strconv.FormatUint(uint64(input), 10)
}

func isLBProbeRequest(req *http.Request) bool {
	return strings.HasPrefix(req.UserAgent(), lbProbeUserAgentPrefix)
}
@@ -0,0 +1,62 @@
package tlsconfig

import (
	"crypto/tls"
	"errors"
	"fmt"
	"sync"

	tunnellog "github.com/cloudflare/cloudflared/log"
	"github.com/getsentry/raven-go"
	log "github.com/sirupsen/logrus"
	"gopkg.in/urfave/cli.v2"
)

// CertReloader can load and reload a TLS certificate from a particular filepath.
// Hooks into tls.Config's GetCertificate to allow a TLS server to update its certificate without restarting.
type CertReloader struct {
	sync.Mutex
	certificate *tls.Certificate
	certPath    string
	keyPath     string
}

// NewCertReloader makes a CertReloader, memorizing the filepaths in the context/flags.
func NewCertReloader(c *cli.Context, f CLIFlags) (*CertReloader, error) {
	if !c.IsSet(f.Cert) {
		return nil, errors.New("CertReloader: cert not provided")
	}
	if !c.IsSet(f.Key) {
		return nil, errors.New("CertReloader: key not provided")
	}
	cr := new(CertReloader)
	cr.certPath = c.String(f.Cert)
	cr.keyPath = c.String(f.Key)
	cr.LoadCert()
	return cr, nil
}

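// Usage sketch (hypothetical wiring, for illustration): hook Cert into a server's TLS
// config so that certificate swaps take effect without restarting the listener:
//
//	cr, err := tlsconfig.NewCertReloader(c, flags)
//	if err != nil {
//		log.Fatal(err)
//	}
//	server := &http.Server{
//		TLSConfig: &tls.Config{GetCertificate: cr.Cert},
//	}
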
// Cert returns the TLS certificate most recently read by the CertReloader.
func (cr *CertReloader) Cert(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
	cr.Lock()
	defer cr.Unlock()
	return cr.certificate, nil
}

// LoadCert loads a TLS certificate from the CertReloader's specified filepath.
// Call this after writing a new certificate to the disk (e.g. after renewing a certificate)
func (cr *CertReloader) LoadCert() {
	cr.Lock()
	defer cr.Unlock()

	log.SetFormatter(&tunnellog.JSONFormatter{})
	log.Info("Reloading certificate")
	cert, err := tls.LoadX509KeyPair(cr.certPath, cr.keyPath)

	// Keep the old certificate if there's a problem reading the new one.
	if err != nil {
		raven.CaptureError(fmt.Errorf("Error parsing X509 key pair: %v", err), nil)
		return
	}
	cr.certificate = &cert
}
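// The reload trigger is left to the caller. One hypothetical approach (sketch only)
// is to re-read the key pair whenever the process receives SIGHUP:
//
//	sighup := make(chan os.Signal, 1)
//	signal.Notify(sighup, syscall.SIGHUP)
//	go func() {
//		for range sighup {
//			cr.LoadCert()
//		}
//	}()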
@@ -0,0 +1,95 @@
package tlsconfig

import (
	"crypto/x509"
)

// TODO: remove the Origin CA root certs when migrated to Authenticated Origin Pull certs
var cloudflareRootCA = []byte(`
Issuer: C=US, ST=California, L=San Francisco, O=CloudFlare, Inc., OU=CloudFlare Origin SSL ECC Certificate Authority
-----BEGIN CERTIFICATE-----
MIICiDCCAi6gAwIBAgIUXZP3MWb8MKwBE1Qbawsp1sfA/Y4wCgYIKoZIzj0EAwIw
gY8xCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1T
YW4gRnJhbmNpc2NvMRkwFwYDVQQKExBDbG91ZEZsYXJlLCBJbmMuMTgwNgYDVQQL
Ey9DbG91ZEZsYXJlIE9yaWdpbiBTU0wgRUNDIENlcnRpZmljYXRlIEF1dGhvcml0
eTAeFw0xNjAyMjIxODI0MDBaFw0yMTAyMjIwMDI0MDBaMIGPMQswCQYDVQQGEwJV
UzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEZ
MBcGA1UEChMQQ2xvdWRGbGFyZSwgSW5jLjE4MDYGA1UECxMvQ2xvdWRGbGFyZSBP
cmlnaW4gU1NMIEVDQyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwWTATBgcqhkjOPQIB
BggqhkjOPQMBBwNCAASR+sGALuaGshnUbcxKry+0LEXZ4NY6JUAtSeA6g87K3jaA
xpIg9G50PokpfWkhbarLfpcZu0UAoYy2su0EhN7wo2YwZDAOBgNVHQ8BAf8EBAMC
AQYwEgYDVR0TAQH/BAgwBgEB/wIBAjAdBgNVHQ4EFgQUhTBdOypw1O3VkmcH/es5
tBoOOKcwHwYDVR0jBBgwFoAUhTBdOypw1O3VkmcH/es5tBoOOKcwCgYIKoZIzj0E
AwIDSAAwRQIgEiIEHQr5UKma50D1WRMJBUSgjg24U8n8E2mfw/8UPz0CIQCr5V/e
mcifak4CQsr+DH4pn5SJD7JxtCG3YGswW8QZsw==
-----END CERTIFICATE-----
Issuer: C=US, O=CloudFlare, Inc., OU=CloudFlare Origin SSL Certificate Authority, L=San Francisco, ST=California
-----BEGIN CERTIFICATE-----
MIID/DCCAuagAwIBAgIID+rOSdTGfGcwCwYJKoZIhvcNAQELMIGLMQswCQYDVQQG
EwJVUzEZMBcGA1UEChMQQ2xvdWRGbGFyZSwgSW5jLjE0MDIGA1UECxMrQ2xvdWRG
bGFyZSBPcmlnaW4gU1NMIENlcnRpZmljYXRlIEF1dGhvcml0eTEWMBQGA1UEBxMN
U2FuIEZyYW5jaXNjbzETMBEGA1UECBMKQ2FsaWZvcm5pYTAeFw0xNDExMTMyMDM4
NTBaFw0xOTExMTQwMTQzNTBaMIGLMQswCQYDVQQGEwJVUzEZMBcGA1UEChMQQ2xv
dWRGbGFyZSwgSW5jLjE0MDIGA1UECxMrQ2xvdWRGbGFyZSBPcmlnaW4gU1NMIENl
cnRpZmljYXRlIEF1dGhvcml0eTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzETMBEG
A1UECBMKQ2FsaWZvcm5pYTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AMBIlWf1KEKR5hbB75OYrAcUXobpD/AxvSYRXr91mbRu+lqE7YbyyRUShQh15lem
ef+umeEtPZoLFLhcLyczJxOhI+siLGDQm/a/UDkWvAXYa5DZ+pHU5ct5nZ8pGzqJ
p8G1Hy5RMVYDXZT9F6EaHjMG0OOffH6Ih25TtgfyyrjXycwDH0u6GXt+G/rywcqz
/9W4Aki3XNQMUHNQAtBLEEIYHMkyTYJxuL2tXO6ID5cCsoWw8meHufTeZW2DyUpl
yP3AHt4149RQSyWZMJ6AyntL9d8Xhfpxd9rJkh9Kge2iV9rQTFuE1rRT5s7OSJcK
xUsklgHcGHYMcNfNMilNHb8CAwEAAaNmMGQwDgYDVR0PAQH/BAQDAgAGMBIGA1Ud
EwEB/wQIMAYBAf8CAQIwHQYDVR0OBBYEFCToU1ddfDRAh6nrlNu64RZ4/CmkMB8G
A1UdIwQYMBaAFCToU1ddfDRAh6nrlNu64RZ4/CmkMAsGCSqGSIb3DQEBCwOCAQEA
cQDBVAoRrhhsGegsSFsv1w8v27zzHKaJNv6ffLGIRvXK8VKKK0gKXh2zQtN9SnaD
gYNe7Pr4C3I8ooYKRJJWLsmEHdGdnYYmj0OJfGrfQf6MLIc/11bQhLepZTxdhFYh
QGgDl6gRmb8aDwk7Q92BPvek5nMzaWlP82ixavvYI+okoSY8pwdcVKobx6rWzMWz
ZEC9M6H3F0dDYE23XcCFIdgNSAmmGyXPBstOe0aAJXwJTxOEPn36VWr0PKIQJy5Y
4o1wpMpqCOIwWc8J9REV/REzN6Z1LXImdUgXIXOwrz56gKUJzPejtBQyIGj0mveX
Fu6q54beR89jDc+oABmOgg==
-----END CERTIFICATE-----
Issuer: C=US, O=CloudFlare, Inc., OU=Origin Pull, L=San Francisco, ST=California, CN=origin-pull.cloudflare.net
-----BEGIN CERTIFICATE-----
MIIGBjCCA/CgAwIBAgIIV5G6lVbCLmEwCwYJKoZIhvcNAQENMIGQMQswCQYDVQQG
EwJVUzEZMBcGA1UEChMQQ2xvdWRGbGFyZSwgSW5jLjEUMBIGA1UECxMLT3JpZ2lu
IFB1bGwxFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xEzARBgNVBAgTCkNhbGlmb3Ju
aWExIzAhBgNVBAMTGm9yaWdpbi1wdWxsLmNsb3VkZmxhcmUubmV0MB4XDTE1MDEx
MzAyNDc1M1oXDTIwMDExMjAyNTI1M1owgZAxCzAJBgNVBAYTAlVTMRkwFwYDVQQK
ExBDbG91ZEZsYXJlLCBJbmMuMRQwEgYDVQQLEwtPcmlnaW4gUHVsbDEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzETMBEGA1UECBMKQ2FsaWZvcm5pYTEjMCEGA1UEAxMa
b3JpZ2luLXB1bGwuY2xvdWRmbGFyZS5uZXQwggIiMA0GCSqGSIb3DQEBAQUAA4IC
DwAwggIKAoICAQDdsts6I2H5dGyn4adACQRXlfo0KmwsN7B5rxD8C5qgy6spyONr
WV0ecvdeGQfWa8Gy/yuTuOnsXfy7oyZ1dm93c3Mea7YkM7KNMc5Y6m520E9tHooc
f1qxeDpGSsnWc7HWibFgD7qZQx+T+yfNqt63vPI0HYBOYao6hWd3JQhu5caAcIS2
ms5tzSSZVH83ZPe6Lkb5xRgLl3eXEFcfI2DjnlOtLFqpjHuEB3Tr6agfdWyaGEEi
lRY1IB3k6TfLTaSiX2/SyJ96bp92wvTSjR7USjDV9ypf7AD6u6vwJZ3bwNisNw5L
ptph0FBnc1R6nDoHmvQRoyytoe0rl/d801i9Nru/fXa+l5K2nf1koR3IX440Z2i9
+Z4iVA69NmCbT4MVjm7K3zlOtwfI7i1KYVv+ATo4ycgBuZfY9f/2lBhIv7BHuZal
b9D+/EK8aMUfjDF4icEGm+RQfExv2nOpkR4BfQppF/dLmkYfjgtO1403X0ihkT6T
PYQdmYS6Jf53/KpqC3aA+R7zg2birtvprinlR14MNvwOsDOzsK4p8WYsgZOR4Qr2
gAx+z2aVOs/87+TVOR0r14irQsxbg7uP2X4t+EXx13glHxwG+CnzUVycDLMVGvuG
aUgF9hukZxlOZnrl6VOf1fg0Caf3uvV8smOkVw6DMsGhBZSJVwao0UQNqQIDAQAB
o2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/BAgwBgEB/wIBAjAdBgNVHQ4E
FgQUQ1lLK2mLgOERM2pXzVc42p59xeswHwYDVR0jBBgwFoAUQ1lLK2mLgOERM2pX
zVc42p59xeswCwYJKoZIhvcNAQENA4ICAQDKDQM1qPRVP/4Gltz0D6OU6xezFBKr
LWtDoA1qW2F7pkiYawCP9MrDPDJsHy7dx+xw3bBZxOsK5PA/T7p1dqpEl6i8F692
g//EuYOifLYw3ySPe3LRNhvPl/1f6Sn862VhPvLa8aQAAwR9e/CZvlY3fj+6G5ik
3it7fikmKUsVnugNOkjmwI3hZqXfJNc7AtHDFw0mEOV0dSeAPTo95N9cxBbm9PKv
qAEmTEXp2trQ/RjJ/AomJyfA1BQjsD0j++DI3a9/BbDwWmr1lJciKxiNKaa0BRLB
dKMrYQD+PkPNCgEuojT+paLKRrMyFUzHSG1doYm46NE9/WARTh3sFUp1B7HZSBqA
kHleoB/vQ/mDuW9C3/8Jk2uRUdZxR+LoNZItuOjU8oTy6zpN1+GgSj7bHjiy9rfA
F+ehdrz+IOh80WIiqs763PGoaYUyzxLvVowLWNoxVVoc9G+PqFKqD988XlipHVB6
Bz+1CD4D/bWrs3cC9+kk/jFmrrAymZlkFX8tDb5aXASSLJjUjcptci9SKqtI2h0J
wUGkD7+bQAr+7vr8/R+CBmNMe7csE8NeEX6lVMF7Dh0a1YKQa6hUN18bBuYgTMuT
QzMmZpRpIBB321ZBlcnlxiTJvWxvbCPHKHj20VwwAz7LONF59s84ZsOqfoBv8gKM
s0s5dsq5zpLeaw==
-----END CERTIFICATE-----`)

func GetCloudflareRootCA() *x509.CertPool {
	ca := x509.NewCertPool()
	if !ca.AppendCertsFromPEM(cloudflareRootCA) {
		// should never happen
		panic("failure loading Cloudflare origin CA pem")
	}
	return ca
}
@@ -0,0 +1,50 @@
package tlsconfig

import (
	"crypto/tls"
	"crypto/x509"
)

const (
	helloKey = `
-----BEGIN EC PARAMETERS-----
BgUrgQQAIg==
-----END EC PARAMETERS-----
-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDBGGfwhIJdiUiJUVIItqJjEIMmlXxsMa8TQeer47+g+cIZ466rgg8EK
+Mdn6BY48GCgBwYFK4EEACKhZANiAASW//A9iDbPKg3OLkn7yJqLer32g9I5lBKR
tPc/zBubQLLz9lAaYI6AOQiJXhGr5JkKmQfi1sYHK5rJITPFy4W8Et4hHLdazDZH
WnEd+TStQABFUjrhtqXPWmGKcly0pOE=
-----END EC PRIVATE KEY-----`

	helloCRT = `
-----BEGIN CERTIFICATE-----
MIICiDCCAg6gAwIBAgIJAJ/FfkBTtbuIMAkGByqGSM49BAEwfzELMAkGA1UEBhMC
VVMxDjAMBgNVBAgMBVRleGFzMQ8wDQYDVQQHDAZBdXN0aW4xGTAXBgNVBAoMEENs
b3VkZmxhcmUsIEluYy4xNDAyBgNVBAMMK0FyZ28gVHVubmVsIFNhbXBsZSBIZWxs
byBTZXJ2ZXIgQ2VydGlmaWNhdGUwHhcNMTgwMzE5MjMwNTMyWhcNMjgwMzE2MjMw
NTMyWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxDzANBgNVBAcMBkF1
c3RpbjEZMBcGA1UECgwQQ2xvdWRmbGFyZSwgSW5jLjE0MDIGA1UEAwwrQXJnbyBU
dW5uZWwgU2FtcGxlIEhlbGxvIFNlcnZlciBDZXJ0aWZpY2F0ZTB2MBAGByqGSM49
AgEGBSuBBAAiA2IABJb/8D2INs8qDc4uSfvImot6vfaD0jmUEpG09z/MG5tAsvP2
UBpgjoA5CIleEavkmQqZB+LWxgcrmskhM8XLhbwS3iEct1rMNkdacR35NK1AAEVS
OuG2pc9aYYpyXLSk4aNXMFUwUwYDVR0RBEwwSoIJbG9jYWxob3N0ghFjbG91ZGZs
YXJlZC1oZWxsb4ISY2xvdWRmbGFyZWQyLWhlbGxvhwR/AAABhxAAAAAAAAAAAAAA
AAAAAAABMAkGByqGSM49BAEDaQAwZgIxAPxkdghH6y8xLMnY9Bom3Llf4NYM6yB9
PD1YsaNUJTsxjTk3YY1Jsp+yzK0yUKtTZwIxAPcdvqCF2/iR9H288pCT1TgtO0a9
cJL9RY1lq7DIGN37v1ZXReWaD+3hNokY8NriVg==
-----END CERTIFICATE-----`
)

func GetHelloCertificate() (tls.Certificate, error) {
	return tls.X509KeyPair([]byte(helloCRT), []byte(helloKey))
}

func GetHelloCertificateX509() (*x509.Certificate, error) {
	helloCertificate, err := GetHelloCertificate()
	if err != nil {
		return nil, err
	}

	return x509.ParseCertificate(helloCertificate.Certificate[0])
}
@@ -0,0 +1,151 @@
// Package tlsconfig provides convenience functions for configuring TLS connections from the
// command line.
package tlsconfig

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"net"

	"github.com/cloudflare/cloudflared/log"
	"github.com/pkg/errors"
	"gopkg.in/urfave/cli.v2"
)

var logger = log.CreateLogger()

// CLIFlags names the flags used to configure TLS for a command or subsystem.
// The zero value for a field means the corresponding flag is ignored.
type CLIFlags struct {
	Cert       string
	Key        string
	ClientCert string
	RootCA     string
}

// GetConfig returns a TLS configuration according to the flags defined in f and
// set by the user.
func (f CLIFlags) GetConfig(c *cli.Context) *tls.Config {
	config := &tls.Config{}

	if c.IsSet(f.Cert) && c.IsSet(f.Key) {
		cert, err := tls.LoadX509KeyPair(c.String(f.Cert), c.String(f.Key))
		if err != nil {
			logger.WithError(err).Fatal("Error parsing X509 key pair")
		}
		config.Certificates = []tls.Certificate{cert}
		config.BuildNameToCertificate()
	}
	return f.finishGettingConfig(c, config)
}

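// Usage sketch (hypothetical flag names, for illustration):
//
//	flags := tlsconfig.CLIFlags{Cert: "cert", Key: "key", RootCA: "cacert"}
//	config := flags.GetConfig(c) // c is the *cli.Context of the running command
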
func (f CLIFlags) GetConfigReloadableCert(c *cli.Context, cr *CertReloader) *tls.Config {
	config := &tls.Config{
		GetCertificate: cr.Cert,
	}
	config.BuildNameToCertificate()
	return f.finishGettingConfig(c, config)
}

func (f CLIFlags) finishGettingConfig(c *cli.Context, config *tls.Config) *tls.Config {
	if c.IsSet(f.ClientCert) {
		// set of root certificate authorities that servers use if required to verify a client certificate
		// by the policy in ClientAuth
		config.ClientCAs = LoadCert(c.String(f.ClientCert))
		// server's policy for TLS Client Authentication. Default is no client cert
		config.ClientAuth = tls.RequireAndVerifyClientCert
	}
	// set of root certificate authorities that clients use when verifying server certificates
	if c.IsSet(f.RootCA) {
		config.RootCAs = LoadCert(c.String(f.RootCA))
	}

	return config
}

// LoadCert creates a CertPool containing all certificates in a PEM-format file.
func LoadCert(certPath string) *x509.CertPool {
	caCert, err := ioutil.ReadFile(certPath)
	if err != nil {
		logger.WithError(err).Fatalf("Error reading certificate %s", certPath)
	}
	ca := x509.NewCertPool()
	if !ca.AppendCertsFromPEM(caCert) {
		logger.Fatalf("Error parsing certificate %s", certPath)
	}
	return ca
}

func LoadGlobalCertPool() (*x509.CertPool, error) {
	success := false

	// First, obtain the system certificate pool
	certPool, systemCertPoolErr := x509.SystemCertPool()
	if systemCertPoolErr != nil {
		logger.Warnf("error obtaining the system certificates: %s", systemCertPoolErr)
		certPool = x509.NewCertPool()
	} else {
		success = true
	}

	// Next, append the Cloudflare CA pool into the system pool
	if !certPool.AppendCertsFromPEM(cloudflareRootCA) {
		logger.Warn("could not append the CF certificate to the cloudflared certificate pool")
	} else {
		success = true
	}

	if !success { // Obtaining any of the CAs has failed; this is a fatal error
		return nil, errors.New("error loading any of the CAs into the global certificate pool")
	}

	// Finally, add the Hello certificate into the pool (since it's self-signed)
	helloCertificate, err := GetHelloCertificateX509()
	if err != nil {
		logger.Warn("error obtaining the Hello server certificate")
	} else {
		certPool.AddCert(helloCertificate)
	}

	return certPool, nil
}

func LoadOriginCertPool(originCAPoolPEM []byte) (*x509.CertPool, error) {
	success := false

	// Get the global pool
	certPool, globalPoolErr := LoadGlobalCertPool()
	if globalPoolErr != nil {
		certPool = x509.NewCertPool()
	} else {
		success = true
	}

	// Then, add any custom origin CA pool the user may have passed
	if originCAPoolPEM != nil {
		if !certPool.AppendCertsFromPEM(originCAPoolPEM) {
			logger.Warn("could not append the provided origin CA to the cloudflared certificate pool")
		} else {
			success = true
		}
	}

	if !success {
		return nil, errors.New("error loading any of the CAs into the origin certificate pool")
	}

	return certPool, nil
}

func CreateTunnelConfig(c *cli.Context, addrs []string) *tls.Config {
	tlsConfig := CLIFlags{RootCA: "cacert"}.GetConfig(c)
	if tlsConfig.RootCAs == nil {
		tlsConfig.RootCAs = GetCloudflareRootCA()
		tlsConfig.ServerName = "cftunnel.com"
	} else if len(addrs) > 0 {
		// Set for development environments and for testing specific origintunneld instances
		tlsConfig.ServerName, _, _ = net.SplitHostPort(addrs[0])
	}
	return tlsConfig
}
@@ -0,0 +1,214 @@
// +build ignore

// TODO: Remove the above build tag and include this test when we start compiling with Golang 1.10.0+

package tlsconfig

import (
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/asn1"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
)

// Generated using `openssl req -newkey rsa:512 -nodes -x509 -days 3650`
var samplePEM = []byte(`
-----BEGIN CERTIFICATE-----
MIIB4DCCAYoCCQCb/H0EUrdXEjANBgkqhkiG9w0BAQsFADB3MQswCQYDVQQGEwJV
UzEOMAwGA1UECAwFVGV4YXMxDzANBgNVBAcMBkF1c3RpbjEZMBcGA1UECgwQQ2xv
dWRmbGFyZSwgSW5jLjEZMBcGA1UECwwQUHJvZHVjdCBTdHJhdGVneTERMA8GA1UE
AwwIVGVzdCBPbmUwHhcNMTgwNDI2MTYxMDUxWhcNMjgwNDIzMTYxMDUxWjB3MQsw
CQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxDzANBgNVBAcMBkF1c3RpbjEZMBcG
A1UECgwQQ2xvdWRmbGFyZSwgSW5jLjEZMBcGA1UECwwQUHJvZHVjdCBTdHJhdGVn
eTERMA8GA1UEAwwIVGVzdCBPbmUwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAwVQD
K0SJ25UFLznm2pU3zhzMEvpDEofHVNnCjk4mlDrtVop7PkKZ8pDEmuQANltUrxC8
yHBE2wXMv+GlH+bDtwIDAQABMA0GCSqGSIb3DQEBCwUAA0EAjVYQzozIFPkt/HRY
uUoZ8zEHIDICb0syFf5VAjm9AgTwIPzUmD+c5vl6LWDnxq7L45nLCzhhQ6YmiwDz
X7Wcyg==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB4DCCAYoCCQDZfCdAJ+mwzDANBgkqhkiG9w0BAQsFADB3MQswCQYDVQQGEwJV
UzEOMAwGA1UECAwFVGV4YXMxDzANBgNVBAcMBkF1c3RpbjEZMBcGA1UECgwQQ2xv
dWRmbGFyZSwgSW5jLjEZMBcGA1UECwwQUHJvZHVjdCBTdHJhdGVneTERMA8GA1UE
AwwIVGVzdCBUd28wHhcNMTgwNDI2MTYxMTIwWhcNMjgwNDIzMTYxMTIwWjB3MQsw
CQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxDzANBgNVBAcMBkF1c3RpbjEZMBcG
A1UECgwQQ2xvdWRmbGFyZSwgSW5jLjEZMBcGA1UECwwQUHJvZHVjdCBTdHJhdGVn
eTERMA8GA1UEAwwIVGVzdCBUd28wXDANBgkqhkiG9w0BAQEFAANLADBIAkEAoHKp
ROVK3zCSsH7ocYeyRAML4V7SFAbZcb4WIwDnE08oMBVRkQVcW5tqEkvG3RiClfzV
wZIJ3CfqKIeSNSDU9wIDAQABMA0GCSqGSIb3DQEBCwUAA0EAJw2gUbnPiq4C2p5b
iWzlA9Q7aKo+VQ4H7IZS7tTccr59nVjvH/TG3eWujpnocr4TOqW9M3CK1DF9mUGP
3pQ3Jg==
-----END CERTIFICATE-----
`)

var systemCertPoolSubjects []*pkix.Name

type certificateFixture struct {
	ou string
	cn string
}

func TestMain(m *testing.M) {
	systemCertPool, err := x509.SystemCertPool()
	if isUnrecoverableError(err) {
		os.Exit(1)
	}

	if systemCertPool == nil {
		// On Windows, let's just assume the system cert pool was empty
		systemCertPool = x509.NewCertPool()
	}

	systemCertPoolSubjects, err = getCertPoolSubjects(systemCertPool)
	if err != nil {
		os.Exit(1)
	}

	os.Exit(m.Run())
}

func TestLoadOriginCertPoolJustSystemPool(t *testing.T) {
	certPoolSubjects := loadCertPoolSubjects(t, nil)
	extraSubjects := subjectSubtract(systemCertPoolSubjects, certPoolSubjects)

	// Remove extra subjects from the cert pool
	var filteredSystemCertPoolSubjects []*pkix.Name

	t.Log(extraSubjects)

OUTER:
	for _, subject := range certPoolSubjects {
		for _, extraSubject := range extraSubjects {
			if subject == extraSubject {
				t.Log(extraSubject)
				continue OUTER
			}
		}

		filteredSystemCertPoolSubjects = append(filteredSystemCertPoolSubjects, subject)
	}

	assert.Equal(t, len(filteredSystemCertPoolSubjects), len(systemCertPoolSubjects))

	difference := subjectSubtract(systemCertPoolSubjects, filteredSystemCertPoolSubjects)
	assert.Equal(t, 0, len(difference))
}

func TestLoadOriginCertPoolCFCertificates(t *testing.T) {
	certPoolSubjects := loadCertPoolSubjects(t, nil)

	extraSubjects := subjectSubtract(systemCertPoolSubjects, certPoolSubjects)

	expected := []*certificateFixture{
		{ou: "CloudFlare Origin SSL ECC Certificate Authority"},
		{ou: "CloudFlare Origin SSL Certificate Authority"},
		{cn: "origin-pull.cloudflare.net"},
		{cn: "Argo Tunnel Sample Hello Server Certificate"},
	}

	assertFixturesMatchSubjects(t, expected, extraSubjects)
}

func TestLoadOriginCertPoolWithExtraPEMs(t *testing.T) {
	certPoolWithoutPEMSubjects := loadCertPoolSubjects(t, nil)
	certPoolWithPEMSubjects := loadCertPoolSubjects(t, samplePEM)

	difference := subjectSubtract(certPoolWithoutPEMSubjects, certPoolWithPEMSubjects)

	assert.Equal(t, 2, len(difference))

	expected := []*certificateFixture{
		{cn: "Test One"},
		{cn: "Test Two"},
	}

	assertFixturesMatchSubjects(t, expected, difference)
}

func loadCertPoolSubjects(t *testing.T, originCAPoolPEM []byte) []*pkix.Name {
	certPool, err := LoadOriginCertPool(originCAPoolPEM)
	if isUnrecoverableError(err) {
		t.Fatal(err)
	}
	assert.NotEmpty(t, certPool.Subjects())
	certPoolSubjects, err := getCertPoolSubjects(certPool)
	if err != nil {
		t.Fatal(err)
	}

	return certPoolSubjects
}

func assertFixturesMatchSubjects(t *testing.T, fixtures []*certificateFixture, subjects []*pkix.Name) {
	assert.Equal(t, len(fixtures), len(subjects))

	for _, fixture := range fixtures {
		found := false
		for _, subject := range subjects {
			found = found || fixtureMatchesSubjectPredicate(fixture, subject)
		}

		if !found {
			t.Fail()
		}
	}
}

func fixtureMatchesSubjectPredicate(fixture *certificateFixture, subject *pkix.Name) bool {
	cnMatch := true
	if fixture.cn != "" {
		cnMatch = fixture.cn == subject.CommonName
	}

	ouMatch := true
	if fixture.ou != "" {
		ouMatch = len(subject.OrganizationalUnit) > 0 && fixture.ou == subject.OrganizationalUnit[0]
	}

	return cnMatch && ouMatch
}

func subjectSubtract(left []*pkix.Name, right []*pkix.Name) []*pkix.Name {
	var difference []*pkix.Name

	var found bool
	for _, r := range right {
		found = false
		for _, l := range left {
			if (*l).String() == (*r).String() {
				found = true
			}
		}

		if !found {
			difference = append(difference, r)
		}
	}

	return difference
}

func getCertPoolSubjects(certPool *x509.CertPool) ([]*pkix.Name, error) {
	var subjects []*pkix.Name

	for _, subject := range certPool.Subjects() {
		var sequence pkix.RDNSequence
		_, err := asn1.Unmarshal(subject, &sequence)
		if err != nil {
			return nil, err
		}

		name := pkix.Name{}
		name.FillFromRDNSequence(&sequence)

		subjects = append(subjects, &name)
	}

	return subjects, nil
}

func isUnrecoverableError(err error) bool {
	return err != nil && err.Error() != "crypto/x509: system root pool is not available on Windows"
}
@@ -0,0 +1,38 @@
package tunneldns

import (
	"github.com/coredns/coredns/plugin"
	"github.com/miekg/dns"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

// Upstream is a simplified interface for a proxy destination
type Upstream interface {
	Exchange(ctx context.Context, query *dns.Msg) (*dns.Msg, error)
}

// ProxyPlugin is a simplified DNS proxy using a generic upstream interface
type ProxyPlugin struct {
	Upstreams []Upstream
	Next      plugin.Handler
}

// ServeDNS implements the CoreDNS plugin interface
func (p ProxyPlugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
	var reply *dns.Msg
	var backendErr error

	for _, upstream := range p.Upstreams {
		reply, backendErr = upstream.Exchange(ctx, r)
		if backendErr == nil {
			w.WriteMsg(reply)
			return 0, nil
		}
	}

	return dns.RcodeServerFailure, errors.Wrap(backendErr, "failed to contact any of the upstreams")
}

// Name implements the CoreDNS plugin interface
func (p ProxyPlugin) Name() string { return "proxy" }
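// Usage sketch (illustration only): ServeDNS tries each upstream in order and answers
// with the first successful reply, so a resolver with failover can be assembled as:
//
//	primary, _ := NewUpstreamHTTPS("https://1.1.1.1/dns-query")  // endpoint is illustrative
//	backup, _ := NewUpstreamHTTPS("https://1.0.0.1/dns-query")   // endpoint is illustrative
//	proxy := ProxyPlugin{Upstreams: []Upstream{primary, backup}}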
@@ -0,0 +1,105 @@
package tunneldns

import (
	"bytes"
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"time"

	"github.com/miekg/dns"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"golang.org/x/net/http2"
)

const (
	defaultTimeout = 5 * time.Second
)

// UpstreamHTTPS is the upstream implementation for a DNS-over-HTTPS service
type UpstreamHTTPS struct {
	client   *http.Client
	endpoint *url.URL
}

// NewUpstreamHTTPS creates a new DNS-over-HTTPS upstream from an endpoint URL
func NewUpstreamHTTPS(endpoint string) (Upstream, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}

	// Update TLS and HTTP client configuration
	tlsConfig := &tls.Config{ServerName: u.Hostname()}
	transport := &http.Transport{
		TLSClientConfig:    tlsConfig,
		DisableCompression: true,
		MaxIdleConns:       1,
	}
	http2.ConfigureTransport(transport)

	client := &http.Client{
		Timeout:   defaultTimeout,
		Transport: transport,
	}

	return &UpstreamHTTPS{client: client, endpoint: u}, nil
}

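// Usage sketch (the endpoint below is illustrative, not a default taken from this code):
//
//	upstream, err := NewUpstreamHTTPS("https://cloudflare-dns.com/dns-query")
//	if err != nil {
//		log.Fatal(err)
//	}
//	reply, err := upstream.Exchange(context.Background(), query)
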
// Exchange provides an implementation for the Upstream interface
|
||||||
|
func (u *UpstreamHTTPS) Exchange(ctx context.Context, query *dns.Msg) (*dns.Msg, error) {
|
||||||
|
queryBuf, err := query.Pack()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to pack DNS query")
|
||||||
|
}
|
||||||
|
|
||||||
|
// No content negotiation for now, use DNS wire format
|
||||||
|
buf, backendErr := u.exchangeWireformat(queryBuf)
|
||||||
|
if backendErr == nil {
|
||||||
|
response := &dns.Msg{}
|
||||||
|
if err := response.Unpack(buf); err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to unpack DNS response from body")
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Id = query.Id
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
log.WithError(backendErr).Errorf("failed to connect to an HTTPS backend %q", u.endpoint)
|
||||||
|
return nil, backendErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Perform message exchange with the default UDP wireformat defined in current draft
|
||||||
|
// https://datatracker.ietf.org/doc/draft-ietf-doh-dns-over-https
|
||||||
|
func (u *UpstreamHTTPS) exchangeWireformat(msg []byte) ([]byte, error) {
|
||||||
|
req, err := http.NewRequest("POST", u.endpoint.String(), bytes.NewBuffer(msg))
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to create an HTTPS request")
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Add("Content-Type", "application/dns-udpwireformat")
|
||||||
|
req.Host = u.endpoint.Hostname()
|
||||||
|
|
||||||
|
resp, err := u.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to perform an HTTPS request")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check response status code
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return nil, fmt.Errorf("returned status code %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read wireformat response from the body
|
||||||
|
buf, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to read the response body")
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf, nil
|
||||||
|
}
|
|
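A usage sketch (not part of the commit), assuming package tunneldns and an illustrative endpoint: a one-shot DoH lookup through Exchange, which packs the query, POSTs it in wire format, and unpacks the reply.

// Sketch only, assuming package tunneldns.
func exampleDoHLookup() (*dns.Msg, error) {
	upstream, err := NewUpstreamHTTPS("https://1.1.1.1/dns-query")
	if err != nil {
		return nil, err
	}
	query := new(dns.Msg)
	query.SetQuestion("example.com.", dns.TypeA)
	return upstream.Exchange(context.Background(), query)
}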
@@ -0,0 +1,45 @@
package tunneldns

import (
	"github.com/coredns/coredns/plugin"
	"github.com/coredns/coredns/plugin/metrics/vars"
	"github.com/coredns/coredns/plugin/pkg/dnstest"
	"github.com/coredns/coredns/plugin/pkg/rcode"
	"github.com/coredns/coredns/request"
	"github.com/miekg/dns"
	"github.com/prometheus/client_golang/prometheus"
	"golang.org/x/net/context"
)

// MetricsPlugin is an adapter for CoreDNS and built-in metrics
type MetricsPlugin struct {
	Next plugin.Handler
}

// NewMetricsPlugin creates a plugin with configured metrics
func NewMetricsPlugin(next plugin.Handler) *MetricsPlugin {
	prometheus.MustRegister(vars.RequestCount)
	prometheus.MustRegister(vars.RequestDuration)
	prometheus.MustRegister(vars.RequestSize)
	prometheus.MustRegister(vars.RequestDo)
	prometheus.MustRegister(vars.RequestType)
	prometheus.MustRegister(vars.ResponseSize)
	prometheus.MustRegister(vars.ResponseRcode)
	return &MetricsPlugin{Next: next}
}

// ServeDNS implements the CoreDNS plugin interface
func (p MetricsPlugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
	state := request.Request{W: w, Req: r}

	rw := dnstest.NewRecorder(w)
	status, err := plugin.NextOrFailure(p.Name(), p.Next, ctx, rw, r)

	// Update built-in metrics
	vars.Report(ctx, state, ".", rcode.ToString(rw.Rcode), rw.Len, rw.Start)

	return status, err
}

// Name implements the CoreDNS plugin interface
func (p MetricsPlugin) Name() string { return "metrics" }
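A composition note (not part of the commit): the plugin wraps the next handler with a recording ResponseWriter, so it composes like any other CoreDNS plugin. A sketch assuming package tunneldns:

// Sketch only. Requests flow through MetricsPlugin first; vars.Report
// records the rcode, response size and latency after the inner handler replies.
func exampleMetricsChain(inner plugin.Handler) plugin.Handler {
	return NewMetricsPlugin(inner)
}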
@@ -0,0 +1,148 @@
package tunneldns

import (
	"net"
	"os"
	"os/signal"
	"strconv"
	"sync"
	"syscall"

	"github.com/cloudflare/cloudflared/log"
	"github.com/cloudflare/cloudflared/metrics"

	"github.com/coredns/coredns/core/dnsserver"
	"github.com/coredns/coredns/plugin"
	"github.com/coredns/coredns/plugin/cache"
	"github.com/pkg/errors"
	"gopkg.in/urfave/cli.v2"
)

var logger = log.CreateLogger()

// Listener is an adapter between the CoreDNS server and a Warp runnable
type Listener struct {
	server *dnsserver.Server
	wg     sync.WaitGroup
}

// Run implements a foreground runner
func Run(c *cli.Context) error {
	metricsListener, err := net.Listen("tcp", c.String("metrics"))
	if err != nil {
		logger.WithError(err).Fatal("Failed to open the metrics listener")
	}

	go metrics.ServeMetrics(metricsListener, nil, logger)

	listener, err := CreateListener(c.String("address"), uint16(c.Uint("port")), c.StringSlice("upstream"))
	if err != nil {
		logger.WithError(err).Errorf("Failed to create the listeners")
		return err
	}

	// Try to start the server
	readySignal := make(chan struct{})
	err = listener.Start(readySignal)
	if err != nil {
		logger.WithError(err).Errorf("Failed to start the listeners")
		return listener.Stop()
	}
	<-readySignal

	// Wait for signal
	signals := make(chan os.Signal, 10)
	signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT)
	defer signal.Stop(signals)
	<-signals

	// Shut down the server
	err = listener.Stop()
	if err != nil {
		logger.WithError(err).Errorf("failed to stop")
	}
	return err
}

// createConfig creates a CoreDNS server configuration from the given plugin
func createConfig(address string, port uint16, p plugin.Handler) *dnsserver.Config {
	c := &dnsserver.Config{
		Zone:        ".",
		Transport:   "dns",
		ListenHosts: []string{address},
		Port:        strconv.FormatUint(uint64(port), 10),
	}

	c.AddPlugin(func(next plugin.Handler) plugin.Handler { return p })
	return c
}

// Start blocks while serving requests
func (l *Listener) Start(readySignal chan struct{}) error {
	defer close(readySignal)
	logger.WithField("addr", l.server.Address()).Infof("Starting DNS over HTTPS proxy server")

	// Start the UDP listener
	if udp, err := l.server.ListenPacket(); err == nil {
		l.wg.Add(1)
		go func() {
			l.server.ServePacket(udp)
			l.wg.Done()
		}()
	} else {
		return errors.Wrap(err, "failed to create a UDP listener")
	}

	// Start the TCP listener
	tcp, err := l.server.Listen()
	if err == nil {
		l.wg.Add(1)
		go func() {
			l.server.Serve(tcp)
			l.wg.Done()
		}()
	}

	return errors.Wrap(err, "failed to create a TCP listener")
}

// Stop signals server shutdown and blocks until completed
func (l *Listener) Stop() error {
	if err := l.server.Stop(); err != nil {
		return err
	}

	l.wg.Wait()
	return nil
}

// CreateListener configures the server and binds the sockets
func CreateListener(address string, port uint16, upstreams []string) (*Listener, error) {
	// Build the list of upstreams
	upstreamList := make([]Upstream, 0)
	for _, url := range upstreams {
		logger.WithField("url", url).Infof("Adding DNS upstream")
		upstream, err := NewUpstreamHTTPS(url)
		if err != nil {
			return nil, errors.Wrap(err, "failed to create HTTPS upstream")
		}
		upstreamList = append(upstreamList, upstream)
	}

	// Create a local cache with the HTTPS proxy plugin
	chain := cache.New()
	chain.Next = ProxyPlugin{
		Upstreams: upstreamList,
	}

	// Format an endpoint
	endpoint := "dns://" + net.JoinHostPort(address, strconv.FormatUint(uint64(port), 10))

	// Create the actual middleware server
	server, err := dnsserver.NewServer(endpoint, []*dnsserver.Config{createConfig(address, port, NewMetricsPlugin(chain))})
	if err != nil {
		return nil, err
	}

	return &Listener{server: server}, nil
}
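A standalone usage sketch (not part of the commit), assuming package tunneldns; the address, port, and upstream URL are illustrative. It mirrors what Run does, minus the CLI flags and signal handling.

// Sketch only.
func exampleStandaloneListener() error {
	listener, err := CreateListener("127.0.0.1", 53, []string{"https://1.1.1.1/dns-query"})
	if err != nil {
		return err
	}
	ready := make(chan struct{})
	if err := listener.Start(ready); err != nil {
		return listener.Stop()
	}
	<-ready
	// ... serve until an external shutdown condition ...
	return listener.Stop()
}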
@@ -0,0 +1,15 @@
# Generate go.capnp.out with:
#   capnp compile -o- go.capnp > go.capnp.out
# Must run inside this directory to preserve paths.

@0xd12a1c51fedd6c88;

annotation package(file) :Text;
annotation import(file) :Text;
annotation doc(struct, field, enum) :Text;
annotation tag(enumerant) :Text;
annotation notag(enumerant) :Void;
annotation customtype(field) :Text;
annotation name(struct, field, union, enum, enumerant, interface, method, param, annotation, const, group) :Text;

$package("capnp");
@@ -0,0 +1,42 @@
package tunnelrpc

import (
	log "github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"golang.org/x/net/trace"
	"zombiezen.com/go/capnproto2/rpc"
)

// ConnLogger wraps a logrus *log.Entry for a connection.
type ConnLogger struct {
	Entry *log.Entry
}

func (c ConnLogger) Infof(ctx context.Context, format string, args ...interface{}) {
	c.Entry.Infof(format, args...)
}

func (c ConnLogger) Errorf(ctx context.Context, format string, args ...interface{}) {
	c.Entry.Errorf(format, args...)
}

func ConnLog(log *log.Entry) rpc.ConnOption {
	return rpc.ConnLog(ConnLogger{log})
}

// ConnTracer wraps a trace.EventLog for a connection.
type ConnTracer struct {
	Events trace.EventLog
}

func (c ConnTracer) Infof(ctx context.Context, format string, args ...interface{}) {
	c.Events.Printf(format, args...)
}

func (c ConnTracer) Errorf(ctx context.Context, format string, args ...interface{}) {
	c.Events.Errorf(format, args...)
}

func ConnTrace(events trace.EventLog) rpc.ConnOption {
	return rpc.ConnLog(ConnTracer{events})
}
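A sketch of the intended call site (not part of the commit), assuming package tunnelrpc plus a "net" import; the net.Conn is assumed to come from elsewhere. ConnLog routes the connection's Infof/Errorf callbacks to the logrus entry.

// Sketch only.
func exampleLoggedConn(netConn net.Conn, entry *log.Entry) *rpc.Conn {
	transport := rpc.StreamTransport(netConn)
	return rpc.NewConn(transport, ConnLog(entry))
}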
@@ -0,0 +1,45 @@
// Transport logging for tunnelrpc: a transport wrapper that logs all of its messages.
package tunnelrpc

import (
	"bytes"

	log "github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"zombiezen.com/go/capnproto2/encoding/text"
	"zombiezen.com/go/capnproto2/rpc"
	rpccapnp "zombiezen.com/go/capnproto2/std/capnp/rpc"
)

type transport struct {
	rpc.Transport
	l *log.Entry
}

// NewTransportLogger creates a new transport that proxies messages to and
// from t and logs them to l. If l is nil, then the log package's default
// logger is used.
func NewTransportLogger(l *log.Entry, t rpc.Transport) rpc.Transport {
	return &transport{Transport: t, l: l}
}

func (t *transport) SendMessage(ctx context.Context, msg rpccapnp.Message) error {
	t.l.Debugf("tx %s", formatMsg(msg))
	return t.Transport.SendMessage(ctx, msg)
}

func (t *transport) RecvMessage(ctx context.Context) (rpccapnp.Message, error) {
	msg, err := t.Transport.RecvMessage(ctx)
	if err != nil {
		t.l.WithError(err).Debug("rx error")
		return msg, err
	}
	t.l.Debugf("rx %s", formatMsg(msg))
	return msg, nil
}

func formatMsg(m rpccapnp.Message) string {
	var buf bytes.Buffer
	text.NewEncoder(&buf).Encode(0x91b79f1f808db032, m.Struct)
	return buf.String()
}
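A wrapping sketch (not part of the commit), assuming package tunnelrpc plus a "net" import: every capnp message is logged at debug level on the way in and out, which pairs naturally with the ConnLog option above.

// Sketch only; the "subsystem" field name is illustrative.
func exampleLoggedTransport(netConn net.Conn, entry *log.Entry) rpc.Transport {
	return NewTransportLogger(entry.WithField("subsystem", "rpc"), rpc.StreamTransport(netConn))
}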
@@ -0,0 +1,211 @@
package pogs

import (
	"github.com/cloudflare/cloudflared/tunnelrpc"
	log "github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"zombiezen.com/go/capnproto2"
	"zombiezen.com/go/capnproto2/pogs"
	"zombiezen.com/go/capnproto2/rpc"
	"zombiezen.com/go/capnproto2/server"
)

type Authentication struct {
	Key         string
	Email       string
	OriginCAKey string
}

func MarshalAuthentication(s tunnelrpc.Authentication, p *Authentication) error {
	return pogs.Insert(tunnelrpc.Authentication_TypeID, s.Struct, p)
}

func UnmarshalAuthentication(s tunnelrpc.Authentication) (*Authentication, error) {
	p := new(Authentication)
	err := pogs.Extract(p, tunnelrpc.Authentication_TypeID, s.Struct)
	return p, err
}

type TunnelRegistration struct {
	Err              string
	Url              string
	LogLines         []string
	PermanentFailure bool
	TunnelID         string `capnp:"tunnelID"`
}

func MarshalTunnelRegistration(s tunnelrpc.TunnelRegistration, p *TunnelRegistration) error {
	return pogs.Insert(tunnelrpc.TunnelRegistration_TypeID, s.Struct, p)
}

func UnmarshalTunnelRegistration(s tunnelrpc.TunnelRegistration) (*TunnelRegistration, error) {
	p := new(TunnelRegistration)
	err := pogs.Extract(p, tunnelrpc.TunnelRegistration_TypeID, s.Struct)
	return p, err
}

type RegistrationOptions struct {
	ClientID             string `capnp:"clientId"`
	Version              string
	OS                   string `capnp:"os"`
	ExistingTunnelPolicy tunnelrpc.ExistingTunnelPolicy
	PoolName             string `capnp:"poolName"`
	Tags                 []Tag
	ConnectionID         uint8  `capnp:"connectionId"`
	OriginLocalIP        string `capnp:"originLocalIp"`
	IsAutoupdated        bool   `capnp:"isAutoupdated"`
	RunFromTerminal      bool   `capnp:"runFromTerminal"`
	CompressionQuality   uint64 `capnp:"compressionQuality"`
}

func MarshalRegistrationOptions(s tunnelrpc.RegistrationOptions, p *RegistrationOptions) error {
	return pogs.Insert(tunnelrpc.RegistrationOptions_TypeID, s.Struct, p)
}

func UnmarshalRegistrationOptions(s tunnelrpc.RegistrationOptions) (*RegistrationOptions, error) {
	p := new(RegistrationOptions)
	err := pogs.Extract(p, tunnelrpc.RegistrationOptions_TypeID, s.Struct)
	return p, err
}

type Tag struct {
	Name  string `json:"name"`
	Value string `json:"value"`
}

type ServerInfo struct {
	LocationName string
}

func MarshalServerInfo(s tunnelrpc.ServerInfo, p *ServerInfo) error {
	return pogs.Insert(tunnelrpc.ServerInfo_TypeID, s.Struct, p)
}

func UnmarshalServerInfo(s tunnelrpc.ServerInfo) (*ServerInfo, error) {
	p := new(ServerInfo)
	err := pogs.Extract(p, tunnelrpc.ServerInfo_TypeID, s.Struct)
	return p, err
}

type TunnelServer interface {
	RegisterTunnel(ctx context.Context, originCert []byte, hostname string, options *RegistrationOptions) (*TunnelRegistration, error)
	GetServerInfo(ctx context.Context) (*ServerInfo, error)
	UnregisterTunnel(ctx context.Context, gracePeriodNanoSec int64) error
}

func TunnelServer_ServerToClient(s TunnelServer) tunnelrpc.TunnelServer {
	return tunnelrpc.TunnelServer_ServerToClient(TunnelServer_PogsImpl{s})
}

type TunnelServer_PogsImpl struct {
	impl TunnelServer
}

func (i TunnelServer_PogsImpl) RegisterTunnel(p tunnelrpc.TunnelServer_registerTunnel) error {
	originCert, err := p.Params.OriginCert()
	if err != nil {
		return err
	}
	hostname, err := p.Params.Hostname()
	if err != nil {
		return err
	}
	options, err := p.Params.Options()
	if err != nil {
		return err
	}
	pogsOptions, err := UnmarshalRegistrationOptions(options)
	if err != nil {
		return err
	}
	server.Ack(p.Options)
	registration, err := i.impl.RegisterTunnel(p.Ctx, originCert, hostname, pogsOptions)
	if err != nil {
		return err
	}
	result, err := p.Results.NewResult()
	if err != nil {
		return err
	}
	log.Info(registration.TunnelID)
	return MarshalTunnelRegistration(result, registration)
}

func (i TunnelServer_PogsImpl) GetServerInfo(p tunnelrpc.TunnelServer_getServerInfo) error {
	server.Ack(p.Options)
	serverInfo, err := i.impl.GetServerInfo(p.Ctx)
	if err != nil {
		return err
	}
	result, err := p.Results.NewResult()
	if err != nil {
		return err
	}
	return MarshalServerInfo(result, serverInfo)
}

func (i TunnelServer_PogsImpl) UnregisterTunnel(p tunnelrpc.TunnelServer_unregisterTunnel) error {
	gracePeriodNanoSec := p.Params.GracePeriodNanoSec()
	server.Ack(p.Options)
	return i.impl.UnregisterTunnel(p.Ctx, gracePeriodNanoSec)
}

type TunnelServer_PogsClient struct {
	Client capnp.Client
	Conn   *rpc.Conn
}

func (c TunnelServer_PogsClient) Close() error {
	return c.Conn.Close()
}

func (c TunnelServer_PogsClient) RegisterTunnel(ctx context.Context, originCert []byte, hostname string, options *RegistrationOptions) (*TunnelRegistration, error) {
	client := tunnelrpc.TunnelServer{Client: c.Client}
	promise := client.RegisterTunnel(ctx, func(p tunnelrpc.TunnelServer_registerTunnel_Params) error {
		err := p.SetOriginCert(originCert)
		if err != nil {
			return err
		}
		err = p.SetHostname(hostname)
		if err != nil {
			return err
		}
		registrationOptions, err := p.NewOptions()
		if err != nil {
			return err
		}
		err = MarshalRegistrationOptions(registrationOptions, options)
		if err != nil {
			return err
		}
		return nil
	})
	retval, err := promise.Result().Struct()
	if err != nil {
		return nil, err
	}
	return UnmarshalTunnelRegistration(retval)
}

func (c TunnelServer_PogsClient) GetServerInfo(ctx context.Context) (*ServerInfo, error) {
	client := tunnelrpc.TunnelServer{Client: c.Client}
	promise := client.GetServerInfo(ctx, func(p tunnelrpc.TunnelServer_getServerInfo_Params) error {
		return nil
	})
	retval, err := promise.Result().Struct()
	if err != nil {
		return nil, err
	}
	return UnmarshalServerInfo(retval)
}

func (c TunnelServer_PogsClient) UnregisterTunnel(ctx context.Context, gracePeriodNanoSec int64) error {
	client := tunnelrpc.TunnelServer{Client: c.Client}
	promise := client.UnregisterTunnel(ctx, func(p tunnelrpc.TunnelServer_unregisterTunnel_Params) error {
		p.SetGracePeriodNanoSec(gracePeriodNanoSec)
		return nil
	})
	_, err := promise.Struct()
	return err
}
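A client-side sketch (not part of the commit), assuming package pogs and an already-established capnp connection; the hostname, client ID, and option values are illustrative.

// Sketch only.
func exampleRegister(ctx context.Context, conn *rpc.Conn, originCert []byte) (*TunnelRegistration, error) {
	client := TunnelServer_PogsClient{Client: conn.Bootstrap(ctx), Conn: conn}
	options := &RegistrationOptions{
		ClientID: "example-client",
		Version:  "dev",
		OS:       "linux",
	}
	// RegisterTunnel marshals the options into capnp params and unmarshals
	// the TunnelRegistration result back into the plain Go struct.
	return client.RegisterTunnel(ctx, originCert, "tunnel.example.com", options)
}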
@@ -0,0 +1,67 @@
using Go = import "go.capnp";
@0xdb8274f9144abc7e;
$Go.package("tunnelrpc");
$Go.import("github.com/cloudflare/cloudflared/tunnelrpc");

struct Authentication {
    key @0 :Text;
    email @1 :Text;
    originCAKey @2 :Text;
}

struct TunnelRegistration {
    err @0 :Text;
    # the url to access the tunnel
    url @1 :Text;
    # Used to inform the client of actions taken.
    logLines @2 :List(Text);
    # In case of error, whether the client should attempt to reconnect.
    permanentFailure @3 :Bool;
    # Displayed to user
    tunnelID @4 :Text;
}

struct RegistrationOptions {
    # The tunnel client's unique identifier, used to verify a reconnection.
    clientId @0 :Text;
    # Information about the running binary.
    version @1 :Text;
    os @2 :Text;
    # What to do with existing tunnels for the given hostname.
    existingTunnelPolicy @3 :ExistingTunnelPolicy;
    # If using the balancing policy, identifies the LB pool to use.
    poolName @4 :Text;
    # Client-defined tags to associate with the tunnel
    tags @5 :List(Tag);
    # A unique identifier for a high-availability connection made by a single client.
    connectionId @6 :UInt8;
    # origin LAN IP
    originLocalIp @7 :Text;
    # whether Argo Tunnel client has been autoupdated
    isAutoupdated @8 :Bool;
    # whether Argo Tunnel client is run from a terminal
    runFromTerminal @9 :Bool;
    # cross stream compression setting, 0 - off, 3 - high
    compressionQuality @10 :UInt64;
}

struct Tag {
    name @0 :Text;
    value @1 :Text;
}

enum ExistingTunnelPolicy {
    ignore @0;
    disconnect @1;
    balance @2;
}

struct ServerInfo {
    locationName @0 :Text;
}

interface TunnelServer {
    registerTunnel @0 (originCert :Data, hostname :Text, options :RegistrationOptions) -> (result :TunnelRegistration);
    getServerInfo @1 () -> (result :ServerInfo);
    unregisterTunnel @2 (gracePeriodNanoSec :Int64) -> ();
}
File diff suppressed because it is too large
@@ -0,0 +1,136 @@
package validation

import (
	"fmt"
	"net"
	"net/url"
	"strings"

	"golang.org/x/net/idna"
)

const defaultScheme = "http"

var supportedProtocol = [2]string{"http", "https"}

func ValidateHostname(hostname string) (string, error) {
	if hostname == "" {
		return "", fmt.Errorf("Hostname should not be empty")
	}
	// The user may give a full URL (including the scheme), not just a hostname
	if strings.Contains(hostname, ":") || strings.Contains(hostname, "%3A") {
		unescapeHostname, err := url.PathUnescape(hostname)
		if err != nil {
			return "", fmt.Errorf("Hostname(actually a URL) %s has invalid escape characters %s", hostname, unescapeHostname)
		}
		hostnameToURL, err := url.Parse(unescapeHostname)
		if err != nil {
			return "", fmt.Errorf("Hostname(actually a URL) %s has invalid format %s", hostname, hostnameToURL)
		}
		asciiHostname, err := idna.ToASCII(hostnameToURL.Hostname())
		if err != nil {
			return "", fmt.Errorf("Hostname(actually a URL) %s has invalid ASCII encoding %s", hostname, asciiHostname)
		}
		return asciiHostname, nil
	}

	asciiHostname, err := idna.ToASCII(hostname)
	if err != nil {
		return "", fmt.Errorf("Hostname %s has invalid ASCII encoding %s", hostname, asciiHostname)
	}
	hostnameToURL, err := url.Parse(asciiHostname)
	if err != nil {
		return "", fmt.Errorf("Hostname %s is not valid", hostnameToURL)
	}
	return hostnameToURL.RequestURI(), nil
}

func ValidateUrl(originUrl string) (string, error) {
	if originUrl == "" {
		return "", fmt.Errorf("Url should not be empty")
	}

	if net.ParseIP(originUrl) != nil {
		return validateIP("", originUrl, "")
	} else if strings.HasPrefix(originUrl, "[") && strings.HasSuffix(originUrl, "]") {
		// ParseIP doesn't recognize [::1]
		return validateIP("", originUrl[1:len(originUrl)-1], "")
	}

	host, port, err := net.SplitHostPort(originUrl)
	// The user might pass in an IP address like 127.0.0.1
	if err == nil && net.ParseIP(host) != nil {
		return validateIP("", host, port)
	}

	unescapedUrl, err := url.PathUnescape(originUrl)
	if err != nil {
		return "", fmt.Errorf("URL %s has invalid escape characters %s", originUrl, unescapedUrl)
	}

	parsedUrl, err := url.Parse(unescapedUrl)
	if err != nil {
		return "", fmt.Errorf("URL %s has invalid format", originUrl)
	}

	// If the URL is in the form host:port, IsAbs() will mistake the host for the scheme
	var hostname string
	hasScheme := parsedUrl.IsAbs() && parsedUrl.Host != ""
	if hasScheme {
		err := validateScheme(parsedUrl.Scheme)
		if err != nil {
			return "", err
		}
		// The earlier check for an IP address will miss the case http://[::1]
		// and http://[::1]:8080
		if net.ParseIP(parsedUrl.Hostname()) != nil {
			return validateIP(parsedUrl.Scheme, parsedUrl.Hostname(), parsedUrl.Port())
		}
		hostname, err = ValidateHostname(parsedUrl.Hostname())
		if err != nil {
			return "", fmt.Errorf("URL %s has invalid format", originUrl)
		}
		if parsedUrl.Port() != "" {
			return fmt.Sprintf("%s://%s", parsedUrl.Scheme, net.JoinHostPort(hostname, parsedUrl.Port())), nil
		}
		return fmt.Sprintf("%s://%s", parsedUrl.Scheme, hostname), nil
	} else {
		if host == "" {
			hostname, err = ValidateHostname(originUrl)
			if err != nil {
				return "", fmt.Errorf("URL %s has invalid format", originUrl)
			}
			return fmt.Sprintf("%s://%s", defaultScheme, hostname), nil
		} else {
			hostname, err = ValidateHostname(host)
			if err != nil {
				return "", fmt.Errorf("URL %s has invalid format", originUrl)
			}
			return fmt.Sprintf("%s://%s", defaultScheme, net.JoinHostPort(hostname, port)), nil
		}
	}
}

func validateScheme(scheme string) error {
	for _, protocol := range supportedProtocol {
		if scheme == protocol {
			return nil
		}
	}
	return fmt.Errorf("Currently Argo Tunnel does not support %s protocol.", scheme)
}

func validateIP(scheme, host, port string) (string, error) {
	if scheme == "" {
		scheme = defaultScheme
	}
	if port != "" {
		return fmt.Sprintf("%s://%s", scheme, net.JoinHostPort(host, port)), nil
	} else if strings.Contains(host, ":") {
		// IPv6
		return fmt.Sprintf("%s://[%s]", scheme, host), nil
	}
	return fmt.Sprintf("%s://%s", scheme, host), nil
}
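A short caller's sketch (not part of the commit), assuming package validation, showing how the two functions normalize user input; the expected outputs in the comments follow from the tests below.

// Sketch only.
func exampleNormalize() {
	u, _ := ValidateUrl("localhost:8080")          // -> "http://localhost:8080"
	h, _ := ValidateHostname("bücher.example.com") // -> "xn--bcher-kva.example.com"
	fmt.Println(u, h)
}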
@@ -0,0 +1,136 @@
package validation

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestValidateHostname(t *testing.T) {
	var inputHostname string
	hostname, err := ValidateHostname(inputHostname)
	assert.Equal(t, err, fmt.Errorf("Hostname should not be empty"))
	assert.Empty(t, hostname)

	inputHostname = "hello.example.com"
	hostname, err = ValidateHostname(inputHostname)
	assert.Nil(t, err)
	assert.Equal(t, "hello.example.com", hostname)

	inputHostname = "http://hello.example.com"
	hostname, err = ValidateHostname(inputHostname)
	assert.Nil(t, err)
	assert.Equal(t, "hello.example.com", hostname)

	inputHostname = "bücher.example.com"
	hostname, err = ValidateHostname(inputHostname)
	assert.Nil(t, err)
	assert.Equal(t, "xn--bcher-kva.example.com", hostname)

	inputHostname = "http://bücher.example.com"
	hostname, err = ValidateHostname(inputHostname)
	assert.Nil(t, err)
	assert.Equal(t, "xn--bcher-kva.example.com", hostname)

	inputHostname = "http%3A%2F%2Fhello.example.com"
	hostname, err = ValidateHostname(inputHostname)
	assert.Nil(t, err)
	assert.Equal(t, "hello.example.com", hostname)
}

func TestValidateUrl(t *testing.T) {
	validUrl, err := ValidateUrl("")
	assert.Equal(t, fmt.Errorf("Url should not be empty"), err)
	assert.Empty(t, validUrl)

	validUrl, err = ValidateUrl("https://localhost:8080")
	assert.Nil(t, err)
	assert.Equal(t, "https://localhost:8080", validUrl)

	validUrl, err = ValidateUrl("localhost:8080")
	assert.Nil(t, err)
	assert.Equal(t, "http://localhost:8080", validUrl)

	validUrl, err = ValidateUrl("http://localhost")
	assert.Nil(t, err)
	assert.Equal(t, "http://localhost", validUrl)

	validUrl, err = ValidateUrl("http://127.0.0.1:8080")
	assert.Nil(t, err)
	assert.Equal(t, "http://127.0.0.1:8080", validUrl)

	validUrl, err = ValidateUrl("127.0.0.1:8080")
	assert.Nil(t, err)
	assert.Equal(t, "http://127.0.0.1:8080", validUrl)

	validUrl, err = ValidateUrl("127.0.0.1")
	assert.Nil(t, err)
	assert.Equal(t, "http://127.0.0.1", validUrl)

	validUrl, err = ValidateUrl("https://127.0.0.1:8080")
	assert.Nil(t, err)
	assert.Equal(t, "https://127.0.0.1:8080", validUrl)

	validUrl, err = ValidateUrl("[::1]:8080")
	assert.Nil(t, err)
	assert.Equal(t, "http://[::1]:8080", validUrl)

	validUrl, err = ValidateUrl("http://[::1]")
	assert.Nil(t, err)
	assert.Equal(t, "http://[::1]", validUrl)

	validUrl, err = ValidateUrl("http://[::1]:8080")
	assert.Nil(t, err)
	assert.Equal(t, "http://[::1]:8080", validUrl)

	validUrl, err = ValidateUrl("[::1]")
	assert.Nil(t, err)
	assert.Equal(t, "http://[::1]", validUrl)

	validUrl, err = ValidateUrl("https://example.com")
	assert.Nil(t, err)
	assert.Equal(t, "https://example.com", validUrl)

	validUrl, err = ValidateUrl("example.com")
	assert.Nil(t, err)
	assert.Equal(t, "http://example.com", validUrl)

	validUrl, err = ValidateUrl("http://hello.example.com")
	assert.Nil(t, err)
	assert.Equal(t, "http://hello.example.com", validUrl)

	validUrl, err = ValidateUrl("hello.example.com")
	assert.Nil(t, err)
	assert.Equal(t, "http://hello.example.com", validUrl)

	validUrl, err = ValidateUrl("hello.example.com:8080")
	assert.Nil(t, err)
	assert.Equal(t, "http://hello.example.com:8080", validUrl)

	validUrl, err = ValidateUrl("https://hello.example.com:8080")
	assert.Nil(t, err)
	assert.Equal(t, "https://hello.example.com:8080", validUrl)

	validUrl, err = ValidateUrl("https://bücher.example.com")
	assert.Nil(t, err)
	assert.Equal(t, "https://xn--bcher-kva.example.com", validUrl)

	validUrl, err = ValidateUrl("bücher.example.com")
	assert.Nil(t, err)
	assert.Equal(t, "http://xn--bcher-kva.example.com", validUrl)

	validUrl, err = ValidateUrl("https%3A%2F%2Fhello.example.com")
	assert.Nil(t, err)
	assert.Equal(t, "https://hello.example.com", validUrl)

	validUrl, err = ValidateUrl("ftp://alex:12345@hello.example.com:8080/robot.txt")
	assert.Equal(t, "Currently Argo Tunnel does not support ftp protocol.", err.Error())
	assert.Empty(t, validUrl)

	validUrl, err = ValidateUrl("https://alex:12345@hello.example.com:8080")
	assert.Nil(t, err)
	assert.Equal(t, "https://hello.example.com:8080", validUrl)
}
@@ -0,0 +1,130 @@
/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Function to find backward reference copies. */

#include "./enc/backward_references.h"

#include "./common/constants.h"
#include "./common/dictionary.h"
#include <brotli/types.h>
#include "./enc/command.h"
#include "./enc/dictionary_hash.h"
#include "./enc/memory.h"
#include "./enc/port.h"
#include "./enc/quality.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

static BROTLI_INLINE size_t ComputeDistanceCode(size_t distance,
                                                size_t max_distance,
                                                const int* dist_cache) {
  if (distance <= max_distance) {
    size_t distance_plus_3 = distance + 3;
    size_t offset0 = distance_plus_3 - (size_t)dist_cache[0];
    size_t offset1 = distance_plus_3 - (size_t)dist_cache[1];
    if (distance == (size_t)dist_cache[0]) {
      return 0;
    } else if (distance == (size_t)dist_cache[1]) {
      return 1;
    } else if (offset0 < 7) {
      return (0x9750468 >> (4 * offset0)) & 0xF;
    } else if (offset1 < 7) {
      return (0xFDB1ACE >> (4 * offset1)) & 0xF;
    } else if (distance == (size_t)dist_cache[2]) {
      return 2;
    } else if (distance == (size_t)dist_cache[3]) {
      return 3;
    }
  }
  return distance + BROTLI_NUM_DISTANCE_SHORT_CODES - 1;
}

#define EXPAND_CAT(a, b) CAT(a, b)
#define CAT(a, b) a ## b
#define FN(X) EXPAND_CAT(X, HASHER())

#define HASHER() H2
/* NOLINTNEXTLINE(build/include) */
#include "./enc/backward_references_inc.h"
#undef HASHER

#define HASHER() H3
/* NOLINTNEXTLINE(build/include) */
#include "./enc/backward_references_inc.h"
#undef HASHER

#define HASHER() H4
/* NOLINTNEXTLINE(build/include) */
#include "./enc/backward_references_inc.h"
#undef HASHER

#define HASHER() H5
/* NOLINTNEXTLINE(build/include) */
#include "./enc/backward_references_inc.h"
#undef HASHER

#define HASHER() H6
/* NOLINTNEXTLINE(build/include) */
#include "./enc/backward_references_inc.h"
#undef HASHER

#define HASHER() H40
/* NOLINTNEXTLINE(build/include) */
#include "./enc/backward_references_inc.h"
#undef HASHER

#define HASHER() H41
/* NOLINTNEXTLINE(build/include) */
#include "./enc/backward_references_inc.h"
#undef HASHER

#define HASHER() H42
/* NOLINTNEXTLINE(build/include) */
#include "./enc/backward_references_inc.h"
#undef HASHER

#define HASHER() H54
/* NOLINTNEXTLINE(build/include) */
#include "./enc/backward_references_inc.h"
#undef HASHER

#undef FN
#undef CAT
#undef EXPAND_CAT

void BrotliCreateBackwardReferences(const BrotliDictionary* dictionary,
                                    size_t num_bytes,
                                    size_t position,
                                    const uint8_t* ringbuffer,
                                    size_t ringbuffer_mask,
                                    const BrotliEncoderParams* params,
                                    HasherHandle hasher,
                                    int* dist_cache,
                                    size_t* last_insert_len,
                                    Command* commands,
                                    size_t* num_commands,
                                    size_t* num_literals) {
  switch (params->hasher.type) {
#define CASE_(N)                                                     \
    case N:                                                          \
      CreateBackwardReferencesH ## N(dictionary,                     \
          kStaticDictionaryHash, num_bytes, position, ringbuffer,    \
          ringbuffer_mask, params, hasher, dist_cache,               \
          last_insert_len, commands, num_commands, num_literals);    \
      break;
    FOR_GENERIC_HASHERS(CASE_)
#undef CASE_
    default:
      break;
  }
}

#if defined(__cplusplus) || defined(c_plusplus)
}  /* extern "C" */
#endif
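Context for reviewers, not part of the diff: ComputeDistanceCode above maps small deltas against the two most recent distances to brotli's short distance codes through two packed 4-bit lookup tables. A standalone Go sketch that just prints the table contents:

// Hypothetical illustration only; mirrors the hex constants in
// ComputeDistanceCode, where offset = (distance + 3) - dist_cache[i].
package main

import "fmt"

func main() {
	for offset := 0; offset <= 6; offset++ {
		delta := offset - 3                        // distance - dist_cache[i]
		code0 := (0x9750468 >> (4 * offset)) & 0xF // vs last distance
		code1 := (0xFDB1ACE >> (4 * offset)) & 0xF // vs second-last distance
		fmt.Printf("delta %+d: code %d (last), code %d (second-last)\n",
			delta, code0, code1)
	}
}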
@@ -0,0 +1,790 @@
/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Function to find backward reference copies. */

#include "./enc/backward_references_hq.h"

#include <string.h>  /* memcpy, memset */

#include "./common/constants.h"
#include <brotli/types.h>
#include "./enc/command.h"
#include "./enc/fast_log.h"
#include "./enc/find_match_length.h"
#include "./enc/literal_cost.h"
#include "./enc/memory.h"
#include "./enc/port.h"
#include "./enc/prefix.h"
#include "./enc/quality.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

static const float kInfinity = 1.7e38f;  /* ~= 2 ^ 127 */

static const uint32_t kDistanceCacheIndex[] = {
  0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
};
static const int kDistanceCacheOffset[] = {
  0, 0, 0, 0, -1, 1, -2, 2, -3, 3, -1, 1, -2, 2, -3, 3
};

void BrotliInitZopfliNodes(ZopfliNode* array, size_t length) {
  ZopfliNode stub;
  size_t i;
  stub.length = 1;
  stub.distance = 0;
  stub.insert_length = 0;
  stub.u.cost = kInfinity;
  for (i = 0; i < length; ++i) array[i] = stub;
}

static BROTLI_INLINE uint32_t ZopfliNodeCopyLength(const ZopfliNode* self) {
  return self->length & 0xffffff;
}

static BROTLI_INLINE uint32_t ZopfliNodeLengthCode(const ZopfliNode* self) {
  const uint32_t modifier = self->length >> 24;
  return ZopfliNodeCopyLength(self) + 9u - modifier;
}

static BROTLI_INLINE uint32_t ZopfliNodeCopyDistance(const ZopfliNode* self) {
  return self->distance & 0x1ffffff;
}

static BROTLI_INLINE uint32_t ZopfliNodeDistanceCode(const ZopfliNode* self) {
  const uint32_t short_code = self->distance >> 25;
  return short_code == 0 ?
      ZopfliNodeCopyDistance(self) + BROTLI_NUM_DISTANCE_SHORT_CODES - 1 :
      short_code - 1;
}

static BROTLI_INLINE uint32_t ZopfliNodeCommandLength(const ZopfliNode* self) {
  return ZopfliNodeCopyLength(self) + self->insert_length;
}

/* Histogram based cost model for zopflification. */
typedef struct ZopfliCostModel {
  /* The insert and copy length symbols. */
  float cost_cmd_[BROTLI_NUM_COMMAND_SYMBOLS];
  float cost_dist_[BROTLI_NUM_DISTANCE_SYMBOLS];
  /* Cumulative costs of literals per position in the stream. */
  float* literal_costs_;
  float min_cost_cmd_;
  size_t num_bytes_;
} ZopfliCostModel;

static void InitZopfliCostModel(
    MemoryManager* m, ZopfliCostModel* self, size_t num_bytes) {
  self->num_bytes_ = num_bytes;
  self->literal_costs_ = BROTLI_ALLOC(m, float, num_bytes + 2);
  if (BROTLI_IS_OOM(m)) return;
}

static void CleanupZopfliCostModel(MemoryManager* m, ZopfliCostModel* self) {
  BROTLI_FREE(m, self->literal_costs_);
}

static void SetCost(const uint32_t* histogram, size_t histogram_size,
                    float* cost) {
  size_t sum = 0;
  float log2sum;
  size_t i;
  for (i = 0; i < histogram_size; i++) {
    sum += histogram[i];
  }
  log2sum = (float)FastLog2(sum);
  for (i = 0; i < histogram_size; i++) {
    if (histogram[i] == 0) {
      cost[i] = log2sum + 2;
      continue;
    }

    /* Shannon bits for this symbol. */
    cost[i] = log2sum - (float)FastLog2(histogram[i]);

    /* Cannot be coded with less than 1 bit */
    if (cost[i] < 1) cost[i] = 1;
  }
}
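An aside for reviewers, not part of the diff: SetCost prices each symbol at its Shannon information in bits relative to the histogram, floored at 1 bit, with unseen symbols penalized at log2(total) + 2 bits. A standalone Go sketch of the same arithmetic:

// Hypothetical illustration of the cost model in SetCost above.
package main

import (
	"fmt"
	"math"
)

func setCost(histogram []uint32) []float64 {
	var sum uint32
	for _, h := range histogram {
		sum += h
	}
	log2sum := math.Log2(float64(sum))
	cost := make([]float64, len(histogram))
	for i, h := range histogram {
		if h == 0 {
			cost[i] = log2sum + 2 // unseen symbol: generous penalty
			continue
		}
		cost[i] = log2sum - math.Log2(float64(h)) // Shannon bits
		if cost[i] < 1 {
			cost[i] = 1 // cannot code a symbol in under 1 bit
		}
	}
	return cost
}

func main() {
	fmt.Println(setCost([]uint32{8, 4, 2, 1, 1, 0}))
}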
static void ZopfliCostModelSetFromCommands(ZopfliCostModel* self,
                                           size_t position,
                                           const uint8_t* ringbuffer,
                                           size_t ringbuffer_mask,
                                           const Command* commands,
                                           size_t num_commands,
                                           size_t last_insert_len) {
  uint32_t histogram_literal[BROTLI_NUM_LITERAL_SYMBOLS];
  uint32_t histogram_cmd[BROTLI_NUM_COMMAND_SYMBOLS];
  uint32_t histogram_dist[BROTLI_NUM_DISTANCE_SYMBOLS];
  float cost_literal[BROTLI_NUM_LITERAL_SYMBOLS];
  size_t pos = position - last_insert_len;
  float min_cost_cmd = kInfinity;
  size_t i;
  float* cost_cmd = self->cost_cmd_;

  memset(histogram_literal, 0, sizeof(histogram_literal));
  memset(histogram_cmd, 0, sizeof(histogram_cmd));
  memset(histogram_dist, 0, sizeof(histogram_dist));

  for (i = 0; i < num_commands; i++) {
    size_t inslength = commands[i].insert_len_;
    size_t copylength = CommandCopyLen(&commands[i]);
    size_t distcode = commands[i].dist_prefix_;
    size_t cmdcode = commands[i].cmd_prefix_;
    size_t j;

    histogram_cmd[cmdcode]++;
    if (cmdcode >= 128) histogram_dist[distcode]++;

    for (j = 0; j < inslength; j++) {
      histogram_literal[ringbuffer[(pos + j) & ringbuffer_mask]]++;
    }

    pos += inslength + copylength;
  }

  SetCost(histogram_literal, BROTLI_NUM_LITERAL_SYMBOLS, cost_literal);
  SetCost(histogram_cmd, BROTLI_NUM_COMMAND_SYMBOLS, cost_cmd);
  SetCost(histogram_dist, BROTLI_NUM_DISTANCE_SYMBOLS, self->cost_dist_);

  for (i = 0; i < BROTLI_NUM_COMMAND_SYMBOLS; ++i) {
    min_cost_cmd = BROTLI_MIN(float, min_cost_cmd, cost_cmd[i]);
  }
  self->min_cost_cmd_ = min_cost_cmd;

  {
    float* literal_costs = self->literal_costs_;
    size_t num_bytes = self->num_bytes_;
    literal_costs[0] = 0.0;
    for (i = 0; i < num_bytes; ++i) {
      literal_costs[i + 1] = literal_costs[i] +
          cost_literal[ringbuffer[(position + i) & ringbuffer_mask]];
    }
  }
}

static void ZopfliCostModelSetFromLiteralCosts(ZopfliCostModel* self,
                                               size_t position,
                                               const uint8_t* ringbuffer,
                                               size_t ringbuffer_mask) {
  float* literal_costs = self->literal_costs_;
  float* cost_dist = self->cost_dist_;
  float* cost_cmd = self->cost_cmd_;
  size_t num_bytes = self->num_bytes_;
  size_t i;
  BrotliEstimateBitCostsForLiterals(position, num_bytes, ringbuffer_mask,
                                    ringbuffer, &literal_costs[1]);
  literal_costs[0] = 0.0;
  for (i = 0; i < num_bytes; ++i) {
    literal_costs[i + 1] += literal_costs[i];
  }
  for (i = 0; i < BROTLI_NUM_COMMAND_SYMBOLS; ++i) {
    cost_cmd[i] = (float)FastLog2(11 + (uint32_t)i);
  }
  for (i = 0; i < BROTLI_NUM_DISTANCE_SYMBOLS; ++i) {
    cost_dist[i] = (float)FastLog2(20 + (uint32_t)i);
  }
  self->min_cost_cmd_ = (float)FastLog2(11);
}

static BROTLI_INLINE float ZopfliCostModelGetCommandCost(
    const ZopfliCostModel* self, uint16_t cmdcode) {
  return self->cost_cmd_[cmdcode];
}

static BROTLI_INLINE float ZopfliCostModelGetDistanceCost(
    const ZopfliCostModel* self, size_t distcode) {
  return self->cost_dist_[distcode];
}

static BROTLI_INLINE float ZopfliCostModelGetLiteralCosts(
    const ZopfliCostModel* self, size_t from, size_t to) {
  return self->literal_costs_[to] - self->literal_costs_[from];
}

static BROTLI_INLINE float ZopfliCostModelGetMinCostCmd(
    const ZopfliCostModel* self) {
  return self->min_cost_cmd_;
}

/* REQUIRES: len >= 2, start_pos <= pos */
/* REQUIRES: cost < kInfinity, nodes[start_pos].cost < kInfinity */
/* Maintains the "ZopfliNode array invariant". */
static BROTLI_INLINE void UpdateZopfliNode(ZopfliNode* nodes, size_t pos,
    size_t start_pos, size_t len, size_t len_code, size_t dist,
    size_t short_code, float cost) {
  ZopfliNode* next = &nodes[pos + len];
  next->length = (uint32_t)(len | ((len + 9u - len_code) << 24));
  next->distance = (uint32_t)(dist | (short_code << 25));
  next->insert_length = (uint32_t)(pos - start_pos);
  next->u.cost = cost;
}

typedef struct PosData {
  size_t pos;
  int distance_cache[4];
  float costdiff;
  float cost;
} PosData;

/* Maintains the smallest 8 cost difference together with their positions */
typedef struct StartPosQueue {
  PosData q_[8];
  size_t idx_;
} StartPosQueue;

static BROTLI_INLINE void InitStartPosQueue(StartPosQueue* self) {
  self->idx_ = 0;
}

static size_t StartPosQueueSize(const StartPosQueue* self) {
  return BROTLI_MIN(size_t, self->idx_, 8);
}

static void StartPosQueuePush(StartPosQueue* self, const PosData* posdata) {
  size_t offset = ~(self->idx_++) & 7;
  size_t len = StartPosQueueSize(self);
  size_t i;
  PosData* q = self->q_;
  q[offset] = *posdata;
  /* Restore the sorted order. In the list of |len| items at most |len - 1|
     adjacent element comparisons / swaps are required. */
  for (i = 1; i < len; ++i) {
    if (q[offset & 7].costdiff > q[(offset + 1) & 7].costdiff) {
      BROTLI_SWAP(PosData, q, offset & 7, (offset + 1) & 7);
    }
    ++offset;
  }
}

static const PosData* StartPosQueueAt(const StartPosQueue* self, size_t k) {
  return &self->q_[(k - self->idx_) & 7];
}

/* Returns the minimum possible copy length that can improve the cost of any */
/* future position. */
static size_t ComputeMinimumCopyLength(const float start_cost,
                                       const ZopfliNode* nodes,
                                       const size_t num_bytes,
                                       const size_t pos) {
  /* Compute the minimum possible cost of reaching any future position. */
  float min_cost = start_cost;
  size_t len = 2;
  size_t next_len_bucket = 4;
  size_t next_len_offset = 10;
  while (pos + len <= num_bytes && nodes[pos + len].u.cost <= min_cost) {
    /* We already reached (pos + len) with no more cost than the minimum
       possible cost of reaching anything from this pos, so there is no point in
       looking for lengths <= len. */
    ++len;
    if (len == next_len_offset) {
      /* We reached the next copy length code bucket, so we add one more
         extra bit to the minimum cost. */
      min_cost += 1.0f;
      next_len_offset += next_len_bucket;
      next_len_bucket *= 2;
    }
  }
  return len;
}

/* REQUIRES: nodes[pos].cost < kInfinity
   REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
static uint32_t ComputeDistanceShortcut(const size_t block_start,
                                        const size_t pos,
                                        const size_t max_backward,
                                        const ZopfliNode* nodes) {
  const size_t clen = ZopfliNodeCopyLength(&nodes[pos]);
  const size_t ilen = nodes[pos].insert_length;
  const size_t dist = ZopfliNodeCopyDistance(&nodes[pos]);
  /* Since |block_start + pos| is the end position of the command, the copy part
     starts from |block_start + pos - clen|. Distances that are greater than
     this or greater than |max_backward| are static dictionary references, and
     do not update the last distances. Also distance code 0 (last distance)
     does not update the last distances. */
  if (pos == 0) {
    return 0;
  } else if (dist + clen <= block_start + pos &&
             dist <= max_backward &&
             ZopfliNodeDistanceCode(&nodes[pos]) > 0) {
    return (uint32_t)pos;
  } else {
    return nodes[pos - clen - ilen].u.shortcut;
  }
}

/* Fills in dist_cache[0..3] with the last four distances (as defined by
   Section 4. of the Spec) that would be used at (block_start + pos) if we
   used the shortest path of commands from block_start, computed from
   nodes[0..pos]. The last four distances at block_start are in
   starting_dist_cache[0..3].
   REQUIRES: nodes[pos].cost < kInfinity
   REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
static void ComputeDistanceCache(const size_t pos,
                                 const int* starting_dist_cache,
                                 const ZopfliNode* nodes,
                                 int* dist_cache) {
  int idx = 0;
  size_t p = nodes[pos].u.shortcut;
  while (idx < 4 && p > 0) {
    const size_t ilen = nodes[p].insert_length;
    const size_t clen = ZopfliNodeCopyLength(&nodes[p]);
    const size_t dist = ZopfliNodeCopyDistance(&nodes[p]);
    dist_cache[idx++] = (int)dist;
    /* Because of prerequisite, p >= clen + ilen >= 2. */
    p = nodes[p - clen - ilen].u.shortcut;
  }
  for (; idx < 4; ++idx) {
    dist_cache[idx] = *starting_dist_cache++;
  }
}

/* Maintains "ZopfliNode array invariant" and pushes node to the queue, if it
   is eligible. */
static void EvaluateNode(
    const size_t block_start, const size_t pos, const size_t max_backward_limit,
    const int* starting_dist_cache, const ZopfliCostModel* model,
    StartPosQueue* queue, ZopfliNode* nodes) {
  /* Save cost, because ComputeDistanceCache invalidates it. */
  float node_cost = nodes[pos].u.cost;
  nodes[pos].u.shortcut = ComputeDistanceShortcut(
      block_start, pos, max_backward_limit, nodes);
  if (node_cost <= ZopfliCostModelGetLiteralCosts(model, 0, pos)) {
    PosData posdata;
    posdata.pos = pos;
    posdata.cost = node_cost;
    posdata.costdiff = node_cost -
        ZopfliCostModelGetLiteralCosts(model, 0, pos);
    ComputeDistanceCache(
        pos, starting_dist_cache, nodes, posdata.distance_cache);
    StartPosQueuePush(queue, &posdata);
  }
}

/* Returns longest copy length. */
static size_t UpdateNodes(
    const size_t num_bytes, const size_t block_start, const size_t pos,
    const uint8_t* ringbuffer, const size_t ringbuffer_mask,
    const BrotliEncoderParams* params, const size_t max_backward_limit,
    const int* starting_dist_cache, const size_t num_matches,
    const BackwardMatch* matches, const ZopfliCostModel* model,
    StartPosQueue* queue, ZopfliNode* nodes) {
  const size_t cur_ix = block_start + pos;
  const size_t cur_ix_masked = cur_ix & ringbuffer_mask;
  const size_t max_distance = BROTLI_MIN(size_t, cur_ix, max_backward_limit);
  const size_t max_len = num_bytes - pos;
  const size_t max_zopfli_len = MaxZopfliLen(params);
  const size_t max_iters = MaxZopfliCandidates(params);
  size_t min_len;
  size_t result = 0;
  size_t k;

  EvaluateNode(block_start, pos, max_backward_limit, starting_dist_cache, model,
      queue, nodes);

  {
    const PosData* posdata = StartPosQueueAt(queue, 0);
    float min_cost = (posdata->cost + ZopfliCostModelGetMinCostCmd(model) +
        ZopfliCostModelGetLiteralCosts(model, posdata->pos, pos));
    min_len = ComputeMinimumCopyLength(min_cost, nodes, num_bytes, pos);
  }

  /* Go over the command starting positions in order of increasing cost
     difference. */
  for (k = 0; k < max_iters && k < StartPosQueueSize(queue); ++k) {
    const PosData* posdata = StartPosQueueAt(queue, k);
    const size_t start = posdata->pos;
    const uint16_t inscode = GetInsertLengthCode(pos - start);
    const float start_costdiff = posdata->costdiff;
    const float base_cost = start_costdiff + (float)GetInsertExtra(inscode) +
        ZopfliCostModelGetLiteralCosts(model, 0, pos);

    /* Look for last distance matches using the distance cache from this
       starting position. */
    size_t best_len = min_len - 1;
    size_t j = 0;
    for (; j < BROTLI_NUM_DISTANCE_SHORT_CODES && best_len < max_len; ++j) {
      const size_t idx = kDistanceCacheIndex[j];
      const size_t backward =
          (size_t)(posdata->distance_cache[idx] + kDistanceCacheOffset[j]);
      size_t prev_ix = cur_ix - backward;
      if (prev_ix >= cur_ix) {
        continue;
      }
      if (BROTLI_PREDICT_FALSE(backward > max_distance)) {
        continue;
      }
      prev_ix &= ringbuffer_mask;

      if (cur_ix_masked + best_len > ringbuffer_mask ||
          prev_ix + best_len > ringbuffer_mask ||
          ringbuffer[cur_ix_masked + best_len] !=
              ringbuffer[prev_ix + best_len]) {
        continue;
      }
      {
        const size_t len =
            FindMatchLengthWithLimit(&ringbuffer[prev_ix],
                                     &ringbuffer[cur_ix_masked],
                                     max_len);
        const float dist_cost = base_cost +
            ZopfliCostModelGetDistanceCost(model, j);
        size_t l;
        for (l = best_len + 1; l <= len; ++l) {
          const uint16_t copycode = GetCopyLengthCode(l);
          const uint16_t cmdcode =
              CombineLengthCodes(inscode, copycode, j == 0);
          const float cost = (cmdcode < 128 ? base_cost : dist_cost) +
              (float)GetCopyExtra(copycode) +
              ZopfliCostModelGetCommandCost(model, cmdcode);
          if (cost < nodes[pos + l].u.cost) {
            UpdateZopfliNode(nodes, pos, start, l, l, backward, j + 1, cost);
            result = BROTLI_MAX(size_t, result, l);
          }
          best_len = l;
        }
      }
    }

    /* At higher iterations look only for new last distance matches, since
       looking only for new command start positions with the same distances
       does not help much. */
    if (k >= 2) continue;

    {
      /* Loop through all possible copy lengths at this position. */
      size_t len = min_len;
      for (j = 0; j < num_matches; ++j) {
        BackwardMatch match = matches[j];
        size_t dist = match.distance;
        BROTLI_BOOL is_dictionary_match = TO_BROTLI_BOOL(dist > max_distance);
        /* We already tried all possible last distance matches, so we can use
           normal distance code here. */
        size_t dist_code = dist + BROTLI_NUM_DISTANCE_SHORT_CODES - 1;
        uint16_t dist_symbol;
        uint32_t distextra;
        uint32_t distnumextra;
        float dist_cost;
        size_t max_match_len;
        PrefixEncodeCopyDistance(dist_code, 0, 0, &dist_symbol, &distextra);
        distnumextra = distextra >> 24;
        dist_cost = base_cost + (float)distnumextra +
            ZopfliCostModelGetDistanceCost(model, dist_symbol);

        /* Try all copy lengths up until the maximum copy length corresponding
           to this distance. If the distance refers to the static dictionary, or
           the maximum length is long enough, try only one maximum length. */
        max_match_len = BackwardMatchLength(&match);
        if (len < max_match_len &&
            (is_dictionary_match || max_match_len > max_zopfli_len)) {
          len = max_match_len;
        }
        for (; len <= max_match_len; ++len) {
          const size_t len_code =
              is_dictionary_match ? BackwardMatchLengthCode(&match) : len;
          const uint16_t copycode = GetCopyLengthCode(len_code);
          const uint16_t cmdcode = CombineLengthCodes(inscode, copycode, 0);
          const float cost = dist_cost + (float)GetCopyExtra(copycode) +
              ZopfliCostModelGetCommandCost(model, cmdcode);
          if (cost < nodes[pos + len].u.cost) {
            UpdateZopfliNode(nodes, pos, start, len, len_code, dist, 0, cost);
            result = BROTLI_MAX(size_t, result, len);
          }
        }
      }
    }
  }
  return result;
}

static size_t ComputeShortestPathFromNodes(size_t num_bytes,
                                           ZopfliNode* nodes) {
  size_t index = num_bytes;
  size_t num_commands = 0;
  while (nodes[index].insert_length == 0 && nodes[index].length == 1) --index;
  nodes[index].u.next = BROTLI_UINT32_MAX;
  while (index != 0) {
    size_t len = ZopfliNodeCommandLength(&nodes[index]);
index -= len;
|
||||||
|
nodes[index].u.next = (uint32_t)len;
|
||||||
|
num_commands++;
|
||||||
|
}
|
||||||
|
return num_commands;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* REQUIRES: nodes != NULL and len(nodes) >= num_bytes + 1 */
|
||||||
|
void BrotliZopfliCreateCommands(const size_t num_bytes,
|
||||||
|
const size_t block_start,
|
||||||
|
const size_t max_backward_limit,
|
||||||
|
const ZopfliNode* nodes,
|
||||||
|
int* dist_cache,
|
||||||
|
size_t* last_insert_len,
|
||||||
|
Command* commands,
|
||||||
|
size_t* num_literals) {
|
||||||
|
size_t pos = 0;
|
||||||
|
uint32_t offset = nodes[0].u.next;
|
||||||
|
size_t i;
|
||||||
|
for (i = 0; offset != BROTLI_UINT32_MAX; i++) {
|
||||||
|
const ZopfliNode* next = &nodes[pos + offset];
|
||||||
|
size_t copy_length = ZopfliNodeCopyLength(next);
|
||||||
|
size_t insert_length = next->insert_length;
|
||||||
|
pos += insert_length;
|
||||||
|
offset = next->u.next;
|
||||||
|
if (i == 0) {
|
||||||
|
insert_length += *last_insert_len;
|
||||||
|
*last_insert_len = 0;
|
||||||
|
}
|
||||||
|
{
|
||||||
|
size_t distance = ZopfliNodeCopyDistance(next);
|
||||||
|
size_t len_code = ZopfliNodeLengthCode(next);
|
||||||
|
size_t max_distance =
|
||||||
|
BROTLI_MIN(size_t, block_start + pos, max_backward_limit);
|
||||||
|
BROTLI_BOOL is_dictionary = TO_BROTLI_BOOL(distance > max_distance);
|
||||||
|
size_t dist_code = ZopfliNodeDistanceCode(next);
|
||||||
|
|
||||||
|
InitCommand(&commands[i], insert_length,
|
||||||
|
copy_length, (int)len_code - (int)copy_length, dist_code);
|
||||||
|
|
||||||
|
if (!is_dictionary && dist_code > 0) {
|
||||||
|
dist_cache[3] = dist_cache[2];
|
||||||
|
dist_cache[2] = dist_cache[1];
|
||||||
|
dist_cache[1] = dist_cache[0];
|
||||||
|
dist_cache[0] = (int)distance;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
*num_literals += insert_length;
|
||||||
|
pos += copy_length;
|
||||||
|
}
|
||||||
|
*last_insert_len += num_bytes - pos;
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t ZopfliIterate(size_t num_bytes,
|
||||||
|
size_t position,
|
||||||
|
const uint8_t* ringbuffer,
|
||||||
|
size_t ringbuffer_mask,
|
||||||
|
const BrotliEncoderParams* params,
|
||||||
|
const size_t max_backward_limit,
|
||||||
|
const int* dist_cache,
|
||||||
|
const ZopfliCostModel* model,
|
||||||
|
const uint32_t* num_matches,
|
||||||
|
const BackwardMatch* matches,
|
||||||
|
ZopfliNode* nodes) {
|
||||||
|
const size_t max_zopfli_len = MaxZopfliLen(params);
|
||||||
|
StartPosQueue queue;
|
||||||
|
size_t cur_match_pos = 0;
|
||||||
|
size_t i;
|
||||||
|
nodes[0].length = 0;
|
||||||
|
nodes[0].u.cost = 0;
|
||||||
|
InitStartPosQueue(&queue);
|
||||||
|
for (i = 0; i + 3 < num_bytes; i++) {
|
||||||
|
size_t skip = UpdateNodes(num_bytes, position, i, ringbuffer,
|
||||||
|
ringbuffer_mask, params, max_backward_limit, dist_cache,
|
||||||
|
num_matches[i], &matches[cur_match_pos], model, &queue, nodes);
|
||||||
|
if (skip < BROTLI_LONG_COPY_QUICK_STEP) skip = 0;
|
||||||
|
cur_match_pos += num_matches[i];
|
||||||
|
if (num_matches[i] == 1 &&
|
||||||
|
BackwardMatchLength(&matches[cur_match_pos - 1]) > max_zopfli_len) {
|
||||||
|
skip = BROTLI_MAX(size_t,
|
||||||
|
BackwardMatchLength(&matches[cur_match_pos - 1]), skip);
|
||||||
|
}
|
||||||
|
if (skip > 1) {
|
||||||
|
skip--;
|
||||||
|
while (skip) {
|
||||||
|
i++;
|
||||||
|
if (i + 3 >= num_bytes) break;
|
||||||
|
EvaluateNode(
|
||||||
|
position, i, max_backward_limit, dist_cache, model, &queue, nodes);
|
||||||
|
cur_match_pos += num_matches[i];
|
||||||
|
skip--;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ComputeShortestPathFromNodes(num_bytes, nodes);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* REQUIRES: nodes != NULL and len(nodes) >= num_bytes + 1 */
|
||||||
|
size_t BrotliZopfliComputeShortestPath(MemoryManager* m,
|
||||||
|
const BrotliDictionary* dictionary,
|
||||||
|
size_t num_bytes,
|
||||||
|
size_t position,
|
||||||
|
const uint8_t* ringbuffer,
|
||||||
|
size_t ringbuffer_mask,
|
||||||
|
const BrotliEncoderParams* params,
|
||||||
|
const size_t max_backward_limit,
|
||||||
|
const int* dist_cache,
|
||||||
|
HasherHandle hasher,
|
||||||
|
ZopfliNode* nodes) {
|
||||||
|
const size_t max_zopfli_len = MaxZopfliLen(params);
|
||||||
|
ZopfliCostModel model;
|
||||||
|
StartPosQueue queue;
|
||||||
|
BackwardMatch matches[MAX_NUM_MATCHES_H10];
|
||||||
|
const size_t store_end = num_bytes >= StoreLookaheadH10() ?
|
||||||
|
position + num_bytes - StoreLookaheadH10() + 1 : position;
|
||||||
|
size_t i;
|
||||||
|
nodes[0].length = 0;
|
||||||
|
nodes[0].u.cost = 0;
|
||||||
|
InitZopfliCostModel(m, &model, num_bytes);
|
||||||
|
if (BROTLI_IS_OOM(m)) return 0;
|
||||||
|
ZopfliCostModelSetFromLiteralCosts(
|
||||||
|
&model, position, ringbuffer, ringbuffer_mask);
|
||||||
|
InitStartPosQueue(&queue);
|
||||||
|
for (i = 0; i + HashTypeLengthH10() - 1 < num_bytes; i++) {
|
||||||
|
const size_t pos = position + i;
|
||||||
|
const size_t max_distance = BROTLI_MIN(size_t, pos, max_backward_limit);
|
||||||
|
size_t num_matches = FindAllMatchesH10(hasher, dictionary, ringbuffer,
|
||||||
|
ringbuffer_mask, pos, num_bytes - i, max_distance, params, matches);
|
||||||
|
size_t skip;
|
||||||
|
if (num_matches > 0 &&
|
||||||
|
BackwardMatchLength(&matches[num_matches - 1]) > max_zopfli_len) {
|
||||||
|
matches[0] = matches[num_matches - 1];
|
||||||
|
num_matches = 1;
|
||||||
|
}
|
||||||
|
skip = UpdateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask,
|
||||||
|
params, max_backward_limit, dist_cache, num_matches, matches, &model,
|
||||||
|
&queue, nodes);
|
||||||
|
if (skip < BROTLI_LONG_COPY_QUICK_STEP) skip = 0;
|
||||||
|
if (num_matches == 1 && BackwardMatchLength(&matches[0]) > max_zopfli_len) {
|
||||||
|
skip = BROTLI_MAX(size_t, BackwardMatchLength(&matches[0]), skip);
|
||||||
|
}
|
||||||
|
if (skip > 1) {
|
||||||
|
/* Add the tail of the copy to the hasher. */
|
||||||
|
StoreRangeH10(hasher, ringbuffer, ringbuffer_mask, pos + 1, BROTLI_MIN(
|
||||||
|
size_t, pos + skip, store_end));
|
||||||
|
skip--;
|
||||||
|
while (skip) {
|
||||||
|
i++;
|
||||||
|
if (i + HashTypeLengthH10() - 1 >= num_bytes) break;
|
||||||
|
EvaluateNode(
|
||||||
|
position, i, max_backward_limit, dist_cache, &model, &queue, nodes);
|
||||||
|
skip--;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
CleanupZopfliCostModel(m, &model);
|
||||||
|
return ComputeShortestPathFromNodes(num_bytes, nodes);
|
||||||
|
}
|
||||||
|
|
||||||
|
void BrotliCreateZopfliBackwardReferences(
|
||||||
|
MemoryManager* m, const BrotliDictionary* dictionary, size_t num_bytes,
|
||||||
|
size_t position, const uint8_t* ringbuffer, size_t ringbuffer_mask,
|
||||||
|
const BrotliEncoderParams* params, HasherHandle hasher, int* dist_cache,
|
||||||
|
size_t* last_insert_len, Command* commands, size_t* num_commands,
|
||||||
|
size_t* num_literals) {
|
||||||
|
const size_t max_backward_limit = BROTLI_MAX_BACKWARD_LIMIT(params->lgwin);
|
||||||
|
ZopfliNode* nodes;
|
||||||
|
nodes = BROTLI_ALLOC(m, ZopfliNode, num_bytes + 1);
|
||||||
|
if (BROTLI_IS_OOM(m)) return;
|
||||||
|
BrotliInitZopfliNodes(nodes, num_bytes + 1);
|
||||||
|
*num_commands += BrotliZopfliComputeShortestPath(m, dictionary, num_bytes,
|
||||||
|
position, ringbuffer, ringbuffer_mask, params, max_backward_limit,
|
||||||
|
dist_cache, hasher, nodes);
|
||||||
|
if (BROTLI_IS_OOM(m)) return;
|
||||||
|
BrotliZopfliCreateCommands(num_bytes, position, max_backward_limit, nodes,
|
||||||
|
dist_cache, last_insert_len, commands, num_literals);
|
||||||
|
BROTLI_FREE(m, nodes);
|
||||||
|
}
|
||||||
|
|
||||||
|
void BrotliCreateHqZopfliBackwardReferences(
|
||||||
|
MemoryManager* m, const BrotliDictionary* dictionary, size_t num_bytes,
|
||||||
|
size_t position, const uint8_t* ringbuffer, size_t ringbuffer_mask,
|
||||||
|
const BrotliEncoderParams* params, HasherHandle hasher, int* dist_cache,
|
||||||
|
size_t* last_insert_len, Command* commands, size_t* num_commands,
|
||||||
|
size_t* num_literals) {
|
||||||
|
const size_t max_backward_limit = BROTLI_MAX_BACKWARD_LIMIT(params->lgwin);
|
||||||
|
uint32_t* num_matches = BROTLI_ALLOC(m, uint32_t, num_bytes);
|
||||||
|
size_t matches_size = 4 * num_bytes;
|
||||||
|
const size_t store_end = num_bytes >= StoreLookaheadH10() ?
|
||||||
|
position + num_bytes - StoreLookaheadH10() + 1 : position;
|
||||||
|
size_t cur_match_pos = 0;
|
||||||
|
size_t i;
|
||||||
|
size_t orig_num_literals;
|
||||||
|
size_t orig_last_insert_len;
|
||||||
|
int orig_dist_cache[4];
|
||||||
|
size_t orig_num_commands;
|
||||||
|
ZopfliCostModel model;
|
||||||
|
ZopfliNode* nodes;
|
||||||
|
BackwardMatch* matches = BROTLI_ALLOC(m, BackwardMatch, matches_size);
|
||||||
|
if (BROTLI_IS_OOM(m)) return;
|
||||||
|
for (i = 0; i + HashTypeLengthH10() - 1 < num_bytes; ++i) {
|
||||||
|
const size_t pos = position + i;
|
||||||
|
size_t max_distance = BROTLI_MIN(size_t, pos, max_backward_limit);
|
||||||
|
size_t max_length = num_bytes - i;
|
||||||
|
size_t num_found_matches;
|
||||||
|
size_t cur_match_end;
|
||||||
|
size_t j;
|
||||||
|
/* Ensure that we have enough free slots. */
|
||||||
|
BROTLI_ENSURE_CAPACITY(m, BackwardMatch, matches, matches_size,
|
||||||
|
cur_match_pos + MAX_NUM_MATCHES_H10);
|
||||||
|
if (BROTLI_IS_OOM(m)) return;
|
||||||
|
num_found_matches = FindAllMatchesH10(hasher, dictionary, ringbuffer,
|
||||||
|
ringbuffer_mask, pos, max_length, max_distance, params,
|
||||||
|
&matches[cur_match_pos]);
|
||||||
|
cur_match_end = cur_match_pos + num_found_matches;
|
||||||
|
for (j = cur_match_pos; j + 1 < cur_match_end; ++j) {
|
||||||
|
assert(BackwardMatchLength(&matches[j]) <
|
||||||
|
BackwardMatchLength(&matches[j + 1]));
|
||||||
|
assert(matches[j].distance > max_distance ||
|
||||||
|
matches[j].distance <= matches[j + 1].distance);
|
||||||
|
}
|
||||||
|
num_matches[i] = (uint32_t)num_found_matches;
|
||||||
|
if (num_found_matches > 0) {
|
||||||
|
const size_t match_len = BackwardMatchLength(&matches[cur_match_end - 1]);
|
||||||
|
if (match_len > MAX_ZOPFLI_LEN_QUALITY_11) {
|
||||||
|
const size_t skip = match_len - 1;
|
||||||
|
matches[cur_match_pos++] = matches[cur_match_end - 1];
|
||||||
|
num_matches[i] = 1;
|
||||||
|
/* Add the tail of the copy to the hasher. */
|
||||||
|
StoreRangeH10(hasher, ringbuffer, ringbuffer_mask, pos + 1,
|
||||||
|
BROTLI_MIN(size_t, pos + match_len, store_end));
|
||||||
|
memset(&num_matches[i + 1], 0, skip * sizeof(num_matches[0]));
|
||||||
|
i += skip;
|
||||||
|
} else {
|
||||||
|
cur_match_pos = cur_match_end;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
orig_num_literals = *num_literals;
|
||||||
|
orig_last_insert_len = *last_insert_len;
|
||||||
|
memcpy(orig_dist_cache, dist_cache, 4 * sizeof(dist_cache[0]));
|
||||||
|
orig_num_commands = *num_commands;
|
||||||
|
nodes = BROTLI_ALLOC(m, ZopfliNode, num_bytes + 1);
|
||||||
|
if (BROTLI_IS_OOM(m)) return;
|
||||||
|
InitZopfliCostModel(m, &model, num_bytes);
|
||||||
|
if (BROTLI_IS_OOM(m)) return;
|
||||||
|
for (i = 0; i < 2; i++) {
|
||||||
|
BrotliInitZopfliNodes(nodes, num_bytes + 1);
|
||||||
|
if (i == 0) {
|
||||||
|
ZopfliCostModelSetFromLiteralCosts(
|
||||||
|
&model, position, ringbuffer, ringbuffer_mask);
|
||||||
|
} else {
|
||||||
|
ZopfliCostModelSetFromCommands(&model, position, ringbuffer,
|
||||||
|
ringbuffer_mask, commands, *num_commands - orig_num_commands,
|
||||||
|
orig_last_insert_len);
|
||||||
|
}
|
||||||
|
*num_commands = orig_num_commands;
|
||||||
|
*num_literals = orig_num_literals;
|
||||||
|
*last_insert_len = orig_last_insert_len;
|
||||||
|
memcpy(dist_cache, orig_dist_cache, 4 * sizeof(dist_cache[0]));
|
||||||
|
*num_commands += ZopfliIterate(num_bytes, position, ringbuffer,
|
||||||
|
ringbuffer_mask, params, max_backward_limit, dist_cache,
|
||||||
|
&model, num_matches, matches, nodes);
|
||||||
|
BrotliZopfliCreateCommands(num_bytes, position, max_backward_limit,
|
||||||
|
nodes, dist_cache, last_insert_len, commands, num_literals);
|
||||||
|
}
|
||||||
|
CleanupZopfliCostModel(m, &model);
|
||||||
|
BROTLI_FREE(m, nodes);
|
||||||
|
BROTLI_FREE(m, matches);
|
||||||
|
BROTLI_FREE(m, num_matches);
|
||||||
|
}
|
||||||
|
|
||||||
|
#if defined(__cplusplus) || defined(c_plusplus)
|
||||||
|
} /* extern "C" */
|
||||||
|
#endif
|
|
@ -0,0 +1,35 @@
/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Functions to estimate the bit cost of Huffman trees. */

#include "./enc/bit_cost.h"

#include "./common/constants.h"
#include <brotli/types.h>
#include "./enc/fast_log.h"
#include "./enc/histogram.h"
#include "./enc/port.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

#define FN(X) X ## Literal
#include "./enc/bit_cost_inc.h"  /* NOLINT(build/include) */
#undef FN

#define FN(X) X ## Command
#include "./enc/bit_cost_inc.h"  /* NOLINT(build/include) */
#undef FN

#define FN(X) X ## Distance
#include "./enc/bit_cost_inc.h"  /* NOLINT(build/include) */
#undef FN

#if defined(__cplusplus) || defined(c_plusplus)
}  /* extern "C" */
#endif
@ -0,0 +1,48 @@
/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Bit reading helpers */

#include "./dec/bit_reader.h"

#include <brotli/types.h>
#include "./dec/port.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

void BrotliInitBitReader(BrotliBitReader* const br) {
  br->val_ = 0;
  br->bit_pos_ = sizeof(br->val_) << 3;
}

BROTLI_BOOL BrotliWarmupBitReader(BrotliBitReader* const br) {
  size_t aligned_read_mask = (sizeof(br->val_) >> 1) - 1;
  /* Fixing alignment after an unaligned BrotliFillWindow would result in
     accumulator overflow. If the misalignment is caused by
     BrotliSafeReadBits, then there is enough space in the accumulator to fix
     the alignment. */
  if (!BROTLI_ALIGNED_READ) {
    aligned_read_mask = 0;
  }
  if (BrotliGetAvailableBits(br) == 0) {
    if (!BrotliPullByte(br)) {
      return BROTLI_FALSE;
    }
  }

  while ((((size_t)br->next_in) & aligned_read_mask) != 0) {
    if (!BrotliPullByte(br)) {
      /* If we consumed all the input, we don't care about the alignment. */
      return BROTLI_TRUE;
    }
  }
  return BROTLI_TRUE;
}

#if defined(__cplusplus) || defined(c_plusplus)
}  /* extern "C" */
#endif
@ -0,0 +1,197 @@
/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Block split point selection utilities. */

#include "./enc/block_splitter.h"

#include <assert.h>
#include <string.h>  /* memcpy, memset */

#include "./enc/bit_cost.h"
#include "./enc/cluster.h"
#include "./enc/command.h"
#include "./enc/fast_log.h"
#include "./enc/histogram.h"
#include "./enc/memory.h"
#include "./enc/port.h"
#include "./enc/quality.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

static const size_t kMaxLiteralHistograms = 100;
static const size_t kMaxCommandHistograms = 50;
static const double kLiteralBlockSwitchCost = 28.1;
static const double kCommandBlockSwitchCost = 13.5;
static const double kDistanceBlockSwitchCost = 14.6;
static const size_t kLiteralStrideLength = 70;
static const size_t kCommandStrideLength = 40;
static const size_t kSymbolsPerLiteralHistogram = 544;
static const size_t kSymbolsPerCommandHistogram = 530;
static const size_t kSymbolsPerDistanceHistogram = 544;
static const size_t kMinLengthForBlockSplitting = 128;
static const size_t kIterMulForRefining = 2;
static const size_t kMinItersForRefining = 100;

static size_t CountLiterals(const Command* cmds, const size_t num_commands) {
  /* Count how many we have. */
  size_t total_length = 0;
  size_t i;
  for (i = 0; i < num_commands; ++i) {
    total_length += cmds[i].insert_len_;
  }
  return total_length;
}

static void CopyLiteralsToByteArray(const Command* cmds,
                                    const size_t num_commands,
                                    const uint8_t* data,
                                    const size_t offset,
                                    const size_t mask,
                                    uint8_t* literals) {
  size_t pos = 0;
  size_t from_pos = offset & mask;
  size_t i;
  for (i = 0; i < num_commands; ++i) {
    size_t insert_len = cmds[i].insert_len_;
    if (from_pos + insert_len > mask) {
      size_t head_size = mask + 1 - from_pos;
      memcpy(literals + pos, data + from_pos, head_size);
      from_pos = 0;
      pos += head_size;
      insert_len -= head_size;
    }
    if (insert_len > 0) {
      memcpy(literals + pos, data + from_pos, insert_len);
      pos += insert_len;
    }
    from_pos = (from_pos + insert_len + CommandCopyLen(&cmds[i])) & mask;
  }
}

static BROTLI_INLINE unsigned int MyRand(unsigned int* seed) {
  *seed *= 16807U;
  if (*seed == 0) {
    *seed = 1;
  }
  return *seed;
}

static BROTLI_INLINE double BitCost(size_t count) {
  return count == 0 ? -2.0 : FastLog2(count);
}

#define HISTOGRAMS_PER_BATCH 64
#define CLUSTERS_PER_BATCH 16

#define FN(X) X ## Literal
#define DataType uint8_t
/* NOLINTNEXTLINE(build/include) */
#include "./enc/block_splitter_inc.h"
#undef DataType
#undef FN

#define FN(X) X ## Command
#define DataType uint16_t
/* NOLINTNEXTLINE(build/include) */
#include "./enc/block_splitter_inc.h"
#undef FN

#define FN(X) X ## Distance
/* NOLINTNEXTLINE(build/include) */
#include "./enc/block_splitter_inc.h"
#undef DataType
#undef FN

void BrotliInitBlockSplit(BlockSplit* self) {
  self->num_types = 0;
  self->num_blocks = 0;
  self->types = 0;
  self->lengths = 0;
  self->types_alloc_size = 0;
  self->lengths_alloc_size = 0;
}

void BrotliDestroyBlockSplit(MemoryManager* m, BlockSplit* self) {
  BROTLI_FREE(m, self->types);
  BROTLI_FREE(m, self->lengths);
}

void BrotliSplitBlock(MemoryManager* m,
                      const Command* cmds,
                      const size_t num_commands,
                      const uint8_t* data,
                      const size_t pos,
                      const size_t mask,
                      const BrotliEncoderParams* params,
                      BlockSplit* literal_split,
                      BlockSplit* insert_and_copy_split,
                      BlockSplit* dist_split) {
  {
    size_t literals_count = CountLiterals(cmds, num_commands);
    uint8_t* literals = BROTLI_ALLOC(m, uint8_t, literals_count);
    if (BROTLI_IS_OOM(m)) return;
    /* Create a continuous array of literals. */
    CopyLiteralsToByteArray(cmds, num_commands, data, pos, mask, literals);
    /* Create the block split on the array of literals.
       Literal histograms have alphabet size 256. */
    SplitByteVectorLiteral(
        m, literals, literals_count,
        kSymbolsPerLiteralHistogram, kMaxLiteralHistograms,
        kLiteralStrideLength, kLiteralBlockSwitchCost, params,
        literal_split);
    if (BROTLI_IS_OOM(m)) return;
    BROTLI_FREE(m, literals);
  }

  {
    /* Compute prefix codes for commands. */
    uint16_t* insert_and_copy_codes = BROTLI_ALLOC(m, uint16_t, num_commands);
    size_t i;
    if (BROTLI_IS_OOM(m)) return;
    for (i = 0; i < num_commands; ++i) {
      insert_and_copy_codes[i] = cmds[i].cmd_prefix_;
    }
    /* Create the block split on the array of command prefixes. */
    SplitByteVectorCommand(
        m, insert_and_copy_codes, num_commands,
        kSymbolsPerCommandHistogram, kMaxCommandHistograms,
        kCommandStrideLength, kCommandBlockSwitchCost, params,
        insert_and_copy_split);
    if (BROTLI_IS_OOM(m)) return;
    /* TODO: reuse for distances? */
    BROTLI_FREE(m, insert_and_copy_codes);
  }

  {
    /* Create a continuous array of distance prefixes. */
    uint16_t* distance_prefixes = BROTLI_ALLOC(m, uint16_t, num_commands);
    size_t j = 0;
    size_t i;
    if (BROTLI_IS_OOM(m)) return;
    for (i = 0; i < num_commands; ++i) {
      const Command* cmd = &cmds[i];
      if (CommandCopyLen(cmd) && cmd->cmd_prefix_ >= 128) {
        distance_prefixes[j++] = cmd->dist_prefix_;
      }
    }
    /* Create the block split on the array of distance prefixes. */
    SplitByteVectorDistance(
        m, distance_prefixes, j,
        kSymbolsPerDistanceHistogram, kMaxCommandHistograms,
        kCommandStrideLength, kDistanceBlockSwitchCost, params,
        dist_split);
    if (BROTLI_IS_OOM(m)) return;
    BROTLI_FREE(m, distance_prefixes);
  }
}

#if defined(__cplusplus) || defined(c_plusplus)
}  /* extern "C" */
#endif
@ -0,0 +1,362 @@
/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/**
 * @file
 * API for Brotli decompression.
 */

#ifndef BROTLI_DEC_DECODE_H_
#define BROTLI_DEC_DECODE_H_

#include <brotli/port.h>
#include <brotli/types.h>

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/**
 * Opaque structure that holds decoder state.
 *
 * Allocated and initialized with ::BrotliDecoderCreateInstance.
 * Cleaned up and deallocated with ::BrotliDecoderDestroyInstance.
 */
typedef struct BrotliDecoderStateStruct BrotliDecoderState;

/**
 * Result type for ::BrotliDecoderDecompress and
 * ::BrotliDecoderDecompressStream functions.
 */
typedef enum {
  /** Decoding error, e.g. corrupted input or memory allocation problem. */
  BROTLI_DECODER_RESULT_ERROR = 0,
  /** Decoding successfully completed */
  BROTLI_DECODER_RESULT_SUCCESS = 1,
  /** Partially done; should be called again with more input */
  BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT = 2,
  /** Partially done; should be called again with more output */
  BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT = 3
} BrotliDecoderResult;

/**
 * Template that evaluates items of ::BrotliDecoderErrorCode.
 *
 * Example: @code {.cpp}
 * // Log Brotli error code.
 * switch (brotliDecoderErrorCode) {
 * #define CASE_(PREFIX, NAME, CODE) \
 *   case BROTLI_DECODER ## PREFIX ## NAME: \
 *     LOG(INFO) << "error code:" << #NAME; \
 *     break;
 * #define NEWLINE_
 * BROTLI_DECODER_ERROR_CODES_LIST(CASE_, NEWLINE_)
 * #undef CASE_
 * #undef NEWLINE_
 *   default: LOG(FATAL) << "unknown brotli error code";
 * }
 * @endcode
 */
#define BROTLI_DECODER_ERROR_CODES_LIST(BROTLI_ERROR_CODE, SEPARATOR) \
  BROTLI_ERROR_CODE(_, NO_ERROR, 0) SEPARATOR \
  /* Same as BrotliDecoderResult values */ \
  BROTLI_ERROR_CODE(_, SUCCESS, 1) SEPARATOR \
  BROTLI_ERROR_CODE(_, NEEDS_MORE_INPUT, 2) SEPARATOR \
  BROTLI_ERROR_CODE(_, NEEDS_MORE_OUTPUT, 3) SEPARATOR \
  \
  /* Errors caused by invalid input */ \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, EXUBERANT_NIBBLE, -1) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, RESERVED, -2) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, EXUBERANT_META_NIBBLE, -3) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, SIMPLE_HUFFMAN_ALPHABET, -4) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, SIMPLE_HUFFMAN_SAME, -5) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, CL_SPACE, -6) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, HUFFMAN_SPACE, -7) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, CONTEXT_MAP_REPEAT, -8) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, BLOCK_LENGTH_1, -9) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, BLOCK_LENGTH_2, -10) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, TRANSFORM, -11) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, DICTIONARY, -12) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, WINDOW_BITS, -13) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, PADDING_1, -14) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_FORMAT_, PADDING_2, -15) SEPARATOR \
  \
  /* -16..-18 codes are reserved */ \
  \
  BROTLI_ERROR_CODE(_ERROR_, DICTIONARY_NOT_SET, -19) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_, INVALID_ARGUMENTS, -20) SEPARATOR \
  \
  /* Memory allocation problems */ \
  BROTLI_ERROR_CODE(_ERROR_ALLOC_, CONTEXT_MODES, -21) SEPARATOR \
  /* Literal, insert and distance trees together */ \
  BROTLI_ERROR_CODE(_ERROR_ALLOC_, TREE_GROUPS, -22) SEPARATOR \
  /* -23..-24 codes are reserved for distinct tree groups */ \
  BROTLI_ERROR_CODE(_ERROR_ALLOC_, CONTEXT_MAP, -25) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_ALLOC_, RING_BUFFER_1, -26) SEPARATOR \
  BROTLI_ERROR_CODE(_ERROR_ALLOC_, RING_BUFFER_2, -27) SEPARATOR \
  /* -28..-29 codes are reserved for dynamic ring-buffer allocation */ \
  BROTLI_ERROR_CODE(_ERROR_ALLOC_, BLOCK_TYPE_TREES, -30) SEPARATOR \
  \
  /* "Impossible" states */ \
  BROTLI_ERROR_CODE(_ERROR_, UNREACHABLE, -31)

/**
 * Error code for detailed logging / production debugging.
 *
 * See ::BrotliDecoderGetErrorCode and ::BROTLI_LAST_ERROR_CODE.
 */
typedef enum {
#define BROTLI_COMMA_ ,
#define BROTLI_ERROR_CODE_ENUM_ITEM_(PREFIX, NAME, CODE) \
    BROTLI_DECODER ## PREFIX ## NAME = CODE
  BROTLI_DECODER_ERROR_CODES_LIST(BROTLI_ERROR_CODE_ENUM_ITEM_, BROTLI_COMMA_)
} BrotliDecoderErrorCode;
#undef BROTLI_ERROR_CODE_ENUM_ITEM_
#undef BROTLI_COMMA_

/**
 * The value of the last error code, negative integer.
 *
 * All other error code values are in the range from ::BROTLI_LAST_ERROR_CODE
 * to @c -1. There are also 4 other possible non-error codes @c 0 .. @c 3 in
 * ::BrotliDecoderErrorCode enumeration.
 */
#define BROTLI_LAST_ERROR_CODE BROTLI_DECODER_ERROR_UNREACHABLE

/** Options to be used with ::BrotliDecoderSetParameter. */
typedef enum BrotliDecoderParameter {
  /**
   * Disable "canny" ring buffer allocation strategy.
   *
   * Ring buffer is allocated according to window size, despite the real size
   * of the content.
   */
  BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION = 0
} BrotliDecoderParameter;

/**
 * Sets the specified parameter to the given decoder instance.
 *
 * @param state decoder instance
 * @param param parameter to set
 * @param value new parameter value
 * @returns ::BROTLI_FALSE if parameter is unrecognized, or value is invalid
 * @returns ::BROTLI_TRUE if value is accepted
 */
BROTLI_DEC_API BROTLI_BOOL BrotliDecoderSetParameter(
    BrotliDecoderState* state, BrotliDecoderParameter param, uint32_t value);

/**
 * Creates an instance of ::BrotliDecoderState and initializes it.
 *
 * The instance can be used once for decoding and should then be destroyed
 * with ::BrotliDecoderDestroyInstance; it cannot be reused for a new decoding
 * session.
 *
 * @p alloc_func and @p free_func @b MUST be both zero or both non-zero. In the
 * case they are both zero, default memory allocators are used. @p opaque is
 * passed to @p alloc_func and @p free_func when they are called.
 *
 * @param alloc_func custom memory allocation function
 * @param free_func custom memory free function
 * @param opaque custom memory manager handle
 * @returns @c 0 if instance can not be allocated or initialized
 * @returns pointer to initialized ::BrotliDecoderState otherwise
 */
BROTLI_DEC_API BrotliDecoderState* BrotliDecoderCreateInstance(
    brotli_alloc_func alloc_func, brotli_free_func free_func, void* opaque);

/**
 * Deinitializes and frees ::BrotliDecoderState instance.
 *
 * @param state decoder instance to be cleaned up and deallocated
 */
BROTLI_DEC_API void BrotliDecoderDestroyInstance(BrotliDecoderState* state);

/**
 * Performs one-shot memory-to-memory decompression.
 *
 * Decompresses the data in @p encoded_buffer into @p decoded_buffer, and sets
 * @p *decoded_size to the decompressed length.
 *
 * @param encoded_size size of @p encoded_buffer
 * @param encoded_buffer compressed data buffer with at least @p encoded_size
 *        addressable bytes
 * @param[in, out] decoded_size @b in: size of @p decoded_buffer; \n
 *                 @b out: length of decompressed data written to
 *                 @p decoded_buffer
 * @param decoded_buffer decompressed data destination buffer
 * @returns ::BROTLI_DECODER_RESULT_ERROR if input is corrupted, memory
 *          allocation failed, or @p decoded_buffer is not large enough;
 * @returns ::BROTLI_DECODER_RESULT_SUCCESS otherwise
 */
BROTLI_DEC_API BrotliDecoderResult BrotliDecoderDecompress(
    size_t encoded_size,
    const uint8_t encoded_buffer[BROTLI_ARRAY_PARAM(encoded_size)],
    size_t* decoded_size,
    uint8_t decoded_buffer[BROTLI_ARRAY_PARAM(*decoded_size)]);
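
/* Editor's illustration (not part of the original header): a minimal sketch
   of the one-shot API above. It assumes the caller already knows an upper
   bound for the decompressed size and passes it as `decoded_capacity`; the
   streaming API below avoids that assumption. Uses only declarations from
   this header. */
static int DecodeAllSketch(const uint8_t* encoded, size_t encoded_size,
                           uint8_t* decoded, size_t decoded_capacity,
                           size_t* decoded_size) {
  *decoded_size = decoded_capacity;  /* in: capacity; out: actual length */
  return BrotliDecoderDecompress(encoded_size, encoded, decoded_size,
                                 decoded) == BROTLI_DECODER_RESULT_SUCCESS;
}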

/**
 * Decompresses the input stream to the output stream.
 *
 * The values @p *available_in and @p *available_out must specify the number of
 * bytes addressable at @p *next_in and @p *next_out respectively.
 * When @p *available_out is @c 0, @p next_out is allowed to be @c NULL.
 *
 * After each call, @p *available_in will be decremented by the amount of input
 * bytes consumed, and the @p *next_in pointer will be incremented by that
 * amount. Similarly, @p *available_out will be decremented by the amount of
 * output bytes written, and the @p *next_out pointer will be incremented by
 * that amount.
 *
 * @p total_out, if it is not a null-pointer, will be set to the number
 * of bytes decompressed since the last @p state initialization.
 *
 * @note Input is never overconsumed, so @p next_in and @p available_in could be
 *       passed to the next consumer after decoding is complete.
 *
 * @param state decoder instance
 * @param[in, out] available_in @b in: amount of available input; \n
 *                 @b out: amount of unused input
 * @param[in, out] next_in pointer to the next compressed byte
 * @param[in, out] available_out @b in: length of output buffer; \n
 *                 @b out: remaining size of output buffer
 * @param[in, out] next_out output buffer cursor;
 *                 can be @c NULL if @p available_out is @c 0
 * @param[out] total_out number of bytes decompressed so far; can be @c NULL
 * @returns ::BROTLI_DECODER_RESULT_ERROR if input is corrupted, memory
 *          allocation failed, arguments were invalid, etc.;
 *          use ::BrotliDecoderGetErrorCode to get detailed error code
 * @returns ::BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT decoding is blocked until
 *          more input data is provided
 * @returns ::BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT decoding is blocked until
 *          more output space is provided
 * @returns ::BROTLI_DECODER_RESULT_SUCCESS decoding is finished, no more
 *          input might be consumed and no more output will be produced
 */
BROTLI_DEC_API BrotliDecoderResult BrotliDecoderDecompressStream(
    BrotliDecoderState* state, size_t* available_in, const uint8_t** next_in,
    size_t* available_out, uint8_t** next_out, size_t* total_out);
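
/* Editor's illustration (not part of the original header): a sketch of the
   streaming loop implied by the contract above. Assumes <stdio.h> and two
   open FILE* handles; the 4096-byte buffer size is arbitrary. Returns 1 on
   success, 0 on error or truncated input. */
static int DecompressFileSketch(FILE* fin, FILE* fout) {
  uint8_t ibuf[4096];
  uint8_t obuf[4096];
  const uint8_t* next_in = ibuf;
  size_t available_in = 0;
  BrotliDecoderResult result = BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT;
  BrotliDecoderState* s = BrotliDecoderCreateInstance(NULL, NULL, NULL);
  if (!s) return 0;
  while (result != BROTLI_DECODER_RESULT_SUCCESS &&
         result != BROTLI_DECODER_RESULT_ERROR) {
    if (result == BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT) {
      available_in = fread(ibuf, 1, sizeof(ibuf), fin);
      next_in = ibuf;
      if (available_in == 0) break;  /* truncated stream */
    }
    {
      size_t available_out = sizeof(obuf);
      uint8_t* next_out = obuf;
      result = BrotliDecoderDecompressStream(
          s, &available_in, &next_in, &available_out, &next_out, NULL);
      /* Flush whatever was produced, even on NEEDS_MORE_* results. */
      fwrite(obuf, 1, sizeof(obuf) - available_out, fout);
    }
  }
  {
    int ok = (result == BROTLI_DECODER_RESULT_SUCCESS) ? 1 : 0;
    BrotliDecoderDestroyInstance(s);
    return ok;
  }
}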

/**
 * Prepends LZ77 dictionary.
 *
 * Fills the fresh ::BrotliDecoderState with additional data corpus for LZ77
 * backward references.
 *
 * @note Not to be confused with the static dictionary (see RFC7932 section 8).
 * @warning The dictionary must exist in memory until decoding is done and
 *          is owned by the caller.
 *
 * Workflow:
 *  -# Allocate and initialize state with ::BrotliDecoderCreateInstance
 *  -# Invoke ::BrotliDecoderSetCustomDictionary
 *  -# Use ::BrotliDecoderDecompressStream
 *  -# Clean up and free state with ::BrotliDecoderDestroyInstance
 *
 * @param state decoder instance
 * @param size length of @p dict; should be less or equal to 2^24 (16MiB),
 *        otherwise the dictionary will be ignored
 * @param dict "dictionary"; @b MUST be the same as used during compression
 */
BROTLI_DEC_API void BrotliDecoderSetCustomDictionary(
    BrotliDecoderState* state, size_t size,
    const uint8_t dict[BROTLI_ARRAY_PARAM(size)]);

/**
 * Checks if decoder has more output.
 *
 * @param state decoder instance
 * @returns ::BROTLI_TRUE, if decoder has some unconsumed output
 * @returns ::BROTLI_FALSE otherwise
 */
BROTLI_DEC_API BROTLI_BOOL BrotliDecoderHasMoreOutput(
    const BrotliDecoderState* state);

/**
 * Acquires pointer to internal output buffer.
 *
 * This method is used to make language bindings easier and more efficient:
 *  -# push data to ::BrotliDecoderDecompressStream,
 *     until ::BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT is reported
 *  -# use ::BrotliDecoderTakeOutput to peek bytes and copy to language-specific
 *     entity
 *
 * This could also be useful if there is an output stream that is able to
 * consume all the provided data (e.g. when data is saved to file system).
 *
 * @attention After every call to ::BrotliDecoderTakeOutput @p *size bytes of
 *            output are considered consumed for all consecutive calls to the
 *            instance methods; returned pointer becomes invalidated as well.
 *
 * @note Decoder output is not guaranteed to be contiguous. This means that
 *       after the size-unrestricted call to ::BrotliDecoderTakeOutput,
 *       immediate next call to ::BrotliDecoderTakeOutput may return more data.
 *
 * @param state decoder instance
 * @param[in, out] size @b in: number of bytes caller is ready to take, @c 0 if
 *                 any amount could be handled; \n
 *                 @b out: amount of data pointed by returned pointer and
 *                 considered consumed; \n
 *                 out value is never greater than in value, unless it is @c 0
 * @returns pointer to output data
 */
BROTLI_DEC_API const uint8_t* BrotliDecoderTakeOutput(
    BrotliDecoderState* state, size_t* size);
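
/* Editor's illustration (not part of the original header): the "take output"
   pattern sketched above. It assumes a prior ::BrotliDecoderDecompressStream
   call was made with zero output space, leaving data in the internal buffer,
   and assumes <stdio.h> plus an open FILE* handle. */
static void DrainDecoderSketch(BrotliDecoderState* s, FILE* fout) {
  while (BrotliDecoderHasMoreOutput(s)) {
    size_t size = 0;  /* 0 means: hand over as much as is available */
    const uint8_t* data = BrotliDecoderTakeOutput(s, &size);
    fwrite(data, 1, size, fout);  /* these bytes are now considered consumed */
  }
}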

/**
 * Checks if instance has already consumed input.
 *
 * Instance that returns ::BROTLI_FALSE is considered "fresh" and could be
 * reused.
 *
 * @param state decoder instance
 * @returns ::BROTLI_TRUE if decoder has already used some input bytes
 * @returns ::BROTLI_FALSE otherwise
 */
BROTLI_DEC_API BROTLI_BOOL BrotliDecoderIsUsed(const BrotliDecoderState* state);

/**
 * Checks if decoder instance reached the final state.
 *
 * @param state decoder instance
 * @returns ::BROTLI_TRUE if decoder is in a state where it reached the end of
 *          the input and produced all of the output
 * @returns ::BROTLI_FALSE otherwise
 */
BROTLI_DEC_API BROTLI_BOOL BrotliDecoderIsFinished(
    const BrotliDecoderState* state);

/**
 * Acquires a detailed error code.
 *
 * Should be used only after ::BrotliDecoderDecompressStream returns
 * ::BROTLI_DECODER_RESULT_ERROR.
 *
 * See also ::BrotliDecoderErrorString
 *
 * @param state decoder instance
 * @returns last saved error code
 */
BROTLI_DEC_API BrotliDecoderErrorCode BrotliDecoderGetErrorCode(
    const BrotliDecoderState* state);

/**
 * Converts error code to a c-string.
 */
BROTLI_DEC_API const char* BrotliDecoderErrorString(BrotliDecoderErrorCode c);

/**
 * Gets a decoder library version.
 *
 * Look at BROTLI_VERSION for more information.
 */
BROTLI_DEC_API uint32_t BrotliDecoderVersion(void);

#if defined(__cplusplus) || defined(c_plusplus)
}  /* extern "C" */
#endif

#endif  /* BROTLI_DEC_DECODE_H_ */
@ -0,0 +1,421 @@
|
||||||
|
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||||
|
|
||||||
|
Distributed under MIT license.
|
||||||
|
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file
|
||||||
|
* API for Brotli compression.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef BROTLI_ENC_ENCODE_H_
|
||||||
|
#define BROTLI_ENC_ENCODE_H_
|
||||||
|
|
||||||
|
#include <brotli/port.h>
|
||||||
|
#include <brotli/types.h>
|
||||||
|
|
||||||
|
#if defined(__cplusplus) || defined(c_plusplus)
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/** Minimal value for ::BROTLI_PARAM_LGWIN parameter. */
|
||||||
|
#define BROTLI_MIN_WINDOW_BITS 10
|
||||||
|
/**
|
||||||
|
* Maximal value for ::BROTLI_PARAM_LGWIN parameter.
|
||||||
|
*
|
||||||
|
* @note equal to @c BROTLI_MAX_DISTANCE_BITS constant.
|
||||||
|
*/
|
||||||
|
#define BROTLI_MAX_WINDOW_BITS 24
|
||||||
|
/** Minimal value for ::BROTLI_PARAM_LGBLOCK parameter. */
|
||||||
|
#define BROTLI_MIN_INPUT_BLOCK_BITS 16
|
||||||
|
/** Maximal value for ::BROTLI_PARAM_LGBLOCK parameter. */
|
||||||
|
#define BROTLI_MAX_INPUT_BLOCK_BITS 24
|
||||||
|
/** Minimal value for ::BROTLI_PARAM_QUALITY parameter. */
|
||||||
|
#define BROTLI_MIN_QUALITY 0
|
||||||
|
/** Maximal value for ::BROTLI_PARAM_QUALITY parameter. */
|
||||||
|
#define BROTLI_MAX_QUALITY 11
|
||||||
|
|
||||||
|
/** Options for ::BROTLI_PARAM_MODE parameter. */
|
||||||
|
typedef enum BrotliEncoderMode {
|
||||||
|
/**
|
||||||
|
* Default compression mode.
|
||||||
|
*
|
||||||
|
* In this mode compressor does not know anything in advance about the
|
||||||
|
* properties of the input.
|
||||||
|
*/
|
||||||
|
BROTLI_MODE_GENERIC = 0,
|
||||||
|
/** Compression mode for UTF-8 formatted text input. */
|
||||||
|
BROTLI_MODE_TEXT = 1,
|
||||||
|
/** Compression mode used in WOFF 2.0. */
|
||||||
|
BROTLI_MODE_FONT = 2
|
||||||
|
} BrotliEncoderMode;
|
||||||
|
|
||||||
|
/** Default value for ::BROTLI_PARAM_QUALITY parameter. */
|
||||||
|
#define BROTLI_DEFAULT_QUALITY 11
|
||||||
|
/** Default value for ::BROTLI_PARAM_LGWIN parameter. */
|
||||||
|
#define BROTLI_DEFAULT_WINDOW 22
|
||||||
|
/** Default value for ::BROTLI_PARAM_MODE parameter. */
|
||||||
|
#define BROTLI_DEFAULT_MODE BROTLI_MODE_GENERIC
|
||||||
|
|
||||||
|
/** Operations that can be performed by streaming encoder. */
|
||||||
|
typedef enum BrotliEncoderOperation {
|
||||||
|
/**
|
||||||
|
* Process input.
|
||||||
|
*
|
||||||
|
* Encoder may postpone producing output, until it has processed enough input.
|
||||||
|
*/
|
||||||
|
BROTLI_OPERATION_PROCESS = 0,
|
||||||
|
/**
|
||||||
|
* Produce output for all processed input.
|
||||||
|
*
|
||||||
|
* Actual flush is performed when input stream is depleted and there is enough
|
||||||
|
* space in output stream. This means that client should repeat
|
||||||
|
* ::BROTLI_OPERATION_FLUSH operation until @p available_in becomes @c 0, and
|
||||||
|
* ::BrotliEncoderHasMoreOutput returns ::BROTLI_FALSE.
|
||||||
|
*
|
||||||
|
* @warning Until flush is complete, client @b SHOULD @b NOT swap,
|
||||||
|
* reduce or extend input stream.
|
||||||
|
*
|
||||||
|
* When flush is complete, output data will be sufficient for decoder to
|
||||||
|
* reproduce all the given input.
|
||||||
|
*/
|
||||||
|
BROTLI_OPERATION_FLUSH = 1,
|
||||||
|
/**
|
||||||
|
* Finalize the stream.
|
||||||
|
*
|
||||||
|
* Actual finalization is performed when input stream is depleted and there is
|
||||||
|
* enough space in output stream. This means that client should repeat
|
||||||
|
* ::BROTLI_OPERATION_FLUSH operation until @p available_in becomes @c 0, and
|
||||||
|
* ::BrotliEncoderHasMoreOutput returns ::BROTLI_FALSE.
|
||||||
|
*
|
||||||
|
* @warning Until finalization is complete, client @b SHOULD @b NOT swap,
|
||||||
|
* reduce or extend input stream.
|
||||||
|
*
|
||||||
|
* Helper function ::BrotliEncoderIsFinished checks if stream is finalized and
|
||||||
|
* output fully dumped.
|
||||||
|
*
|
||||||
|
* Adding more input data to finalized stream is impossible.
|
||||||
|
*/
|
||||||
|
BROTLI_OPERATION_FINISH = 2,
|
||||||
|
/**
|
||||||
|
* Emit metadata block to stream.
|
||||||
|
*
|
||||||
|
* Metadata is opaque to Brotli: neither encoder, nor decoder processes this
|
||||||
|
* data or relies on it. It may be used to pass some extra information from
|
||||||
|
* encoder client to decoder client without interfering with main data stream.
|
||||||
|
*
|
||||||
|
* @note Encoder may emit empty metadata blocks internally, to pad encoded
|
||||||
|
* stream to byte boundary.
|
||||||
|
*
|
||||||
|
* @warning Until emitting metadata is complete client @b SHOULD @b NOT swap,
|
||||||
|
* reduce or extend input stream.
|
||||||
|
*
|
||||||
|
* @warning The whole content of input buffer is considered to be the content
|
||||||
|
* of metadata block. Do @b NOT @e append metadata to input stream,
|
||||||
|
* before it is depleted with other operations.
|
||||||
|
*
|
||||||
|
* Stream is soft-flushed before metadata block is emitted. Metadata block
|
||||||
|
* @b MUST be no longer than than 16MiB.
|
||||||
|
*/
|
||||||
|
BROTLI_OPERATION_EMIT_METADATA = 3
|
||||||
|
} BrotliEncoderOperation;
|
||||||
|
|
||||||
|
/** Options to be used with ::BrotliEncoderSetParameter. */
|
||||||
|
typedef enum BrotliEncoderParameter {
|
||||||
|
/**
|
||||||
|
* Tune encoder for specific input.
|
||||||
|
*
|
||||||
|
* ::BrotliEncoderMode enumerates all available values.
|
||||||
|
*/
|
||||||
|
BROTLI_PARAM_MODE = 0,
|
||||||
|
/**
|
||||||
|
* The main compression speed-density lever.
|
||||||
|
*
|
||||||
|
* The higher the quality, the slower the compression. Range is
|
||||||
|
* from ::BROTLI_MIN_QUALITY to ::BROTLI_MAX_QUALITY.
|
||||||
|
*/
|
||||||
|
BROTLI_PARAM_QUALITY = 1,
|
||||||
|
/**
|
||||||
|
* Recommended sliding LZ77 window size.
|
||||||
|
*
|
||||||
|
* Encoder may reduce this value, e.g. if input is much smaller than
|
||||||
|
* window size.
|
||||||
|
*
|
||||||
|
* Window size is `(1 << value) - 16`.
|
||||||
|
*
|
||||||
|
* Range is from ::BROTLI_MIN_WINDOW_BITS to ::BROTLI_MAX_WINDOW_BITS.
|
||||||
|
*/
|
||||||
|
BROTLI_PARAM_LGWIN = 2,
|
||||||
|
/**
|
||||||
|
* Recommended input block size.
|
||||||
|
*
|
||||||
|
* Encoder may reduce this value, e.g. if input is much smaller than input
|
||||||
|
* block size.
|
||||||
|
*
|
||||||
|
* Range is from ::BROTLI_MIN_INPUT_BLOCK_BITS to
|
||||||
|
* ::BROTLI_MAX_INPUT_BLOCK_BITS.
|
||||||
|
*
|
||||||
|
* @note Bigger input block size allows better compression, but consumes more
|
||||||
|
* memory. \n The rough formula of memory used for temporary input
|
||||||
|
* storage is `3 << lgBlock`.
|
||||||
|
*/
|
||||||
|
BROTLI_PARAM_LGBLOCK = 3,
|
||||||
|
/**
|
||||||
|
* Flag that affects usage of "literal context modeling" format feature.
|
||||||
|
*
|
||||||
|
* This flag is a "decoding-speed vs compression ratio" trade-off.
|
||||||
|
*/
|
||||||
|
BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING = 4,
|
||||||
|
/**
|
||||||
|
* Estimated total input size for all ::BrotliEncoderCompressStream calls.
|
||||||
|
*
|
||||||
|
* The default value is 0, which means that the total input size is unknown.
|
||||||
|
*/
|
||||||
|
BROTLI_PARAM_SIZE_HINT = 5
|
||||||
|
} BrotliEncoderParameter;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Opaque structure that holds encoder state.
|
||||||
|
*
|
||||||
|
* Allocated and initialized with ::BrotliEncoderCreateInstance.
|
||||||
|
* Cleaned up and deallocated with ::BrotliEncoderDestroyInstance.
|
||||||
|
*/
|
||||||
|
typedef struct BrotliEncoderStateStruct BrotliEncoderState;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets the specified parameter to the given encoder instance.
|
||||||
|
*
|
||||||
|
* @param state encoder instance
|
||||||
|
* @param param parameter to set
|
||||||
|
* @param value new parameter value
|
||||||
|
* @returns ::BROTLI_FALSE if parameter is unrecognized, or value is invalid
|
||||||
|
* @returns ::BROTLI_FALSE if value of parameter can not be changed at current
|
||||||
|
* encoder state (e.g. when encoding is started, window size might be
|
||||||
|
* already encoded and therefore it is impossible to change it)
|
||||||
|
* @returns ::BROTLI_TRUE if value is accepted
|
||||||
|
* @warning invalid values might be accepted in case they would not break
|
||||||
|
* encoding process.
|
||||||
|
*/
|
||||||
|
BROTLI_ENC_API BROTLI_BOOL BrotliEncoderSetParameter(
|
||||||
|
BrotliEncoderState* state, BrotliEncoderParameter param, uint32_t value);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates an instance of ::BrotliEncoderState and initializes it.
|
||||||
|
*
|
||||||
|
* @p alloc_func and @p free_func @b MUST be both zero or both non-zero. In the
|
||||||
|
* case they are both zero, default memory allocators are used. @p opaque is
|
||||||
|
* passed to @p alloc_func and @p free_func when they are called.
|
||||||
|
*
|
||||||
|
* @param alloc_func custom memory allocation function
|
||||||
|
* @param free_func custom memory fee function
|
||||||
|
* @param opaque custom memory manager handle
|
||||||
|
* @returns @c 0 if instance can not be allocated or initialized
|
||||||
|
* @returns pointer to initialized ::BrotliEncoderState otherwise
|
||||||
|
*/
|
||||||
|
BROTLI_ENC_API BrotliEncoderState* BrotliEncoderCreateInstance(
|
||||||
|
brotli_alloc_func alloc_func, brotli_free_func free_func, void* opaque);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Deinitializes and frees ::BrotliEncoderState instance.
|
||||||
|
*
|
||||||
|
* @param state decoder instance to be cleaned up and deallocated
|
||||||
|
*/
|
||||||
|
BROTLI_ENC_API void BrotliEncoderDestroyInstance(BrotliEncoderState* state);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Prepends imaginary LZ77 dictionary.
|
||||||
|
*
|
||||||
|
* Fills the fresh ::BrotliEncoderState with additional data corpus for LZ77
|
||||||
|
* backward references.
|
||||||
|
*
|
||||||
|
* @note Not to be confused with the static dictionary (see RFC7932 section 8).
|
||||||
|
*
|
||||||
|
* Workflow:
|
||||||
|
* -# Allocate and initialize state with ::BrotliEncoderCreateInstance
|
||||||
|
* -# Set ::BROTLI_PARAM_LGWIN parameter
|
||||||
|
* -# Invoke ::BrotliEncoderSetCustomDictionary
|
||||||
|
* -# Use ::BrotliEncoderCompressStream
|
||||||
|
* -# Clean up and free state with ::BrotliEncoderDestroyInstance
|
||||||
|
*
|
||||||
|
* @param state encoder instance
|
||||||
|
* @param size length of @p dict; at most "window size" bytes are used
|
||||||
|
* @param dict "dictionary"; @b MUST use same dictionary during decompression
|
||||||
|
*/
|
||||||
|
BROTLI_ENC_API void BrotliEncoderSetCustomDictionary(
|
||||||
|
BrotliEncoderState* state, size_t size,
|
||||||
|
const uint8_t dict[BROTLI_ARRAY_PARAM(size)]);

/**
 * Calculates the output size bound for the given @p input_size.
 *
 * @warning Result is not applicable to ::BrotliEncoderCompressStream output,
 *          because every "flush" adds extra overhead bytes, and some encoder
 *          settings (e.g. quality @c 0 and @c 1) might imply a "soft flush"
 *          after every chunk of input.
 *
 * @param input_size size of projected input
 * @returns @c 0 if result does not fit @c size_t
 */
BROTLI_ENC_API size_t BrotliEncoderMaxCompressedSize(size_t input_size);

/**
 * Performs one-shot memory-to-memory compression.
 *
 * Compresses the data in @p input_buffer into @p encoded_buffer, and sets
 * @p *encoded_size to the compressed length.
 *
 * @note If ::BrotliEncoderMaxCompressedSize(@p input_size) returns a non-zero
 *       value, then output is guaranteed to be no longer than that.
 *
 * @param quality quality parameter value, e.g. ::BROTLI_DEFAULT_QUALITY
 * @param lgwin lgwin parameter value, e.g. ::BROTLI_DEFAULT_WINDOW
 * @param mode mode parameter value, e.g. ::BROTLI_DEFAULT_MODE
 * @param input_size size of @p input_buffer
 * @param input_buffer input data buffer with at least @p input_size
 *        addressable bytes
 * @param[in, out] encoded_size @b in: size of @p encoded_buffer; \n
 *                 @b out: length of compressed data written to
 *                 @p encoded_buffer, or @c 0 if compression fails
 * @param encoded_buffer compressed data destination buffer
 * @returns ::BROTLI_FALSE in case of compression error
 * @returns ::BROTLI_FALSE if output buffer is too small
 * @returns ::BROTLI_TRUE otherwise
 */
BROTLI_ENC_API BROTLI_BOOL BrotliEncoderCompress(
    int quality, int lgwin, BrotliEncoderMode mode, size_t input_size,
    const uint8_t input_buffer[BROTLI_ARRAY_PARAM(input_size)],
    size_t* encoded_size,
    uint8_t encoded_buffer[BROTLI_ARRAY_PARAM(*encoded_size)]);
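A minimal one-shot sketch combining the size bound above with this call (illustrative only; `input` and `input_size` are assumed, and allocation-failure handling is abbreviated):

#include <stdlib.h>  /* malloc/free; the sketch assumes a hosted environment */

size_t bound = BrotliEncoderMaxCompressedSize(input_size);
uint8_t* encoded = bound ? (uint8_t*)malloc(bound) : NULL;
size_t encoded_size = bound;
if (encoded != NULL &&
    BrotliEncoderCompress(BROTLI_DEFAULT_QUALITY, BROTLI_DEFAULT_WINDOW,
                          BROTLI_DEFAULT_MODE, input_size, input,
                          &encoded_size, encoded)) {
  /* encoded[0 .. encoded_size) now holds the complete compressed stream */
}
free(encoded);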

/**
 * Compresses input stream to output stream.
 *
 * The values @p *available_in and @p *available_out must specify the number of
 * bytes addressable at @p *next_in and @p *next_out respectively.
 * When @p *available_out is @c 0, @p next_out is allowed to be @c NULL.
 *
 * After each call, @p *available_in will be decremented by the amount of input
 * bytes consumed, and the @p *next_in pointer will be incremented by that
 * amount. Similarly, @p *available_out will be decremented by the amount of
 * output bytes written, and the @p *next_out pointer will be incremented by
 * that amount.
 *
 * @p total_out, if it is not a null-pointer, will be set to the number
 * of bytes compressed since the last @p state initialization.
 *
 * Internally, the workflow consists of 3 tasks:
 *  -# (optionally) copy input data to internal buffer
 *  -# actually compress data and (optionally) store it to internal buffer
 *  -# (optionally) copy compressed bytes from internal buffer to output stream
 *
 * Whenever all 3 tasks can't move forward anymore, or an error occurs, this
 * method returns control flow to the caller.
 *
 * @p op is used to perform flush, finish the stream, or inject metadata block.
 * See ::BrotliEncoderOperation for more information.
 *
 * Flushing the stream means forcing encoding of all input passed to the
 * encoder and completing the current output block, so it could be fully
 * decoded by the stream decoder. To perform flush, set @p op to
 * ::BROTLI_OPERATION_FLUSH.
 * Under some circumstances (e.g. lack of output stream capacity) this operation
 * would require several calls to ::BrotliEncoderCompressStream. The method must
 * be called again until the input stream is depleted and the encoder has no
 * more output (see ::BrotliEncoderHasMoreOutput).
 *
 * Finishing the stream means encoding all input passed to the encoder and
 * adding specific "final" marks, so the stream decoder could determine that
 * the stream is complete. To perform finish, set @p op to
 * ::BROTLI_OPERATION_FINISH.
 * Under some circumstances (e.g. lack of output stream capacity) this operation
 * would require several calls to ::BrotliEncoderCompressStream. The method must
 * be called again until the input stream is depleted and the encoder has no
 * more output (see ::BrotliEncoderHasMoreOutput).
 *
 * @warning When flushing and finishing, @p op should not change until the
 *          operation is complete; the input stream should not be swapped,
 *          reduced, or extended either.
 *
 * @param state encoder instance
 * @param op requested operation
 * @param[in, out] available_in @b in: amount of available input; \n
 *                 @b out: amount of unused input
 * @param[in, out] next_in pointer to the next input byte
 * @param[in, out] available_out @b in: length of output buffer; \n
 *                 @b out: remaining size of output buffer
 * @param[in, out] next_out compressed output buffer cursor;
 *                 can be @c NULL if @p available_out is @c 0
 * @param[out] total_out number of bytes produced so far; can be @c NULL
 * @returns ::BROTLI_FALSE if there was an error
 * @returns ::BROTLI_TRUE otherwise
 */
BROTLI_ENC_API BROTLI_BOOL BrotliEncoderCompressStream(
    BrotliEncoderState* state, BrotliEncoderOperation op, size_t* available_in,
    const uint8_t** next_in, size_t* available_out, uint8_t** next_out,
    size_t* total_out);
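The resulting call pattern is a drain loop. A minimal sketch, assuming `s` is an initialized encoder, `input`/`input_size` the data to compress, and `dst` an open FILE*:

const uint8_t* next_in = input;
size_t available_in = input_size;
uint8_t buf[4096];
while (BROTLI_TRUE) {
  uint8_t* next_out = buf;
  size_t available_out = sizeof(buf);
  if (!BrotliEncoderCompressStream(s, BROTLI_OPERATION_FINISH,
                                   &available_in, &next_in,
                                   &available_out, &next_out, NULL)) {
    break;  /* stream error */
  }
  fwrite(buf, 1, sizeof(buf) - available_out, dst);  /* emit produced bytes */
  if (BrotliEncoderIsFinished(s)) break;  /* input consumed, output drained */
}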

/**
 * Checks if encoder instance reached the final state.
 *
 * @param state encoder instance
 * @returns ::BROTLI_TRUE if encoder is in a state where it reached the end of
 *          the input and produced all of the output
 * @returns ::BROTLI_FALSE otherwise
 */
BROTLI_ENC_API BROTLI_BOOL BrotliEncoderIsFinished(BrotliEncoderState* state);

/**
 * Checks if encoder has more output.
 *
 * @param state encoder instance
 * @returns ::BROTLI_TRUE, if encoder has some unconsumed output
 * @returns ::BROTLI_FALSE otherwise
 */
BROTLI_ENC_API BROTLI_BOOL BrotliEncoderHasMoreOutput(
    BrotliEncoderState* state);

/**
 * Acquires pointer to internal output buffer.
 *
 * This method is used to make language bindings easier and more efficient:
 *  -# push data to ::BrotliEncoderCompressStream,
 *     until ::BrotliEncoderHasMoreOutput returns ::BROTLI_TRUE
 *  -# use ::BrotliEncoderTakeOutput to peek bytes and copy to language-specific
 *     entity
 *
 * Also this could be useful if there is an output stream that is able to
 * consume all the provided data (e.g. when data is saved to file system).
 *
 * @attention After every call to ::BrotliEncoderTakeOutput @p *size bytes of
 *            output are considered consumed for all consecutive calls to the
 *            instance methods; returned pointer becomes invalidated as well.
 *
 * @note Encoder output is not guaranteed to be contiguous. This means that
 *       after the size-unrestricted call to ::BrotliEncoderTakeOutput,
 *       immediate next call to ::BrotliEncoderTakeOutput may return more data.
 *
 * @param state encoder instance
 * @param[in, out] size @b in: number of bytes caller is ready to take, @c 0 if
 *                 any amount could be handled; \n
 *                 @b out: amount of data pointed by returned pointer and
 *                 considered consumed; \n
 *                 out value is never greater than in value, unless it is @c 0
 * @returns pointer to output data
 */
BROTLI_ENC_API const uint8_t* BrotliEncoderTakeOutput(
    BrotliEncoderState* state, size_t* size);
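For bindings and sink-style consumers, the intended usage is the pull loop sketched below (illustrative; `s` is assumed to be an encoder with pending output and `dst` an open FILE*):

while (BrotliEncoderHasMoreOutput(s)) {
  size_t out_size = 0;  /* 0 means "hand over whatever is available" */
  const uint8_t* out = BrotliEncoderTakeOutput(s, &out_size);
  fwrite(out, 1, out_size, dst);  /* bytes count as consumed after the call */
}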


/**
 * Gets an encoder library version.
 *
 * Look at BROTLI_VERSION for more information.
 */
BROTLI_ENC_API uint32_t BrotliEncoderVersion(void);

#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif

#endif /* BROTLI_ENC_ENCODE_H_ */

@@ -0,0 +1,146 @@
/* Copyright 2016 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Macros for compiler / platform specific features and build options. */

#ifndef BROTLI_COMMON_PORT_H_
#define BROTLI_COMMON_PORT_H_

/* Compatibility with non-clang compilers. */
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

#ifndef __has_feature
#define __has_feature(x) 0
#endif

#if defined(__GNUC__) && defined(__GNUC_MINOR__)
#define BROTLI_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#else
#define BROTLI_GCC_VERSION 0
#endif

#if defined(__ICC)
#define BROTLI_ICC_VERSION __ICC
#else
#define BROTLI_ICC_VERSION 0
#endif

#if defined(BROTLI_BUILD_MODERN_COMPILER)
#define BROTLI_MODERN_COMPILER 1
#elif BROTLI_GCC_VERSION >= 304 || BROTLI_ICC_VERSION >= 1600
#define BROTLI_MODERN_COMPILER 1
#else
#define BROTLI_MODERN_COMPILER 0
#endif

/* Define "BROTLI_PREDICT_TRUE" and "BROTLI_PREDICT_FALSE" macros for capable
   compilers.

To apply compiler hint, enclose the branching condition into macros, like this:

  if (BROTLI_PREDICT_TRUE(zero == 0)) {
    // main execution path
  } else {
    // compiler should place this code outside of main execution path
  }

OR:

  if (BROTLI_PREDICT_FALSE(something_rare_or_unexpected_happens)) {
    // compiler should place this code outside of main execution path
  }

*/
#if BROTLI_MODERN_COMPILER || __has_builtin(__builtin_expect)
#define BROTLI_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#define BROTLI_PREDICT_FALSE(x) (__builtin_expect(x, 0))
#else
#define BROTLI_PREDICT_FALSE(x) (x)
#define BROTLI_PREDICT_TRUE(x) (x)
#endif

#if BROTLI_MODERN_COMPILER || __has_attribute(always_inline)
#define BROTLI_ATTRIBUTE_ALWAYS_INLINE __attribute__ ((always_inline))
#else
#define BROTLI_ATTRIBUTE_ALWAYS_INLINE
#endif

#if defined(_WIN32) || defined(__CYGWIN__)
#define BROTLI_ATTRIBUTE_VISIBILITY_HIDDEN
#elif BROTLI_MODERN_COMPILER || __has_attribute(visibility)
#define BROTLI_ATTRIBUTE_VISIBILITY_HIDDEN \
    __attribute__ ((visibility ("hidden")))
#else
#define BROTLI_ATTRIBUTE_VISIBILITY_HIDDEN
#endif

#ifndef BROTLI_INTERNAL
#define BROTLI_INTERNAL BROTLI_ATTRIBUTE_VISIBILITY_HIDDEN
#endif

#if defined(BROTLI_SHARED_COMPILATION) && defined(_WIN32)
#if defined(BROTLICOMMON_SHARED_COMPILATION)
#define BROTLI_COMMON_API __declspec(dllexport)
#else
#define BROTLI_COMMON_API __declspec(dllimport)
#endif /* BROTLICOMMON_SHARED_COMPILATION */
#if defined(BROTLIDEC_SHARED_COMPILATION)
#define BROTLI_DEC_API __declspec(dllexport)
#else
#define BROTLI_DEC_API __declspec(dllimport)
#endif /* BROTLIDEC_SHARED_COMPILATION */
#if defined(BROTLIENC_SHARED_COMPILATION)
#define BROTLI_ENC_API __declspec(dllexport)
#else
#define BROTLI_ENC_API __declspec(dllimport)
#endif /* BROTLIENC_SHARED_COMPILATION */
#else /* BROTLI_SHARED_COMPILATION && _WIN32 */
#define BROTLI_COMMON_API
#define BROTLI_DEC_API
#define BROTLI_ENC_API
#endif

#ifndef _MSC_VER
#if defined(__cplusplus) || !defined(__STRICT_ANSI__) || \
    (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
#define BROTLI_INLINE inline BROTLI_ATTRIBUTE_ALWAYS_INLINE
#else
#define BROTLI_INLINE
#endif
#else /* _MSC_VER */
#define BROTLI_INLINE __forceinline
#endif /* _MSC_VER */

#if !defined(__cplusplus) && !defined(c_plusplus) && \
    (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
#define BROTLI_RESTRICT restrict
#elif BROTLI_GCC_VERSION > 295 || defined(__llvm__)
#define BROTLI_RESTRICT __restrict
#else
#define BROTLI_RESTRICT
#endif

#if BROTLI_MODERN_COMPILER || __has_attribute(noinline)
#define BROTLI_NOINLINE __attribute__((noinline))
#else
#define BROTLI_NOINLINE
#endif

#if BROTLI_MODERN_COMPILER || __has_attribute(deprecated)
#define BROTLI_DEPRECATED __attribute__((deprecated))
#else
#define BROTLI_DEPRECATED
#endif

#define BROTLI_UNUSED(X) (void)(X)

#endif /* BROTLI_COMMON_PORT_H_ */

@@ -0,0 +1,90 @@
/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/**
 * @file
 * Common types used in decoder and encoder API.
 */

#ifndef BROTLI_COMMON_TYPES_H_
#define BROTLI_COMMON_TYPES_H_

#include <stddef.h> /* for size_t */

#if defined(_MSC_VER) && (_MSC_VER < 1600)
typedef __int8 int8_t;
typedef unsigned __int8 uint8_t;
typedef __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int64 int64_t;
#else
#include <stdint.h>
#endif /* defined(_MSC_VER) && (_MSC_VER < 1600) */

/**
 * A portable @c bool replacement.
 *
 * ::BROTLI_BOOL is a "documentation" type: actually it is @c int, but in API it
 * denotes a type, whose only values are ::BROTLI_TRUE and ::BROTLI_FALSE.
 *
 * ::BROTLI_BOOL values passed to Brotli should either be ::BROTLI_TRUE or
 * ::BROTLI_FALSE, or be a result of the ::TO_BROTLI_BOOL macro.
 *
 * ::BROTLI_BOOL values returned by Brotli should not be tested for equality
 * with @c true, @c false, ::BROTLI_TRUE, ::BROTLI_FALSE, but rather should be
 * evaluated, for example: @code{.cpp}
 * if (SomeBrotliFunction(encoder, BROTLI_TRUE) &&
 *     !OtherBrotliFunction(decoder, BROTLI_FALSE)) {
 *   bool x = !!YetAnotherBrotliFunction(encoder, TO_BROTLI_BOOL(2 * 2 == 4));
 *   DoSomething(x);
 * }
 * @endcode
 */
#define BROTLI_BOOL int
/** Portable @c true replacement. */
#define BROTLI_TRUE 1
/** Portable @c false replacement. */
#define BROTLI_FALSE 0
/** @c bool to ::BROTLI_BOOL conversion macro. */
#define TO_BROTLI_BOOL(X) (!!(X) ? BROTLI_TRUE : BROTLI_FALSE)

#define BROTLI_MAKE_UINT64_T(high, low) ((((uint64_t)(high)) << 32) | low)
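/* For illustration: BROTLI_MAKE_UINT64_T(0x1, 0x2) expands to
   (((uint64_t)0x1) << 32) | 0x2, i.e. 0x100000002. */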

#define BROTLI_UINT32_MAX (~((uint32_t)0))
#define BROTLI_SIZE_MAX (~((size_t)0))

/**
 * Allocating function pointer type.
 *
 * @param opaque custom memory manager handle provided by client
 * @param size requested memory region size; can not be @c 0
 * @returns @c 0 in the case of failure
 * @returns a valid pointer to a memory region of at least @p size bytes
 *          long otherwise
 */
typedef void* (*brotli_alloc_func)(void* opaque, size_t size);

/**
 * Deallocating function pointer type.
 *
 * This function @b SHOULD do nothing if @p address is @c 0.
 *
 * @param opaque custom memory manager handle provided by client
 * @param address memory region pointer returned by ::brotli_alloc_func, or @c 0
 */
typedef void (*brotli_free_func)(void* opaque, void* address);

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
    !defined(__cplusplus) && !defined(__PGI)
#define BROTLI_ARRAY_PARAM(L) L
#else
#define BROTLI_ARRAY_PARAM(L)
#endif

#endif /* BROTLI_COMMON_TYPES_H_ */
File diff suppressed because it is too large

@@ -0,0 +1,367 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Distributed under MIT license.
// See file LICENSE for detail or copy at https://opensource.org/licenses/MIT

package brotli

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"math/rand"
	"testing"
	"time"
)

func checkCompressedData(compressedData, wantOriginalData []byte) error {
	uncompressed, err := Decode(compressedData)
	if err != nil {
		return fmt.Errorf("brotli decompress failed: %v", err)
	}
	if !bytes.Equal(uncompressed, wantOriginalData) {
		if len(wantOriginalData) != len(uncompressed) {
			return fmt.Errorf(""+
				"Data doesn't uncompress to the original value.\n"+
				"Length of original: %v\n"+
				"Length of uncompressed: %v",
				len(wantOriginalData), len(uncompressed))
		}
		for i := range wantOriginalData {
			if wantOriginalData[i] != uncompressed[i] {
				return fmt.Errorf(""+
					"Data doesn't uncompress to the original value.\n"+
					"Original at %v is %v\n"+
					"Uncompressed at %v is %v",
					i, wantOriginalData[i], i, uncompressed[i])
			}
		}
	}
	return nil
}

func TestEncoderNoWrite(t *testing.T) {
	out := bytes.Buffer{}
	e := NewWriter(&out, WriterOptions{Quality: 5})
	if err := e.Close(); err != nil {
		t.Errorf("Close()=%v, want nil", err)
	}
	// Check Write after close.
	if _, err := e.Write([]byte("hi")); err == nil {
		t.Errorf("No error after Close() + Write()")
	}
}

func TestEncoderEmptyWrite(t *testing.T) {
	out := bytes.Buffer{}
	e := NewWriter(&out, WriterOptions{Quality: 5})
	n, err := e.Write([]byte(""))
	if n != 0 || err != nil {
		t.Errorf("Write()=%v,%v, want 0, nil", n, err)
	}
	if err := e.Close(); err != nil {
		t.Errorf("Close()=%v, want nil", err)
	}
}

func TestWriter(t *testing.T) {
	// Test basic encoder usage.
	input := []byte("<html><body><H1>Hello world</H1></body></html>")
	out := bytes.Buffer{}
	e := NewWriter(&out, WriterOptions{Quality: 1})
	in := bytes.NewReader([]byte(input))
	n, err := io.Copy(e, in)
	if err != nil {
		t.Errorf("Copy Error: %v", err)
	}
	if int(n) != len(input) {
		t.Errorf("Copy() n=%v, want %v", n, len(input))
	}
	if err := e.Close(); err != nil {
		t.Errorf("Close Error after copied %d bytes: %v", n, err)
	}
	if err := checkCompressedData(out.Bytes(), input); err != nil {
		t.Error(err)
	}
}

func TestEncoderStreams(t *testing.T) {
	// Test that output is streamed.
	// Adjust window size to ensure the encoder outputs at least enough bytes
	// to fill the window.
	const lgWin = 16
	windowSize := int(math.Pow(2, lgWin))
	input := make([]byte, 8*windowSize)
	rand.Read(input)
	out := bytes.Buffer{}
	e := NewWriter(&out, WriterOptions{Quality: 11, LGWin: lgWin})
	halfInput := input[:len(input)/2]
	in := bytes.NewReader(halfInput)

	n, err := io.Copy(e, in)
	if err != nil {
		t.Errorf("Copy Error: %v", err)
	}

	// We've fed more data than the sliding window size. Check that some
	// compressed data has been output.
	if out.Len() == 0 {
		t.Errorf("Output length is 0 after %d bytes written", n)
	}
	if err := e.Close(); err != nil {
		t.Errorf("Close Error after copied %d bytes: %v", n, err)
	}
	if err := checkCompressedData(out.Bytes(), halfInput); err != nil {
		t.Error(err)
	}
}

func TestEncoderLargeInput(t *testing.T) {
	input := make([]byte, 1000000)
	rand.Read(input)
	out := bytes.Buffer{}
	e := NewWriter(&out, WriterOptions{Quality: 5})
	in := bytes.NewReader(input)

	n, err := io.Copy(e, in)
	if err != nil {
		t.Errorf("Copy Error: %v", err)
	}
	if int(n) != len(input) {
		t.Errorf("Copy() n=%v, want %v", n, len(input))
	}
	if err := e.Close(); err != nil {
		t.Errorf("Close Error after copied %d bytes: %v", n, err)
	}
	if err := checkCompressedData(out.Bytes(), input); err != nil {
		t.Error(err)
	}
}

func TestEncoderFlush(t *testing.T) {
	input := make([]byte, 1000)
	rand.Read(input)
	out := bytes.Buffer{}
	e := NewWriter(&out, WriterOptions{Quality: 5})
	in := bytes.NewReader(input)
	_, err := io.Copy(e, in)
	if err != nil {
		t.Fatalf("Copy Error: %v", err)
	}
	if err := e.Flush(); err != nil {
		t.Fatalf("Flush(): %v", err)
	}
	if out.Len() == 0 {
		t.Fatalf("0 bytes written after Flush()")
	}
	decompressed := make([]byte, 1000)
	reader := NewReader(bytes.NewReader(out.Bytes()))
	n, err := reader.Read(decompressed)
	if n != len(decompressed) || err != nil {
		t.Errorf("Expected <%v, nil>, but <%v, %v>", len(decompressed), n, err)
	}
	reader.Close()
	if !bytes.Equal(decompressed, input) {
		t.Errorf(""+
			"Decompress after flush: %v\n"+
			"%q\n"+
			"want:\n%q",
			err, decompressed, input)
	}
	if err := e.Close(); err != nil {
		t.Errorf("Close(): %v", err)
	}
}

type readerWithTimeout struct {
	io.ReadCloser
}

func (r readerWithTimeout) Read(p []byte) (int, error) {
	type result struct {
		n   int
		err error
	}
	ch := make(chan result)
	go func() {
		n, err := r.ReadCloser.Read(p)
		ch <- result{n, err}
	}()
	select {
	case result := <-ch:
		return result.n, result.err
	case <-time.After(5 * time.Second):
		return 0, fmt.Errorf("read timed out")
	}
}

func TestDecoderStreaming(t *testing.T) {
	pr, pw := io.Pipe()
	writer := NewWriter(pw, WriterOptions{Quality: 5, LGWin: 20})
	reader := readerWithTimeout{NewReader(pr)}
	defer func() {
		if err := reader.Close(); err != nil {
			t.Errorf("reader.Close: %v", err)
		}
		go ioutil.ReadAll(pr) // swallow the "EOF" token from writer.Close
		if err := writer.Close(); err != nil {
			t.Errorf("writer.Close: %v", err)
		}
	}()

	ch := make(chan []byte)
	errch := make(chan error)
	go func() {
		for {
			segment, ok := <-ch
			if !ok {
				return
			}
			if n, err := writer.Write(segment); err != nil || n != len(segment) {
				errch <- fmt.Errorf("write=%v,%v, want %v,%v", n, err, len(segment), nil)
				return
			}
			if err := writer.Flush(); err != nil {
				errch <- fmt.Errorf("flush: %v", err)
				return
			}
		}
	}()
	defer close(ch)

	segments := [...][]byte{
		[]byte("first"),
		[]byte("second"),
		[]byte("third"),
	}
	for k, segment := range segments {
		t.Run(fmt.Sprintf("Segment%d", k), func(t *testing.T) {
			select {
			case ch <- segment:
			case err := <-errch:
				t.Fatalf("write: %v", err)
			case <-time.After(5 * time.Second):
				t.Fatalf("timed out")
			}
			wantLen := len(segment)
			got := make([]byte, wantLen)
			if n, err := reader.Read(got); err != nil || n != wantLen || !bytes.Equal(got, segment) {
				t.Fatalf("read[%d]=%q,%v,%v, want %q,%v,%v", k, got, n, err, segment, wantLen, nil)
			}
		})
	}
}

func TestReader(t *testing.T) {
	content := bytes.Repeat([]byte("hello world!"), 10000)
	encoded, _ := Encode(content, WriterOptions{Quality: 5})
	r := NewReader(bytes.NewReader(encoded))
	var decodedOutput bytes.Buffer
	n, err := io.Copy(&decodedOutput, r)
	if err != nil {
		t.Fatalf("Copy(): n=%v, err=%v", n, err)
	}
	if err := r.Close(); err != nil {
		t.Errorf("Close(): %v", err)
	}
	if got := decodedOutput.Bytes(); !bytes.Equal(got, content) {
		t.Errorf(""+
			"Reader output:\n"+
			"%q\n"+
			"want:\n"+
			"<%d bytes>",
			got, len(content))
	}
}

func TestDecode(t *testing.T) {
	content := bytes.Repeat([]byte("hello world!"), 10000)
	encoded, _ := Encode(content, WriterOptions{Quality: 5})
	decoded, err := Decode(encoded)
	if err != nil {
		t.Errorf("Decode: %v", err)
	}
	if !bytes.Equal(decoded, content) {
		t.Errorf(""+
			"Decode content:\n"+
			"%q\n"+
			"want:\n"+
			"<%d bytes>",
			decoded, len(content))
	}
}

func TestDecodeFuzz(t *testing.T) {
	// Test that the decoder terminates with corrupted input.
	content := bytes.Repeat([]byte("hello world!"), 100)
	src := rand.NewSource(0)
	encoded, err := Encode(content, WriterOptions{Quality: 5})
	if err != nil {
		t.Fatalf("Encode(<%d bytes>, _) = _, %s", len(content), err)
	}
	if len(encoded) == 0 {
		t.Fatalf("Encode(<%d bytes>, _) produced empty output", len(content))
	}
	for i := 0; i < 100; i++ {
		enc := append([]byte{}, encoded...)
		for j := 0; j < 5; j++ {
			enc[int(src.Int63())%len(enc)] = byte(src.Int63() % 256)
		}
		Decode(enc)
	}
}

func TestDecodeTrailingData(t *testing.T) {
	content := bytes.Repeat([]byte("hello world!"), 100)
	encoded, _ := Encode(content, WriterOptions{Quality: 5})
	_, err := Decode(append(encoded, 0))
	if err == nil {
		t.Errorf("Expected 'excessive input' error")
	}
}

func TestEncodeDecode(t *testing.T) {
	for _, test := range []struct {
		data    []byte
		repeats int
	}{
		{nil, 0},
		{[]byte("A"), 1},
		{[]byte("<html><body><H1>Hello world</H1></body></html>"), 10},
		{[]byte("<html><body><H1>Hello world</H1></body></html>"), 1000},
	} {
		t.Logf("case %q x %d", test.data, test.repeats)
		input := bytes.Repeat(test.data, test.repeats)
		encoded, err := Encode(input, WriterOptions{Quality: 5})
		if err != nil {
			t.Errorf("Encode: %v", err)
		}
		// Inputs are compressible, but may be too small to compress.
		if maxSize := len(input)/2 + 20; len(encoded) >= maxSize {
			t.Errorf(""+
				"Encode returned %d bytes, want <%d\n"+
				"Encoded=%q",
				len(encoded), maxSize, encoded)
		}
		decoded, err := Decode(encoded)
		if err != nil {
			t.Errorf("Decode: %v", err)
		}
		if !bytes.Equal(decoded, input) {
			var want string
			if len(input) > 320 {
				want = fmt.Sprintf("<%d bytes>", len(input))
			} else {
				want = fmt.Sprintf("%q", input)
			}
			t.Errorf(""+
				"Decode content:\n"+
				"%q\n"+
				"want:\n"+
				"%s",
				decoded, want)
		}
	}
}

@@ -0,0 +1,12 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Distributed under MIT license.
// See file LICENSE for detail or copy at https://opensource.org/licenses/MIT

package brotli

// Inform golang build system that it should link brotli libraries.

// #cgo CFLAGS: -O3
// #cgo LDFLAGS: -lm
import "C"

@@ -0,0 +1,56 @@
/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Functions for clustering similar histograms together. */

#include "./enc/cluster.h"

#include <brotli/types.h>
#include "./enc/bit_cost.h" /* BrotliPopulationCost */
#include "./enc/fast_log.h"
#include "./enc/histogram.h"
#include "./enc/memory.h"
#include "./enc/port.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

static BROTLI_INLINE BROTLI_BOOL HistogramPairIsLess(
    const HistogramPair* p1, const HistogramPair* p2) {
  if (p1->cost_diff != p2->cost_diff) {
    return TO_BROTLI_BOOL(p1->cost_diff > p2->cost_diff);
  }
  return TO_BROTLI_BOOL((p1->idx2 - p1->idx1) > (p2->idx2 - p2->idx1));
}

/* Returns entropy reduction of the context map when we combine two clusters. */
static BROTLI_INLINE double ClusterCostDiff(size_t size_a, size_t size_b) {
  size_t size_c = size_a + size_b;
  return (double)size_a * FastLog2(size_a) +
         (double)size_b * FastLog2(size_b) -
         (double)size_c * FastLog2(size_c);
}

#define CODE(X) X

#define FN(X) X ## Literal
#include "./enc/cluster_inc.h" /* NOLINT(build/include) */
#undef FN

#define FN(X) X ## Command
#include "./enc/cluster_inc.h" /* NOLINT(build/include) */
#undef FN

#define FN(X) X ## Distance
#include "./enc/cluster_inc.h" /* NOLINT(build/include) */
#undef FN

#undef CODE

#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif

@@ -0,0 +1,55 @@
/* Copyright 2016 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

#ifndef BROTLI_COMMON_CONSTANTS_H_
#define BROTLI_COMMON_CONSTANTS_H_

/* Specification: 7.3. Encoding of the context map */
#define BROTLI_CONTEXT_MAP_MAX_RLE 16

/* Specification: 2. Compressed representation overview */
#define BROTLI_MAX_NUMBER_OF_BLOCK_TYPES 256

/* Specification: 3.3. Alphabet sizes: insert-and-copy length */
#define BROTLI_NUM_LITERAL_SYMBOLS 256
#define BROTLI_NUM_COMMAND_SYMBOLS 704
#define BROTLI_NUM_BLOCK_LEN_SYMBOLS 26
#define BROTLI_MAX_CONTEXT_MAP_SYMBOLS (BROTLI_MAX_NUMBER_OF_BLOCK_TYPES + \
                                        BROTLI_CONTEXT_MAP_MAX_RLE)
#define BROTLI_MAX_BLOCK_TYPE_SYMBOLS (BROTLI_MAX_NUMBER_OF_BLOCK_TYPES + 2)

/* Specification: 3.5. Complex prefix codes */
#define BROTLI_REPEAT_PREVIOUS_CODE_LENGTH 16
#define BROTLI_REPEAT_ZERO_CODE_LENGTH 17
#define BROTLI_CODE_LENGTH_CODES (BROTLI_REPEAT_ZERO_CODE_LENGTH + 1)
/* "code length of 8 is repeated" */
#define BROTLI_INITIAL_REPEATED_CODE_LENGTH 8

/* Specification: 4. Encoding of distances */
#define BROTLI_NUM_DISTANCE_SHORT_CODES 16
#define BROTLI_MAX_NPOSTFIX 3
#define BROTLI_MAX_NDIRECT 120
#define BROTLI_MAX_DISTANCE_BITS 24U
/* BROTLI_NUM_DISTANCE_SYMBOLS == 520 */
#define BROTLI_NUM_DISTANCE_SYMBOLS (BROTLI_NUM_DISTANCE_SHORT_CODES + \
                                     BROTLI_MAX_NDIRECT + \
                                     (BROTLI_MAX_DISTANCE_BITS << \
                                      (BROTLI_MAX_NPOSTFIX + 1)))
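/* Checking the arithmetic: 16 + 120 + (24 << (3 + 1)) == 16 + 120 + 384 == 520,
   which matches the note above. */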

/* 7.1. Context modes and context ID lookup for literals */
/* "context IDs for literals are in the range of 0..63" */
#define BROTLI_LITERAL_CONTEXT_BITS 6

/* 7.2. Context ID for distances */
#define BROTLI_DISTANCE_CONTEXT_BITS 2

/* 9.1. Format of the Stream Header */
/* Number of slack bytes for window size. Don't confuse
   with BROTLI_NUM_DISTANCE_SHORT_CODES. */
#define BROTLI_WINDOW_GAP 16
#define BROTLI_MAX_BACKWARD_LIMIT(W) (((size_t)1 << (W)) - BROTLI_WINDOW_GAP)
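/* For example, with W == 22: (1 << 22) - 16 == 4194304 - 16 == 4194288. */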

#endif /* BROTLI_COMMON_CONSTANTS_H_ */

@@ -0,0 +1,64 @@
/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Collection of static dictionary words. */

#ifndef BROTLI_COMMON_DICTIONARY_H_
#define BROTLI_COMMON_DICTIONARY_H_

#include <brotli/port.h>
#include <brotli/types.h>

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

typedef struct BrotliDictionary {
  /**
   * Number of bits to encode index of dictionary word in a bucket.
   *
   * Specification: Appendix A. Static Dictionary Data
   *
   * Words in a dictionary are bucketed by length.
   * @c 0 means that there are no words of a given length.
   * Dictionary consists of words with length of [4..24] bytes.
   * Values at [0..3] and [25..31] indices should not be addressed.
   */
  const uint8_t size_bits_by_length[32];

  /* assert(offset[i + 1] == offset[i] + (bits[i] ? (i << bits[i]) : 0)) */
  const uint32_t offsets_by_length[32];

  /* assert(data_size == offsets_by_length[31]) */
  const size_t data_size;

  /* Data array is not bound, and should obey size_bits_by_length values.
     Specified size matches default (RFC 7932) dictionary; its size is
     defined by data_size. */
  const uint8_t* data;
} BrotliDictionary;

BROTLI_COMMON_API extern const BrotliDictionary* BrotliGetDictionary(void);

/**
 * Sets dictionary data.
 *
 * When dictionary data is already set / present, this method is a no-op.
 *
 * Dictionary data MUST be provided before BrotliGetDictionary is invoked.
 * This method is used ONLY in multi-client environment (e.g. C + Java),
 * to reduce storage by sharing single dictionary between implementations.
 */
BROTLI_COMMON_API void BrotliSetDictionaryData(const uint8_t* data);

#define BROTLI_MIN_DICTIONARY_WORD_LENGTH 4
#define BROTLI_MAX_DICTIONARY_WORD_LENGTH 24

#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif

#endif /* BROTLI_COMMON_DICTIONARY_H_ */

@@ -0,0 +1,19 @@
/* Copyright 2016 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Version definition. */

#ifndef BROTLI_COMMON_VERSION_H_
#define BROTLI_COMMON_VERSION_H_

/* This macro should only be used when library is compiled together with client.
   If library is dynamically linked, use BrotliDecoderVersion and
   BrotliEncoderVersion methods. */

/* Semantic version, calculated as (MAJOR << 24) | (MINOR << 12) | PATCH */
#define BROTLI_VERSION 0x1000000
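/* Here 0x1000000 == (1 << 24) | (0 << 12) | 0, i.e. version 1.0.0. */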

#endif /* BROTLI_COMMON_VERSION_H_ */