diff --git a/Gopkg.lock b/Gopkg.lock index d57f4d53..87b1f74b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,6 +1,14 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. +[[projects]] + digest = "1:3d3a509c5ba327e8573bb57f9da8430c63a46a06886eb1d2ffc8af4e76f31c72" + name = "cloud.google.com/go" + packages = ["civil"] + pruneopts = "UT" + revision = "cdaaf98f9226c39dc162b8e55083b2fbc67b4674" + version = "v0.43.0" + [[projects]] digest = "1:9f3b30d9f8e0d7040f729b82dcbc8f0dead820a133b3147ce355fc451f32d761" name = "github.com/BurntSushi/toml" @@ -9,6 +17,14 @@ revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005" version = "v0.3.1" +[[projects]] + digest = "1:c84a587136cb69cecc11f3dbe9f9001444044c0dba74997b07f7e4c150b07cda" + name = "github.com/DATA-DOG/go-sqlmock" + packages = ["."] + pruneopts = "UT" + revision = "3f9954f6f6697845b082ca57995849ddf614f450" + version = "v1.3.3" + [[projects]] digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" name = "github.com/beorn7/perks" @@ -39,6 +55,14 @@ pruneopts = "UT" revision = "333127dbecfcc23a8db7d9a4f52785d23aff44a1" +[[projects]] + branch = "master" + digest = "1:05756b73bf7eae03d6f6c5f0ae83de42ce296e7d83336563dd9ce2072106d5e6" + name = "github.com/cloudflare/golz4" + packages = ["."] + pruneopts = "UT" + revision = "ef862a3cdc58a6f1fee4e3af3d44fbe279194cde" + [[projects]] digest = "1:3f9506ee991cdee1f05bf0cd3e34b5cd922dc00d6a950fb4beb4e07ab1c4d3d1" name = "github.com/coredns/coredns" @@ -73,11 +97,12 @@ version = "v1.2.0" [[projects]] - digest = "1:6f70106e7bc1c803e8a0a4519e09c12d154771acfa2559206e97b033bbd1dd38" + branch = "v2" + digest = "1:ca6ce07b0d28c6044411b9c966cd845233e27c2ff91d08a3a869631f329c9918" name = "github.com/coreos/go-oidc" - packages = ["jose"] + packages = ["."] pruneopts = "UT" - revision = "a93f71fdfe73d2c0f5413c0565eea0af6523a6df" + revision = "274971e2c94cd6fb17614979e2899edbc81af146" [[projects]] digest = "1:1da3a221f0bc090792d3a2a080ff09008427c0e0f0533a4ed6abd8994421da73" @@ -97,11 +122,15 @@ [[projects]] branch = "master" - digest = "1:c013ffc6e15f9f898078f9d38441c68b228aa7b899659452170250ccb27f5f1e" - name = "github.com/elgs/gosqljson" - packages = ["."] + digest = "1:27cbe99893a40975358d20736d80adc95ef395d3bf5923f802f3de259533c94a" + name = "github.com/denisenkom/go-mssqldb" + packages = [ + ".", + "internal/cp", + "internal/querytext", + ] pruneopts = "UT" - revision = "027aa4915315a0b2825c0f025cea347829b974fa" + revision = "11b2859924c1e3c3da056fd4e0976b93883f3545" [[projects]] digest = "1:d4268b2a09b1f736633577c4ac93f2a5356c73742fff5344e2451aeec60a7ad0" @@ -125,6 +154,14 @@ pruneopts = "UT" revision = "75cf19382434e82df4dd84953f566b8ad23d6e9e" +[[projects]] + digest = "1:296cda2c4a6a7a54964d7b3b0815e2aed215da75217b72033060ffb1c4e4b6ae" + name = "github.com/felixge/httpsnoop" + packages = ["."] + pruneopts = "UT" + revision = "eadd4fad6aac69ae62379194fe0219f3dbc80fd3" + version = "v1.0.0" + [[projects]] branch = "master" digest = "1:50a46ab1d5edbbdd55125b4d37f1bf503d0807c26461f9ad7b358d6006641d09" @@ -140,6 +177,14 @@ pruneopts = "UT" revision = "ed7bcb39ff10f39ab08e317ce16df282845852fa" +[[projects]] + digest = "1:ec6f9bf5e274c833c911923c9193867f3f18788c461f76f05f62bb1510e0ae65" + name = "github.com/go-sql-driver/mysql" + packages = ["."] + pruneopts = "UT" + revision = "72cd26f257d44c1114970e19afddcd812016007e" + version = "v1.4.1" + [[projects]] branch = "master" digest = "1:3e6afc3ed8a72949aa735c00fddc23427dc9384ccfd51cf0d91a412e668da632" @@ 
-148,6 +193,14 @@ pruneopts = "UT" revision = "604e922904d35e97f98a774db7881f049cd8d970" +[[projects]] + branch = "master" + digest = "1:01745416ed734d0dbd8f2a25344536e399ee60319654873da12b107e9e3cb309" + name = "github.com/golang/gddo" + packages = ["httputil/header"] + pruneopts = "UT" + revision = "af0f2af80721261f4d211b2c9563f7b46b2aab06" + [[projects]] digest = "1:239c4c7fd2159585454003d9be7207167970194216193a8a210b8d29576f19c9" name = "github.com/golang/protobuf" @@ -194,6 +247,17 @@ pruneopts = "UT" revision = "8e809c8a86450a29b90dcc9efbf062d0fe6d9746" +[[projects]] + digest = "1:6c41d4f998a03b6604227ccad36edaed6126c397e5d78709ef4814a1145a6757" + name = "github.com/jmoiron/sqlx" + packages = [ + ".", + "reflectx", + ] + pruneopts = "UT" + revision = "d161d7a76b5661016ad0b085869f77fd410f3e6a" + version = "v1.2.0" + [[projects]] digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de" name = "github.com/konsorten/go-windows-terminal-sequences" @@ -203,7 +267,26 @@ version = "v1.0.2" [[projects]] - digest = "1:bc1c0be40c67b6b4aee09d7508d5a2a52c1c116b1fa43806dad2b0d6b4d4003b" + digest = "1:177a18252d40bf39a867876456854717780b0b1212fdae231d31e93b6f885d80" + name = "github.com/kshvakov/clickhouse" + packages = [ + ".", + "lib/binary", + "lib/cityhash102", + "lib/column", + "lib/data", + "lib/leakypool", + "lib/lz4", + "lib/protocol", + "lib/types", + "lib/writebuffer", + ] + pruneopts = "UT" + revision = "43e176a8d165376662f4d67203c71cc8a027c86b" + version = "v1.3.9" + +[[projects]] + digest = "1:12cb143f2148bf54bcd9fe622abac17325e85eeb1d84b8ec6caf1c80232108fd" name = "github.com/lib/pq" packages = [ ".", @@ -211,8 +294,8 @@ "scram", ] pruneopts = "UT" - revision = "51e2106eed1cea199c802d2a49e91e2491b02056" - version = "v1.1.0" + revision = "3427c32cb71afc948325f299f040e53c1dd78979" + version = "v1.2.0" [[projects]] digest = "1:2fa7b0155cd54479a755c629de26f888a918e13f8857a2c442205d825368e084" @@ -230,6 +313,14 @@ revision = "c2a7a6ca930a4cd0bc33a3f298eb71960732a3a7" version = "v0.0.7" +[[projects]] + digest = "1:79e87abf06b873987dee86598950f5b51732ac454d5a5cab6445a14330e6c9e3" + name = "github.com/mattn/go-sqlite3" + packages = ["."] + pruneopts = "UT" + revision = "b612a2feea6aa87c6d052d9086572551df06497e" + version = "v1.11.0" + [[projects]] digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" name = "github.com/matttproud/golang_protobuf_extensions" @@ -265,6 +356,14 @@ revision = "af06845cf3004701891bf4fdb884bfe4920b3727" version = "v1.1.0" +[[projects]] + digest = "1:dd3d50b46f8d3c88a8d3986f2e5344db582c457469a7cb3a0dda953c669cdf9a" + name = "github.com/mitchellh/go-server-timing" + packages = ["."] + pruneopts = "UT" + revision = "1ca01db910d57d64ec1c68f27105614b77558e28" + version = "v1.0.0" + [[projects]] digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" name = "github.com/mitchellh/mapstructure" @@ -301,6 +400,17 @@ revision = "792786c7400a136282c1664665ae0a8db921c6c2" version = "v1.0.0" +[[projects]] + branch = "master" + digest = "1:bd9efe4e0b0f768302a1e2f0c22458149278de533e521206e5ddc71848c269a0" + name = "github.com/pquerna/cachecontrol" + packages = [ + ".", + "cacheobject", + ] + pruneopts = "UT" + revision = "1555304b9b35fdd2b425bccf1a5613677705e7d0" + [[projects]] digest = "1:c968b29db5d68ec97de404b6d058d5937fa015a141b3b4f7a0d87d5f8226f04c" name = "github.com/prometheus/client_golang" @@ -369,7 +479,15 @@ [[projects]] branch = "master" - digest = 
"1:a84d5ec8b40a827962ea250f2cf03434138ccae9d83fcac12fb49b70c70b80cc" + digest = "1:cac263b8eb2e9ad2327b70e03be1de1227040bfb3be26b49c4b56cbed568afc7" + name = "github.com/xo/dburl" + packages = ["."] + pruneopts = "UT" + revision = "98997a05b24fc069c79fa13b05ccf376c22a326c" + +[[projects]] + branch = "master" + digest = "1:1a9391a80fe548ffcf90aee8ae35bfb0b188198b57a3fe2ef5bb099ba4b4f610" name = "golang.org/x/crypto" packages = [ "curve25519", @@ -377,8 +495,10 @@ "ed25519/internal/edwards25519", "internal/chacha20", "internal/subtle", + "md4", "nacl/box", "nacl/secretbox", + "pbkdf2", "poly1305", "salsa20/salsa", "ssh", @@ -389,11 +509,12 @@ [[projects]] branch = "master" - digest = "1:52d140f7ab52e491cc1cbc93e6637aa5e9a7f3beae7545d675b02e52ca9d7290" + digest = "1:6b3dbfffba73ea0d8131ecb5738fe87f6addc4c7b73154b9d58e2693ad6a6256" name = "golang.org/x/net" packages = [ "bpf", "context", + "context/ctxhttp", "http/httpguts", "http2", "http2/hpack", @@ -409,6 +530,17 @@ pruneopts = "UT" revision = "1da14a5a36f220ea3f03470682b737b1dfd5de22" +[[projects]] + branch = "master" + digest = "1:8d1c112fb1679fa097e9a9255a786ee47383fa2549a3da71bcb1334a693ebcfe" + name = "golang.org/x/oauth2" + packages = [ + ".", + "internal", + ] + pruneopts = "UT" + revision = "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33" + [[projects]] digest = "1:39ebcc2b11457b703ae9ee2e8cca0f68df21969c6102cb3b705f76cca0ea0239" name = "golang.org/x/sync" @@ -455,6 +587,23 @@ revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" +[[projects]] + digest = "1:7e8b9c5ae49011b12ae8473834ac1a7bb8ac029ba201270c723e4c280c9e4855" + name = "google.golang.org/appengine" + packages = [ + "cloudsql", + "internal", + "internal/base", + "internal/datastore", + "internal/log", + "internal/remote_api", + "internal/urlfetch", + "urlfetch", + ] + pruneopts = "UT" + revision = "b2f4a3cf3c67576a2ee09e1fe62656a5086ce880" + version = "v1.6.1" + [[projects]] branch = "master" digest = "1:c3076e7defee87de1236f1814beb588f40a75544c60121e6eb38b3b3721783e2" @@ -504,6 +653,26 @@ revision = "236199dd5f8031d698fb64091194aecd1c3895b2" version = "v1.20.0" +[[projects]] + branch = "v1" + digest = "1:6f70106e7bc1c803e8a0a4519e09c12d154771acfa2559206e97b033bbd1dd38" + name = "gopkg.in/coreos/go-oidc.v1" + packages = ["jose"] + pruneopts = "UT" + revision = "e860bd55bfa7d7cb35d30d26a167982584f616b0" + +[[projects]] + digest = "1:8c05919580be8a5be668709d7e5a69d5cd19b8ee9f23d62ce5b10d3457bf6a13" + name = "gopkg.in/square/go-jose.v2" + packages = [ + ".", + "cipher", + "json", + ] + pruneopts = "UT" + revision = "730df5f748271903322feb182be83b43ebbbe27d" + version = "v2.3.1" + [[projects]] branch = "altsrc-parse-durations" digest = "1:0370b1bceda03dbfade3abbde639a43f1113bab711ec760452e5c0dcc0c14787" @@ -552,6 +721,7 @@ analyzer-name = "dep" analyzer-version = 1 input-imports = [ + "github.com/DATA-DOG/go-sqlmock", "github.com/cloudflare/brotli-go", "github.com/cloudflare/golibs/lrucache", "github.com/coredns/coredns/core/dnsserver", @@ -561,20 +731,25 @@ "github.com/coredns/coredns/plugin/pkg/dnstest", "github.com/coredns/coredns/plugin/pkg/rcode", "github.com/coredns/coredns/request", - "github.com/coreos/go-oidc/jose", + "github.com/coreos/go-oidc", "github.com/coreos/go-systemd/daemon", - "github.com/elgs/gosqljson", + "github.com/denisenkom/go-mssqldb", "github.com/equinox-io/equinox", "github.com/facebookgo/grace/gracenet", "github.com/getsentry/raven-go", + "github.com/go-sql-driver/mysql", "github.com/golang-collections/collections/queue", 
"github.com/google/uuid", "github.com/gorilla/mux", "github.com/gorilla/websocket", + "github.com/jmoiron/sqlx", + "github.com/kshvakov/clickhouse", "github.com/lib/pq", "github.com/mattn/go-colorable", + "github.com/mattn/go-sqlite3", "github.com/miekg/dns", "github.com/mitchellh/go-homedir", + "github.com/mitchellh/go-server-timing", "github.com/mitchellh/mapstructure", "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", @@ -583,6 +758,7 @@ "github.com/sirupsen/logrus", "github.com/stretchr/testify/assert", "github.com/stretchr/testify/require", + "github.com/xo/dburl", "golang.org/x/crypto/nacl/box", "golang.org/x/crypto/ssh", "golang.org/x/crypto/ssh/terminal", @@ -597,6 +773,7 @@ "golang.org/x/sys/windows/svc", "golang.org/x/sys/windows/svc/eventlog", "golang.org/x/sys/windows/svc/mgr", + "gopkg.in/coreos/go-oidc.v1/jose", "gopkg.in/urfave/cli.v2", "gopkg.in/urfave/cli.v2/altsrc", "zombiezen.com/go/capnproto2", diff --git a/Gopkg.toml b/Gopkg.toml index 319d4f8b..da88ce70 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -6,8 +6,6 @@ name = "github.com/cloudflare/brotli-go" unused-packages = false - - [[constraint]] name = "github.com/facebookgo/grace" revision = "75cf19382434e82df4dd84953f566b8ad23d6e9e" @@ -70,10 +68,6 @@ name = "github.com/mholt/caddy" revision = "d3b731e9255b72d4571a5aac125634cf1b6031dc" -[[constraint]] - name = "github.com/coreos/go-oidc" - revision = "a93f71fdfe73d2c0f5413c0565eea0af6523a6df" - [[constraint]] name = "golang.org/x/crypto" branch = "master" # master required by github.com/miekg/dns @@ -89,3 +83,35 @@ [[constraint]] name = "github.com/mitchellh/mapstructure" version = "1.1.2" + +[[constraint]] + name = "github.com/jmoiron/sqlx" + version = "1.2.0" + +[[constraint]] + name = "github.com/go-sql-driver/mysql" + version = "1.4.1" + +[[constraint]] + name = "github.com/mattn/go-sqlite3" + version = "1.11.0" + +[[constraint]] + branch = "master" + name = "github.com/denisenkom/go-mssqldb" + +[[constraint]] + name = "github.com/kshvakov/clickhouse" + version = "1.3.9" + +[[constraint]] + name = "github.com/DATA-DOG/go-sqlmock" + version = "1.3.3" + +[[constraint]] + branch = "master" + name = "github.com/xo/dburl" + +[[constraint]] + branch = "v2" + name = "github.com/coreos/go-oidc" diff --git a/cfsetup.yaml b/cfsetup.yaml index ee2a6514..1f3aa8c3 100644 --- a/cfsetup.yaml +++ b/cfsetup.yaml @@ -93,3 +93,14 @@ stretch: &stretch - make test jessie: *stretch + +# cfsetup compose +default-stack: test_dbconnect +test_dbconnect: + compose: + up-args: + - --renew-anon-volumes + - --abort-on-container-exit + - --exit-code-from=cloudflared + files: + - dbconnect/integration_test/dbconnect.yaml \ No newline at end of file diff --git a/cmd/cloudflared/token/token.go b/cmd/cloudflared/token/token.go index 45baa186..93c7c62b 100644 --- a/cmd/cloudflared/token/token.go +++ b/cmd/cloudflared/token/token.go @@ -14,7 +14,7 @@ import ( "github.com/cloudflare/cloudflared/cmd/cloudflared/transfer" "github.com/cloudflare/cloudflared/log" "github.com/cloudflare/cloudflared/origin" - "github.com/coreos/go-oidc/jose" + "gopkg.in/coreos/go-oidc.v1/jose" ) const ( @@ -26,15 +26,15 @@ var logger = log.CreateLogger() type lock struct { lockFilePath string backoff *origin.BackoffHandler - sigHandler *signalHandler + sigHandler *signalHandler } type signalHandler struct { - sigChannel chan os.Signal - signals []os.Signal + sigChannel chan os.Signal + signals []os.Signal } -func (s *signalHandler) register(handler func()){ +func (s *signalHandler) register(handler 
func()) { s.sigChannel = make(chan os.Signal, 1) signal.Notify(s.sigChannel, s.signals...) go func(s *signalHandler) { @@ -59,8 +59,8 @@ func newLock(path string) *lock { return &lock{ lockFilePath: lockPath, backoff: &origin.BackoffHandler{MaxRetries: 7}, - sigHandler: &signalHandler{ - signals: []os.Signal{syscall.SIGINT, syscall.SIGTERM}, + sigHandler: &signalHandler{ + signals: []os.Signal{syscall.SIGINT, syscall.SIGTERM}, }, } } @@ -68,8 +68,8 @@ func newLock(path string) *lock { func (l *lock) Acquire() error { // Intercept SIGINT and SIGTERM to release lock before exiting l.sigHandler.register(func() { - l.deleteLockFile() - os.Exit(0) + l.deleteLockFile() + os.Exit(0) }) // Check for a path.lock file diff --git a/cmd/cloudflared/tunnel/cmd.go b/cmd/cloudflared/tunnel/cmd.go index f6f027c3..20409045 100644 --- a/cmd/cloudflared/tunnel/cmd.go +++ b/cmd/cloudflared/tunnel/cmd.go @@ -7,11 +7,12 @@ import ( "net" "net/url" "os" + "reflect" "runtime/trace" "sync" - "syscall" "time" + "github.com/cloudflare/cloudflared/dbconnect" "github.com/cloudflare/cloudflared/tunnelrpc/pogs" "github.com/cloudflare/cloudflared/connection" @@ -19,12 +20,10 @@ import ( "github.com/google/uuid" "github.com/getsentry/raven-go" - "golang.org/x/crypto/ssh/terminal" "github.com/cloudflare/cloudflared/cmd/cloudflared/buildinfo" "github.com/cloudflare/cloudflared/cmd/cloudflared/config" "github.com/cloudflare/cloudflared/cmd/cloudflared/updater" - "github.com/cloudflare/cloudflared/cmd/sqlgateway" "github.com/cloudflare/cloudflared/hello" "github.com/cloudflare/cloudflared/metrics" "github.com/cloudflare/cloudflared/origin" @@ -99,43 +98,7 @@ func Commands() []*cli.Command { ArgsUsage: " ", // can't be the empty string or we get the default output Hidden: false, }, - { - Name: "db", - Action: func(c *cli.Context) error { - tags := make(map[string]string) - tags["hostname"] = c.String("hostname") - raven.SetTagsContext(tags) - - fmt.Printf("\nSQL Database Password: ") - pass, err := terminal.ReadPassword(int(syscall.Stdin)) - if err != nil { - logger.Error(err) - } - - go sqlgateway.StartProxy(c, logger, string(pass)) - - raven.CapturePanic(func() { err = tunnel(c) }, nil) - if err != nil { - raven.CaptureError(err, nil) - } - return err - }, - Before: Before, - Usage: "SQL Gateway is an SQL over HTTP reverse proxy", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "db", - Value: true, - Usage: "Enable the SQL Gateway Proxy", - }, - &cli.StringFlag{ - Name: "address", - Value: "", - Usage: "Database connection string: db://user:pass", - }, - }, - Hidden: true, - }, + dbConnectCmd(), } var subcommands []*cli.Command @@ -559,6 +522,60 @@ func addPortIfMissing(uri *url.URL, port int) string { return fmt.Sprintf("%s:%d", uri.Hostname(), port) } +func dbConnectCmd() *cli.Command { + cmd := dbconnect.Cmd() + + // Append the tunnel commands so users can customize the daemon settings. + cmd.Flags = appendFlags(Flags(), cmd.Flags...) + + // Override before to run tunnel validation before dbconnect validation. + cmd.Before = func(c *cli.Context) error { + err := Before(c) + if err == nil { + err = dbconnect.CmdBefore(c) + } + return err + } + + // Override action to setup the Proxy, then if successful, start the tunnel daemon. + cmd.Action = func(c *cli.Context) error { + err := dbconnect.CmdAction(c) + if err == nil { + err = tunnel(c) + } + return err + } + + return cmd +} + +// appendFlags will append extra flags to a slice of flags. 
+// +// The cli package will panic if two flags exist with the same name, +// so if extraFlags contains a flag that was already defined, modify the +// original flags to use the extra version. +func appendFlags(flags []cli.Flag, extraFlags ...cli.Flag) []cli.Flag { + for _, extra := range extraFlags { + var found bool + + // Check if an extra flag overrides an existing flag. + for i, flag := range flags { + if reflect.DeepEqual(extra.Names(), flag.Names()) { + flags[i] = extra + found = true + break + } + } + + // Append the extra flag if it has nothing to override. + if !found { + flags = append(flags, extra) + } + } + + return flags +} + func tunnelFlags(shouldHide bool) []cli.Flag { return []cli.Flag{ &cli.StringFlag{ diff --git a/cmd/sqlgateway/sqlgateway.go b/cmd/sqlgateway/sqlgateway.go deleted file mode 100644 index cf73fab8..00000000 --- a/cmd/sqlgateway/sqlgateway.go +++ /dev/null @@ -1,148 +0,0 @@ -package sqlgateway - -import ( - "database/sql" - "encoding/json" - "fmt" - "math/rand" - "net/http" - "strings" - "time" - - _ "github.com/lib/pq" - cli "gopkg.in/urfave/cli.v2" - - "github.com/elgs/gosqljson" - - "github.com/gorilla/mux" - "github.com/sirupsen/logrus" -) - -type Message struct { - Connection Connection `json:"connection"` - Command string `json:"command"` - Params []interface{} `json:"params"` -} - -type Connection struct { - SSLMode string `json:"sslmode"` - Token string `json:"token"` -} - -type Response struct { - Columns []string `json:"columns"` - Rows [][]string `json:"rows"` - Error string `json:"error"` -} - -type Proxy struct { - Context *cli.Context - Router *mux.Router - Token string - User string - Password string - Driver string - Database string - Logger *logrus.Logger -} - -func StartProxy(c *cli.Context, logger *logrus.Logger, password string) error { - proxy := NewProxy(c, logger, password) - - logger.Infof("Starting SQL Gateway Proxy on port %s", strings.Split(c.String("url"), ":")[1]) - - err := http.ListenAndServe(":"+strings.Split(c.String("url"), ":")[1], proxy.Router) - if err != nil { - return err - } - - return nil -} - -func randID(n int, c *cli.Context) string { - charBytes := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890") - b := make([]byte, n) - for i := range b { - b[i] = charBytes[rand.Intn(len(charBytes))] - } - return fmt.Sprintf("%s&%s", c.String("hostname"), b) -} - -// db://user@dbname -func parseInfo(input string) (string, string, string) { - p1 := strings.Split(input, "://") - p2 := strings.Split(p1[1], "@") - return p1[0], p2[0], p2[1] -} - -func NewProxy(c *cli.Context, logger *logrus.Logger, pass string) *Proxy { - rand.Seed(time.Now().UnixNano()) - driver, user, dbname := parseInfo(c.String("address")) - proxy := Proxy{ - Context: c, - Router: mux.NewRouter(), - Token: randID(64, c), - Logger: logger, - User: user, - Password: pass, - Database: dbname, - Driver: driver, - } - - logger.Info(fmt.Sprintf(` - - -------------------- - SQL Gateway Proxy - Token: %s - -------------------- - - `, proxy.Token)) - - proxy.Router.HandleFunc("/", proxy.proxyRequest).Methods("POST") - return &proxy -} - -func (proxy *Proxy) proxyRequest(rw http.ResponseWriter, req *http.Request) { - var message Message - response := Response{} - - err := json.NewDecoder(req.Body).Decode(&message) - if err != nil { - proxy.Logger.Error(err) - http.Error(rw, fmt.Sprintf("400 - %s", err.Error()), http.StatusBadRequest) - return - } - - if message.Connection.Token != proxy.Token { - proxy.Logger.Error("Invalid token") - 
http.Error(rw, "400 - Invalid token", http.StatusBadRequest) - return - } - - connStr := fmt.Sprintf("user=%s password=%s dbname=%s sslmode=%s", proxy.User, proxy.Password, proxy.Database, message.Connection.SSLMode) - - db, err := sql.Open(proxy.Driver, connStr) - defer db.Close() - - if err != nil { - proxy.Logger.Error(err) - http.Error(rw, fmt.Sprintf("400 - %s", err.Error()), http.StatusBadRequest) - return - - } else { - proxy.Logger.Info("Forwarding SQL: ", message.Command) - rw.Header().Set("Content-Type", "application/json") - - headers, data, err := gosqljson.QueryDbToArray(db, "lower", message.Command, message.Params...) - - if err != nil { - proxy.Logger.Error(err) - http.Error(rw, fmt.Sprintf("400 - %s", err.Error()), http.StatusBadRequest) - return - - } else { - response = Response{headers, data, ""} - } - } - json.NewEncoder(rw).Encode(response) -} diff --git a/dbconnect/client.go b/dbconnect/client.go new file mode 100644 index 00000000..13aaf7ca --- /dev/null +++ b/dbconnect/client.go @@ -0,0 +1,145 @@ +package dbconnect + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +// Client is an interface to talk to any database. +// +// Currently, the only implementation is SQLClient, but its structure +// should be designed to handle a MongoClient or RedisClient in the future. +type Client interface { + Ping(context.Context) error + Submit(context.Context, *Command) (interface{}, error) +} + +// NewClient creates a database client based on its URL scheme. +func NewClient(ctx context.Context, originURL *url.URL) (Client, error) { + return NewSQLClient(ctx, originURL) +} + +// Command is a standard, non-vendor format for submitting database commands. +// +// When determining the scope of this struct, refer to the following litmus test: +// Could this (roughly) conform to SQL, Document-based, and Key-value command formats? +type Command struct { + Statement string `json:"statement"` + Arguments Arguments `json:"arguments,omitempty"` + Mode string `json:"mode,omitempty"` + Isolation string `json:"isolation,omitempty"` + Timeout time.Duration `json:"timeout,omitempty"` +} + +// Validate enforces the contract of Command: non empty statement (both in length and logic), +// lowercase mode and isolation, non-zero timeout, and valid Arguments. +func (cmd *Command) Validate() error { + if cmd.Statement == "" { + return fmt.Errorf("cannot provide an empty statement") + } + + if strings.Map(func(char rune) rune { + if char == ';' || unicode.IsSpace(char) { + return -1 + } + return char + }, cmd.Statement) == "" { + return fmt.Errorf("cannot provide a statement with no logic: '%s'", cmd.Statement) + } + + cmd.Mode = strings.ToLower(cmd.Mode) + cmd.Isolation = strings.ToLower(cmd.Isolation) + + if cmd.Timeout.Nanoseconds() <= 0 { + cmd.Timeout = 24 * time.Hour + } + + return cmd.Arguments.Validate() +} + +// UnmarshalJSON converts a byte representation of JSON into a Command, which is also validated. +func (cmd *Command) UnmarshalJSON(data []byte) error { + // Alias is required to avoid infinite recursion from the default UnmarshalJSON. + type Alias Command + alias := &struct { + *Alias + }{ + Alias: (*Alias)(cmd), + } + + err := json.Unmarshal(data, &alias) + if err == nil { + err = cmd.Validate() + } + + return err +} + +// Arguments is a wrapper for either map-based or array-based Command arguments. +// +// Each field is mutually-exclusive and some Client implementations may not +// support both fields (eg. 
MySQL does not accept named arguments). +type Arguments struct { + Named map[string]interface{} + Positional []interface{} +} + +// Validate enforces the contract of Arguments: non nil, mutually exclusive, and no empty or reserved keys. +func (args *Arguments) Validate() error { + if args.Named == nil { + args.Named = map[string]interface{}{} + } + if args.Positional == nil { + args.Positional = []interface{}{} + } + + if len(args.Named) > 0 && len(args.Positional) > 0 { + return fmt.Errorf("both named and positional arguments cannot be specified: %+v and %+v", args.Named, args.Positional) + } + + for key := range args.Named { + if key == "" { + return fmt.Errorf("named arguments cannot contain an empty key: %+v", args.Named) + } + if !utf8.ValidString(key) { + return fmt.Errorf("named argument does not conform to UTF-8 encoding: %s", key) + } + if strings.HasPrefix(key, "_") { + return fmt.Errorf("named argument cannot start with a reserved keyword '_': %s", key) + } + if unicode.IsNumber([]rune(key)[0]) { + return fmt.Errorf("named argument cannot start with a number: %s", key) + } + } + + return nil +} + +// UnmarshalJSON converts a byte representation of JSON into Arguments, which is also validated. +func (args *Arguments) UnmarshalJSON(data []byte) error { + var obj interface{} + err := json.Unmarshal(data, &obj) + if err != nil { + return err + } + + named, ok := obj.(map[string]interface{}) + if ok { + args.Named = named + } else { + positional, ok := obj.([]interface{}) + if ok { + args.Positional = positional + } else { + return fmt.Errorf("arguments must either be an object {\"0\":\"val\"} or an array [\"val\"]: %s", string(data)) + } + } + + return args.Validate() +} diff --git a/dbconnect/client_test.go b/dbconnect/client_test.go new file mode 100644 index 00000000..39cd2a3c --- /dev/null +++ b/dbconnect/client_test.go @@ -0,0 +1,183 @@ +package dbconnect + +import ( + "encoding/json" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestCommandValidateEmpty(t *testing.T) { + stmts := []string{ + "", + ";", + " \n\t", + ";\n;\t;", + } + + for _, stmt := range stmts { + cmd := Command{Statement: stmt} + + assert.Error(t, cmd.Validate(), stmt) + } +} + +func TestCommandValidateMode(t *testing.T) { + modes := []string{ + "", + "query", + "ExEc", + "PREPARE", + } + + for _, mode := range modes { + cmd := Command{Statement: "Ok", Mode: mode} + + assert.NoError(t, cmd.Validate(), mode) + assert.Equal(t, strings.ToLower(mode), cmd.Mode) + } +} + +func TestCommandValidateIsolation(t *testing.T) { + isos := []string{ + "", + "default", + "read_committed", + "SNAPshot", + } + + for _, iso := range isos { + cmd := Command{Statement: "Ok", Isolation: iso} + + assert.NoError(t, cmd.Validate(), iso) + assert.Equal(t, strings.ToLower(iso), cmd.Isolation) + } +} + +func TestCommandValidateTimeout(t *testing.T) { + cmd := Command{Statement: "Ok", Timeout: 0} + + assert.NoError(t, cmd.Validate()) + assert.NotZero(t, cmd.Timeout) + + cmd = Command{Statement: "Ok", Timeout: 1 * time.Second} + + assert.NoError(t, cmd.Validate()) + assert.Equal(t, 1*time.Second, cmd.Timeout) +} + +func TestCommandValidateArguments(t *testing.T) { + cmd := Command{Statement: "Ok", Arguments: Arguments{ + Named: map[string]interface{}{"key": "val"}, + Positional: []interface{}{"val"}, + }} + + assert.Error(t, cmd.Validate()) +} + +func TestCommandUnmarshalJSON(t *testing.T) { + strs := []string{ + "{\"statement\":\"Ok\"}", + "{\"statement\":\"Ok\",\"arguments\":[0, 3.14, 
\"apple\"],\"mode\":\"query\"}", + "{\"statement\":\"Ok\",\"isolation\":\"read_uncommitted\",\"timeout\":1000}", + } + + for _, str := range strs { + var cmd Command + assert.NoError(t, json.Unmarshal([]byte(str), &cmd), str) + } + + strs = []string{ + "", + "\"", + "{}", + "{\"argument\":{\"key\":\"val\"}}", + "{\"statement\":[\"Ok\"]}", + } + + for _, str := range strs { + var cmd Command + assert.Error(t, json.Unmarshal([]byte(str), &cmd), str) + } +} + +func TestArgumentsValidateNotNil(t *testing.T) { + args := Arguments{} + + assert.NoError(t, args.Validate()) + assert.NotNil(t, args.Named) + assert.NotNil(t, args.Positional) +} + +func TestArgumentsValidateMutuallyExclusive(t *testing.T) { + args := []Arguments{ + Arguments{}, + Arguments{Named: map[string]interface{}{"key": "val"}}, + Arguments{Positional: []interface{}{"val"}}, + } + + for _, arg := range args { + assert.NoError(t, arg.Validate()) + assert.False(t, len(arg.Named) > 0 && len(arg.Positional) > 0) + } + + args = []Arguments{ + Arguments{ + Named: map[string]interface{}{"key": "val"}, + Positional: []interface{}{"val"}, + }, + } + + for _, arg := range args { + assert.Error(t, arg.Validate()) + assert.True(t, len(arg.Named) > 0 && len(arg.Positional) > 0) + } +} + +func TestArgumentsValidateKeys(t *testing.T) { + keys := []string{ + "", + "_", + "_key", + "1", + "1key", + "\xf0\x28\x8c\xbc", // non-utf8 + } + + for _, key := range keys { + args := Arguments{Named: map[string]interface{}{key: "val"}} + + assert.Error(t, args.Validate(), key) + } +} + +func TestArgumentsUnmarshalJSON(t *testing.T) { + strs := []string{ + "{}", + "{\"key\":\"val\"}", + "{\"key\":[1, 3.14, {\"key\":\"val\"}]}", + "[]", + "[\"key\",\"val\"]", + "[{}]", + } + + for _, str := range strs { + var args Arguments + assert.NoError(t, json.Unmarshal([]byte(str), &args), str) + } + + strs = []string{ + "", + "\"", + "1", + "\"key\"", + "{\"key\",\"val\"}", + } + + for _, str := range strs { + var args Arguments + assert.Error(t, json.Unmarshal([]byte(str), &args), str) + } +} diff --git a/dbconnect/cmd.go b/dbconnect/cmd.go new file mode 100644 index 00000000..e24fec4d --- /dev/null +++ b/dbconnect/cmd.go @@ -0,0 +1,157 @@ +package dbconnect + +import ( + "context" + "log" + "net" + "strconv" + + "gopkg.in/urfave/cli.v2" + "gopkg.in/urfave/cli.v2/altsrc" +) + +// Cmd is the entrypoint command for dbconnect. +// +// The tunnel package is responsible for appending this to tunnel.Commands(). +func Cmd() *cli.Command { + return &cli.Command{ + Category: "Database Connect (ALPHA)", + Name: "db-connect", + Usage: "Access your SQL database from Cloudflare Workers or the browser", + ArgsUsage: " ", + Description: ` + Creates a connection between your database and the Cloudflare edge. + Now you can execute SQL commands anywhere you can send HTTPS requests. 
+ + Connect your database with any of the following commands, you can also try the "playground" without a database: + + cloudflared db-connect --hostname sql.mysite.com --url postgres://user:pass@localhost?sslmode=disable \ + --auth-domain mysite.cloudflareaccess.com --application-aud my-access-policy-tag + cloudflared db-connect --hostname sql-dev.mysite.com --url mysql://localhost --insecure + cloudflared db-connect --playground + + Requests should be authenticated using Cloudflare Access, learn more about how to enable it here: + + https://developers.cloudflare.com/access/service-auth/service-token/ + `, + Flags: []cli.Flag{ + altsrc.NewStringFlag(&cli.StringFlag{ + Name: "url", + Usage: "URL to the database (eg. postgres://user:pass@localhost?sslmode=disable)", + EnvVars: []string{"TUNNEL_URL"}, + }), + altsrc.NewStringFlag(&cli.StringFlag{ + Name: "hostname", + Usage: "Hostname to accept commands over HTTPS (eg. sql.mysite.com)", + EnvVars: []string{"TUNNEL_HOSTNAME"}, + }), + altsrc.NewStringFlag(&cli.StringFlag{ + Name: "auth-domain", + Usage: "Cloudflare Access authentication domain for your account (eg. mysite.cloudflareaccess.com)", + EnvVars: []string{"TUNNEL_ACCESS_AUTH_DOMAIN"}, + }), + altsrc.NewStringFlag(&cli.StringFlag{ + Name: "application-aud", + Usage: "Cloudflare Access application \"AUD\" to verify JWTs from requests", + EnvVars: []string{"TUNNEL_ACCESS_APPLICATION_AUD"}, + }), + altsrc.NewBoolFlag(&cli.BoolFlag{ + Name: "insecure", + Usage: "Disable authentication, the database will be open to the Internet", + Value: false, + EnvVars: []string{"TUNNEL_ACCESS_INSECURE"}, + }), + altsrc.NewBoolFlag(&cli.BoolFlag{ + Name: "playground", + Usage: "Run a temporary, in-memory SQLite3 database for testing", + Value: false, + EnvVars: []string{"TUNNEL_HELLO_WORLD"}, + }), + altsrc.NewStringFlag(&cli.StringFlag{ + Name: "loglevel", + Value: "debug", // Make it more verbose than the tunnel default 'info'. + EnvVars: []string{"TUNNEL_LOGLEVEL"}, + Hidden: true, + }), + }, + Before: CmdBefore, + Action: CmdAction, + Hidden: true, + } +} + +// CmdBefore runs some validation checks before running the command. +func CmdBefore(c *cli.Context) error { + // Show the help text is no flags are specified. + if c.NumFlags() == 0 { + return cli.ShowSubcommandHelp(c) + } + + // Hello-world and playground are synonymous with each other, + // unset hello-world to prevent tunnel from initializing the hello package. + if c.IsSet("hello-world") { + c.Set("playground", "true") + c.Set("hello-world", "false") + } + + // Unix-socket database urls are supported, but the logic is the same as url. + if c.IsSet("unix-socket") { + c.Set("url", c.String("unix-socket")) + c.Set("unix-socket", "") + } + + // When playground mode is enabled, run with an in-memory database. + if c.IsSet("playground") { + c.Set("url", "sqlite3::memory:?cache=shared") + c.Set("insecure", strconv.FormatBool(!c.IsSet("auth-domain") && !c.IsSet("application-aud"))) + } + + // At this point, insecure configurations are valid. + if c.Bool("insecure") { + return nil + } + + // Ensure that secure configurations specify a hostname, domain, and tag for JWT validation. + if !c.IsSet("hostname") || !c.IsSet("auth-domain") || !c.IsSet("application-aud") { + log.Fatal("must specify --hostname, --auth-domain, and --application-aud unless you want to run in --insecure mode") + } + + return nil +} + +// CmdAction starts the Proxy and sets the url in cli.Context to point to the Proxy address. 
+func CmdAction(c *cli.Context) error { + // STOR-612: sync with context in tunnel daemon. + ctx := context.Background() + + var proxy *Proxy + var err error + if c.Bool("insecure") { + proxy, err = NewInsecureProxy(ctx, c.String("url")) + } else { + proxy, err = NewSecureProxy(ctx, c.String("url"), c.String("auth-domain"), c.String("application-aud")) + } + + if err != nil { + log.Fatal(err) + return err + } + + listenerC := make(chan net.Listener) + defer close(listenerC) + + // Since the Proxy should only talk to the tunnel daemon, find the next available + // localhost port and start to listen to requests. + go func() { + err := proxy.Start(ctx, "127.0.0.1:", listenerC) + if err != nil { + log.Fatal(err) + } + }() + + // Block until the Proxy is online, retrieve its address, and change the url to point to it. + // This is effectively "handing over" control to the tunnel package so it can run the tunnel daemon. + c.Set("url", "https://"+(<-listenerC).Addr().String()) + + return nil +} diff --git a/dbconnect/cmd_test.go b/dbconnect/cmd_test.go new file mode 100644 index 00000000..e8b415f0 --- /dev/null +++ b/dbconnect/cmd_test.go @@ -0,0 +1,27 @@ +package dbconnect + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "gopkg.in/urfave/cli.v2" +) + +func TestCmd(t *testing.T) { + tests := [][]string{ + {"cloudflared", "db-connect", "--playground"}, + {"cloudflared", "db-connect", "--playground", "--hostname", "sql.mysite.com"}, + {"cloudflared", "db-connect", "--url", "sqlite3::memory:?cache=shared", "--insecure"}, + {"cloudflared", "db-connect", "--url", "sqlite3::memory:?cache=shared", "--hostname", "sql.mysite.com", "--auth-domain", "mysite.cloudflareaccess.com", "--application-aud", "aud"}, + } + + app := &cli.App{ + Name: "cloudflared", + Commands: []*cli.Command{Cmd()}, + } + + for _, test := range tests { + assert.NoError(t, app.Run(test)) + } +} diff --git a/dbconnect/integration_test/dbconnect.yaml b/dbconnect/integration_test/dbconnect.yaml new file mode 100644 index 00000000..d549d054 --- /dev/null +++ b/dbconnect/integration_test/dbconnect.yaml @@ -0,0 +1,78 @@ +# docker-compose -f ./dbconnect/integration_test/dbconnect.yaml up --build --force-recreate --renew-anon-volumes --exit-code-from cloudflared + +version: "2.3" +networks: + test-dbconnect-network: + driver: bridge +services: + cloudflared: + build: + context: ../../ + dockerfile: dev.Dockerfile + command: go test github.com/cloudflare/cloudflared/dbconnect/integration_test -v + depends_on: + postgres: + condition: service_healthy + mysql: + condition: service_healthy + mssql: + condition: service_healthy + clickhouse: + condition: service_healthy + environment: + DBCONNECT_INTEGRATION_TEST: "true" + POSTGRESQL_URL: postgres://postgres:secret@postgres/db?sslmode=disable + MYSQL_URL: mysql://root:secret@mysql/db?tls=false + MSSQL_URL: mssql://sa:secret12345!@mssql + CLICKHOUSE_URL: clickhouse://clickhouse:9000/db + networks: + - test-dbconnect-network + postgres: + image: postgres:11.4-alpine + environment: + POSTGRES_DB: db + POSTGRES_PASSWORD: secret + healthcheck: + test: ["CMD", "pg_isready", "-U", "postgres"] + start_period: 3s + interval: 1s + timeout: 3s + retries: 10 + networks: + - test-dbconnect-network + mysql: + image: mysql:8.0 + environment: + MYSQL_DATABASE: db + MYSQL_ROOT_PASSWORD: secret + healthcheck: + test: ["CMD", "mysqladmin", "ping"] + start_period: 3s + interval: 1s + timeout: 3s + retries: 10 + networks: + - test-dbconnect-network + mssql: + image: 
mcr.microsoft.com/mssql/server:2017-CU8-ubuntu + environment: + ACCEPT_EULA: "Y" + SA_PASSWORD: secret12345! + healthcheck: + test: ["CMD", "/opt/mssql-tools/bin/sqlcmd", "-S", "localhost", "-U", "sa", "-P", "secret12345!", "-Q", "SELECT 1"] + start_period: 3s + interval: 1s + timeout: 3s + retries: 10 + networks: + - test-dbconnect-network + clickhouse: + image: yandex/clickhouse-server:19.11 + healthcheck: + test: ["CMD", "clickhouse-client", "--query", "SELECT 1"] + start_period: 3s + interval: 1s + timeout: 3s + retries: 10 + networks: + - test-dbconnect-network diff --git a/dbconnect/integration_test/sql_test.go b/dbconnect/integration_test/sql_test.go new file mode 100644 index 00000000..2e91b2bc --- /dev/null +++ b/dbconnect/integration_test/sql_test.go @@ -0,0 +1,265 @@ +package dbconnect_test + +import ( + "context" + "log" + "net/url" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/cloudflare/cloudflared/dbconnect" +) + +func TestIntegrationPostgreSQL(t *testing.T) { + ctx, pq := helperNewSQLClient(t, "POSTGRESQL_URL") + + err := pq.Ping(ctx) + assert.NoError(t, err) + + _, err = pq.Submit(ctx, &dbconnect.Command{ + Statement: "CREATE TABLE t (a TEXT, b UUID, c JSON, d INET[], e SERIAL);", + Mode: "exec", + }) + assert.NoError(t, err) + + _, err = pq.Submit(ctx, &dbconnect.Command{ + Statement: "INSERT INTO t VALUES ($1, $2, $3, $4);", + Mode: "exec", + Arguments: dbconnect.Arguments{ + Positional: []interface{}{ + "text", + "6b8d686d-bd8e-43bc-b09a-cfcbbe702c10", + "{\"bool\":true,\"array\":[\"a\", 1, 3.14],\"embed\":{\"num\":21}}", + []string{"1.1.1.1", "1.0.0.1"}, + }, + }, + }) + assert.NoError(t, err) + + _, err = pq.Submit(ctx, &dbconnect.Command{ + Statement: "UPDATE t SET b = NULL;", + Mode: "exec", + }) + assert.NoError(t, err) + + res, err := pq.Submit(ctx, &dbconnect.Command{ + Statement: "SELECT * FROM t;", + Mode: "query", + }) + assert.NoError(t, err) + assert.IsType(t, make([]map[string]interface{}, 0), res) + + actual := res.([]map[string]interface{})[0] + expected := map[string]interface{}{ + "a": "text", + "b": nil, + "c": map[string]interface{}{ + "bool": true, + "array": []interface{}{"a", float64(1), 3.14}, + "embed": map[string]interface{}{"num": float64(21)}, + }, + "d": "{1.1.1.1,1.0.0.1}", + "e": int64(1), + } + assert.EqualValues(t, expected, actual) + + _, err = pq.Submit(ctx, &dbconnect.Command{ + Statement: "DROP TABLE t;", + Mode: "exec", + }) + assert.NoError(t, err) +} + +func TestIntegrationMySQL(t *testing.T) { + ctx, my := helperNewSQLClient(t, "MYSQL_URL") + + err := my.Ping(ctx) + assert.NoError(t, err) + + _, err = my.Submit(ctx, &dbconnect.Command{ + Statement: "CREATE TABLE t (a CHAR, b TINYINT, c FLOAT, d JSON, e YEAR);", + Mode: "exec", + }) + assert.NoError(t, err) + + _, err = my.Submit(ctx, &dbconnect.Command{ + Statement: "INSERT INTO t VALUES (?, ?, ?, ?, ?);", + Mode: "exec", + Arguments: dbconnect.Arguments{ + Positional: []interface{}{ + "a", + 10, + 3.14, + "{\"bool\":true}", + 2000, + }, + }, + }) + assert.NoError(t, err) + + _, err = my.Submit(ctx, &dbconnect.Command{ + Statement: "ALTER TABLE t ADD COLUMN f GEOMETRY;", + Mode: "exec", + }) + assert.NoError(t, err) + + res, err := my.Submit(ctx, &dbconnect.Command{ + Statement: "SELECT * FROM t;", + Mode: "query", + }) + assert.NoError(t, err) + assert.IsType(t, make([]map[string]interface{}, 0), res) + + actual := res.([]map[string]interface{})[0] + expected := map[string]interface{}{ + "a": "a", + "b": float64(10), + "c": 3.14, + "d": 
map[string]interface{}{"bool": true}, + "e": float64(2000), + "f": nil, + } + assert.EqualValues(t, expected, actual) + + _, err = my.Submit(ctx, &dbconnect.Command{ + Statement: "DROP TABLE t;", + Mode: "exec", + }) + assert.NoError(t, err) +} + +func TestIntegrationMSSQL(t *testing.T) { + ctx, ms := helperNewSQLClient(t, "MSSQL_URL") + + err := ms.Ping(ctx) + assert.NoError(t, err) + + _, err = ms.Submit(ctx, &dbconnect.Command{ + Statement: "CREATE TABLE t (a BIT, b DECIMAL, c MONEY, d TEXT);", + Mode: "exec"}) + assert.NoError(t, err) + + _, err = ms.Submit(ctx, &dbconnect.Command{ + Statement: "INSERT INTO t VALUES (?, ?, ?, ?);", + Mode: "exec", + Arguments: dbconnect.Arguments{ + Positional: []interface{}{ + 0, + 3, + "$0.99", + "text", + }, + }, + }) + assert.NoError(t, err) + + _, err = ms.Submit(ctx, &dbconnect.Command{ + Statement: "UPDATE t SET d = NULL;", + Mode: "exec", + }) + assert.NoError(t, err) + + res, err := ms.Submit(ctx, &dbconnect.Command{ + Statement: "SELECT * FROM t;", + Mode: "query", + }) + assert.NoError(t, err) + assert.IsType(t, make([]map[string]interface{}, 0), res) + + actual := res.([]map[string]interface{})[0] + expected := map[string]interface{}{ + "a": false, + "b": float64(3), + "c": float64(0.99), + "d": nil, + } + assert.EqualValues(t, expected, actual) + + _, err = ms.Submit(ctx, &dbconnect.Command{ + Statement: "DROP TABLE t;", + Mode: "exec", + }) + assert.NoError(t, err) +} + +func TestIntegrationClickhouse(t *testing.T) { + ctx, ch := helperNewSQLClient(t, "CLICKHOUSE_URL") + + err := ch.Ping(ctx) + assert.NoError(t, err) + + _, err = ch.Submit(ctx, &dbconnect.Command{ + Statement: "CREATE TABLE t (a UUID, b String, c Float64, d UInt32, e Int16, f Array(Enum8('a'=1, 'b'=2, 'c'=3))) engine=Memory;", + Mode: "exec", + }) + assert.NoError(t, err) + + _, err = ch.Submit(ctx, &dbconnect.Command{ + Statement: "INSERT INTO t VALUES (?, ?, ?, ?, ?, ?);", + Mode: "exec", + Arguments: dbconnect.Arguments{ + Positional: []interface{}{ + "ec65f626-6f50-4c86-9628-6314ef1edacd", + "", + 3.14, + 314, + -144, + []string{"a", "b", "c"}, + }, + }, + }) + assert.NoError(t, err) + + res, err := ch.Submit(ctx, &dbconnect.Command{ + Statement: "SELECT * FROM t;", + Mode: "query", + }) + assert.NoError(t, err) + assert.IsType(t, make([]map[string]interface{}, 0), res) + + actual := res.([]map[string]interface{})[0] + expected := map[string]interface{}{ + "a": "ec65f626-6f50-4c86-9628-6314ef1edacd", + "b": "", + "c": float64(3.14), + "d": uint32(314), + "e": int16(-144), + "f": []string{"a", "b", "c"}, + } + assert.EqualValues(t, expected, actual) + + _, err = ch.Submit(ctx, &dbconnect.Command{ + Statement: "DROP TABLE t;", + Mode: "exec", + }) + assert.NoError(t, err) +} + +func helperNewSQLClient(t *testing.T, env string) (context.Context, dbconnect.Client) { + _, ok := os.LookupEnv("DBCONNECT_INTEGRATION_TEST") + if ok { + t.Helper() + } else { + t.SkipNow() + } + + val, ok := os.LookupEnv(env) + if !ok { + log.Fatalf("must provide database url as environment variable: %s", env) + } + + parsed, err := url.Parse(val) + if err != nil { + log.Fatalf("cannot provide invalid database url: %s=%s", env, val) + } + + ctx := context.Background() + client, err := dbconnect.NewSQLClient(ctx, parsed) + if err != nil { + log.Fatalf("could not start test client: %s", err) + } + + return ctx, client +} diff --git a/dbconnect/proxy.go b/dbconnect/proxy.go new file mode 100644 index 00000000..6a36b4bf --- /dev/null +++ b/dbconnect/proxy.go @@ -0,0 +1,274 @@ +package dbconnect + 
+import ( + "context" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "time" + + "github.com/cloudflare/cloudflared/hello" + "github.com/cloudflare/cloudflared/validation" + "github.com/gorilla/mux" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + timing "github.com/mitchellh/go-server-timing" +) + +// Proxy is an HTTP server that proxies requests to a Client. +type Proxy struct { + client Client + accessValidator *validation.Access + logger *logrus.Logger +} + +// NewInsecureProxy creates a Proxy that talks to a Client at an origin. +// +// In insecure mode, the Proxy will allow all Command requests. +func NewInsecureProxy(ctx context.Context, origin string) (*Proxy, error) { + originURL, err := url.Parse(origin) + if err != nil { + return nil, errors.Wrap(err, "must provide a valid database url") + } + + client, err := NewClient(ctx, originURL) + if err != nil { + return nil, err + } + + err = client.Ping(ctx) + if err != nil { + return nil, errors.Wrap(err, "could not connect to the database") + } + + return &Proxy{client, nil, logrus.New()}, nil +} + +// NewSecureProxy creates a Proxy that talks to a Client at an origin. +// +// In secure mode, the Proxy will reject any Command requests that are +// not authenticated by Cloudflare Access with a valid JWT. +func NewSecureProxy(ctx context.Context, origin, authDomain, applicationAUD string) (*Proxy, error) { + proxy, err := NewInsecureProxy(ctx, origin) + if err != nil { + return nil, err + } + + validator, err := validation.NewAccessValidator(ctx, authDomain, authDomain, applicationAUD) + if err != nil { + return nil, err + } + + proxy.accessValidator = validator + return proxy, err +} + +// IsInsecure gets whether the Proxy will accept a Command from any source. +func (proxy *Proxy) IsInsecure() bool { + return proxy.accessValidator == nil +} + +// IsAllowed checks whether a http.Request is allowed to receive data. +// +// By default, requests must pass through Cloudflare Access for authentication. +// If the proxy is explicitly set to insecure mode, all requests will be allowed. +func (proxy *Proxy) IsAllowed(r *http.Request, verbose ...bool) bool { + if proxy.IsInsecure() { + return true + } + + // Access and Tunnel should prevent bad JWTs from even reaching the origin, + // but validate tokens anyway as an abundance of caution. + err := proxy.accessValidator.ValidateRequest(r.Context(), r) + if err == nil { + return true + } + + // Warn administrators that invalid JWTs are being rejected. This is indicative + // of either a misconfiguration of the CLI or a massive failure of upstream systems. + if len(verbose) > 0 { + proxy.httpLog(r, err).Error("Failed JWT authentication") + } + + return false +} + +// Start the Proxy at a given address and notify the listener channel when the server is online. +func (proxy *Proxy) Start(ctx context.Context, addr string, listenerC chan<- net.Listener) error { + // STOR-611: use a separate listener and consider web socket support. + httpListener, err := hello.CreateTLSListener(addr) + if err != nil { + return errors.Wrapf(err, "could not create listener at %s", addr) + } + + errC := make(chan error) + defer close(errC) + + // Starts the HTTP server and begins to serve requests. + go func() { + errC <- proxy.httpListen(ctx, httpListener) + }() + + // Continually ping the server until it comes online or 10 attempts fail. 
+ go func() { + var err error + for i := 0; i < 10; i++ { + _, err = http.Get("http://" + httpListener.Addr().String()) + + // Once no error was detected, notify the listener channel and return. + if err == nil { + listenerC <- httpListener + return + } + + // Backoff between requests to ping the server. + <-time.After(1 * time.Second) + } + errC <- errors.Wrap(err, "took too long for the http server to start") + }() + + return <-errC +} + +// httpListen starts the httpServer and blocks until the context closes. +func (proxy *Proxy) httpListen(ctx context.Context, listener net.Listener) error { + httpServer := &http.Server{ + Addr: listener.Addr().String(), + Handler: timing.Middleware(proxy.httpRouter(), nil), + ReadTimeout: 10 * time.Second, + WriteTimeout: 60 * time.Second, + IdleTimeout: 60 * time.Second, + } + + go func() { + <-ctx.Done() + httpServer.Close() + listener.Close() + }() + + return httpServer.Serve(listener) +} + +// httpRouter creates a mux.Router for the Proxy. +func (proxy *Proxy) httpRouter() *mux.Router { + router := mux.NewRouter() + + router.HandleFunc("/ping", proxy.httpPing()).Methods("GET", "HEAD") + router.HandleFunc("/submit", proxy.httpSubmit()).Methods("POST") + + return router +} + +// httpPing tests the connection to the database. +// +// By default, this endpoint is unauthenticated to allow for health checks. +// To enable authentication, Cloudflare Access must be enabled on this route. +func (proxy *Proxy) httpPing() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + metric := timing.FromContext(ctx).NewMetric("db").Start() + err := proxy.client.Ping(ctx) + metric.Stop() + + if err == nil { + proxy.httpRespond(w, r, http.StatusOK, "") + } else { + proxy.httpRespondErr(w, r, http.StatusInternalServerError, err) + } + } +} + +// httpSubmit sends a command to the database and returns its response. +// +// By default, this endpoint will reject requests that do not pass through Cloudflare Access. +// To disable authentication, the --insecure flag must be specified in the command line. +func (proxy *Proxy) httpSubmit() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !proxy.IsAllowed(r, true) { + proxy.httpRespondErr(w, r, http.StatusForbidden, fmt.Errorf("")) + return + } + + var cmd Command + err := json.NewDecoder(r.Body).Decode(&cmd) + if err != nil { + proxy.httpRespondErr(w, r, http.StatusBadRequest, err) + return + } + + ctx := r.Context() + metric := timing.FromContext(ctx).NewMetric("db").Start() + data, err := proxy.client.Submit(ctx, &cmd) + metric.Stop() + + if err != nil { + proxy.httpRespondErr(w, r, http.StatusUnprocessableEntity, err) + return + } + + w.Header().Set("Content-type", "application/json") + err = json.NewEncoder(w).Encode(data) + if err != nil { + proxy.httpRespondErr(w, r, http.StatusInternalServerError, err) + } + } +} + +// httpRespond writes a status code and string response to the response writer. +func (proxy *Proxy) httpRespond(w http.ResponseWriter, r *http.Request, status int, message string) { + w.WriteHeader(status) + + // Only expose the message detail of the reponse if the request is not HEAD + // and the user is authenticated. For example, this prevents an unauthenticated + // failed health check from accidentally leaking sensitive information about the Client. 
+ if r.Method != http.MethodHead && proxy.IsAllowed(r) { + if message == "" { + message = http.StatusText(status) + } + fmt.Fprint(w, message) + } +} + +// httpRespondErr is similar to httpRespond, except it formats errors to be more friendly. +func (proxy *Proxy) httpRespondErr(w http.ResponseWriter, r *http.Request, defaultStatus int, err error) { + status, err := httpError(defaultStatus, err) + + proxy.httpRespond(w, r, status, err.Error()) + proxy.httpLog(r, err).Warn("Database connect error") +} + +// httpLog returns a logrus.Entry that is formatted to output a request Cf-ray. +func (proxy *Proxy) httpLog(r *http.Request, err error) *logrus.Entry { + return proxy.logger.WithContext(r.Context()).WithField("CF-RAY", r.Header.Get("Cf-ray")).WithError(err) +} + +// httpError extracts common errors and returns an status code and friendly error. +func httpError(defaultStatus int, err error) (int, error) { + if err == nil { + return http.StatusNotImplemented, fmt.Errorf("error expected but found none") + } + + if err == io.EOF { + return http.StatusBadRequest, fmt.Errorf("request body cannot be empty") + } + + if err == context.DeadlineExceeded { + return http.StatusRequestTimeout, err + } + + _, ok := err.(net.Error) + if ok { + return http.StatusRequestTimeout, err + } + + if err == context.Canceled { + // Does not exist in Golang, but would be: http.StatusClientClosedWithoutResponse + return 444, err + } + + return defaultStatus, err +} diff --git a/dbconnect/proxy_test.go b/dbconnect/proxy_test.go new file mode 100644 index 00000000..daf120d3 --- /dev/null +++ b/dbconnect/proxy_test.go @@ -0,0 +1,238 @@ +package dbconnect + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/gorilla/mux" + + "github.com/stretchr/testify/assert" +) + +func TestNewInsecureProxy(t *testing.T) { + origins := []string{ + "", + ":/", + "http://localhost", + "tcp://localhost:9000?debug=true", + "mongodb://127.0.0.1", + } + + for _, origin := range origins { + proxy, err := NewInsecureProxy(context.Background(), origin) + + assert.Error(t, err) + assert.Empty(t, proxy) + } +} + +func TestProxyIsAllowed(t *testing.T) { + proxy := helperNewProxy(t) + req := httptest.NewRequest("GET", "https://1.1.1.1/ping", nil) + assert.True(t, proxy.IsAllowed(req)) + + proxy = helperNewProxy(t, true) + req.Header.Set("Cf-access-jwt-assertion", "xxx") + assert.False(t, proxy.IsAllowed(req)) +} + +func TestProxyStart(t *testing.T) { + proxy := helperNewProxy(t) + ctx := context.Background() + listenerC := make(chan net.Listener) + + err := proxy.Start(ctx, "1.1.1.1:", listenerC) + assert.Error(t, err) + + err = proxy.Start(ctx, "127.0.0.1:-1", listenerC) + assert.Error(t, err) + + ctx, cancel := context.WithTimeout(ctx, 0) + defer cancel() + + err = proxy.Start(ctx, "127.0.0.1:", listenerC) + assert.IsType(t, http.ErrServerClosed, err) +} + +func TestProxyHTTPRouter(t *testing.T) { + proxy := helperNewProxy(t) + router := proxy.httpRouter() + + tests := []struct { + path string + method string + valid bool + }{ + {"", "GET", false}, + {"/", "GET", false}, + {"/ping", "GET", true}, + {"/ping", "HEAD", true}, + {"/ping", "POST", false}, + {"/submit", "POST", true}, + {"/submit", "GET", false}, + {"/submit/extra", "POST", false}, + } + + for _, test := range tests { + match := &mux.RouteMatch{} + ok := router.Match(httptest.NewRequest(test.method, "https://1.1.1.1"+test.path, nil), match) + + assert.True(t, ok == test.valid, test.path) + } +} + +func 
TestProxyHTTPPing(t *testing.T) { + proxy := helperNewProxy(t) + + server := httptest.NewServer(proxy.httpPing()) + defer server.Close() + client := server.Client() + + res, err := client.Get(server.URL) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) + assert.Equal(t, int64(2), res.ContentLength) + + res, err = client.Head(server.URL) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) + assert.Equal(t, int64(-1), res.ContentLength) +} + +func TestProxyHTTPSubmit(t *testing.T) { + proxy := helperNewProxy(t) + + server := httptest.NewServer(proxy.httpSubmit()) + defer server.Close() + client := server.Client() + + tests := []struct { + input string + status int + output string + }{ + {"", http.StatusBadRequest, "request body cannot be empty"}, + {"{}", http.StatusBadRequest, "cannot provide an empty statement"}, + {"{\"statement\":\"Ok\"}", http.StatusUnprocessableEntity, "cannot provide invalid sql mode: ''"}, + {"{\"statement\":\"Ok\",\"mode\":\"query\"}", http.StatusUnprocessableEntity, "near \"Ok\": syntax error"}, + {"{\"statement\":\"CREATE TABLE t (a INT);\",\"mode\":\"exec\"}", http.StatusOK, "{\"last_insert_id\":0,\"rows_affected\":0}\n"}, + } + + for _, test := range tests { + res, err := client.Post(server.URL, "application/json", strings.NewReader(test.input)) + + assert.NoError(t, err) + assert.Equal(t, test.status, res.StatusCode) + if res.StatusCode > http.StatusOK { + assert.Equal(t, "text/plain; charset=utf-8", res.Header.Get("Content-type")) + } else { + assert.Equal(t, "application/json", res.Header.Get("Content-type")) + } + + data, err := ioutil.ReadAll(res.Body) + defer res.Body.Close() + str := string(data) + + assert.NoError(t, err) + assert.Equal(t, test.output, str) + } +} + +func TestProxyHTTPSubmitForbidden(t *testing.T) { + proxy := helperNewProxy(t, true) + + server := httptest.NewServer(proxy.httpSubmit()) + defer server.Close() + client := server.Client() + + res, err := client.Get(server.URL) + + assert.NoError(t, err) + assert.Equal(t, http.StatusForbidden, res.StatusCode) + assert.Zero(t, res.ContentLength) +} + +func TestProxyHTTPRespond(t *testing.T) { + proxy := helperNewProxy(t) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + proxy.httpRespond(w, r, http.StatusAccepted, "Hello") + })) + defer server.Close() + client := server.Client() + + res, err := client.Get(server.URL) + assert.NoError(t, err) + assert.Equal(t, http.StatusAccepted, res.StatusCode) + assert.Equal(t, int64(5), res.ContentLength) + + data, err := ioutil.ReadAll(res.Body) + defer res.Body.Close() + assert.Equal(t, []byte("Hello"), data) +} + +func TestProxyHTTPRespondForbidden(t *testing.T) { + proxy := helperNewProxy(t, true) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + proxy.httpRespond(w, r, http.StatusAccepted, "Hello") + })) + defer server.Close() + client := server.Client() + + res, err := client.Get(server.URL) + + assert.NoError(t, err) + assert.Equal(t, http.StatusAccepted, res.StatusCode) + assert.Equal(t, int64(0), res.ContentLength) +} + +func TestHTTPError(t *testing.T) { + _, errTimeout := net.DialTimeout("tcp", "127.0.0.1", 0) + assert.Error(t, errTimeout) + + tests := []struct { + input error + status int + output error + }{ + {nil, http.StatusNotImplemented, fmt.Errorf("error expected but found none")}, + {io.EOF, http.StatusBadRequest, fmt.Errorf("request body cannot be empty")}, + {context.DeadlineExceeded, 
http.StatusRequestTimeout, nil}, + {context.Canceled, 444, nil}, + {errTimeout, http.StatusRequestTimeout, nil}, + {fmt.Errorf(""), http.StatusInternalServerError, nil}, + } + + for _, test := range tests { + status, err := httpError(http.StatusInternalServerError, test.input) + + assert.Error(t, err) + assert.Equal(t, test.status, status) + if test.output == nil { + test.output = test.input + } + assert.Equal(t, test.output, err) + } +} + +func helperNewProxy(t *testing.T, secure ...bool) *Proxy { + t.Helper() + + proxy, err := NewSecureProxy(context.Background(), "file::memory:?cache=shared", "test.cloudflareaccess.com", "") + assert.NoError(t, err) + assert.NotNil(t, proxy) + + if len(secure) == 0 { + proxy.accessValidator = nil // Mark as insecure + } + + return proxy +} diff --git a/dbconnect/sql.go b/dbconnect/sql.go new file mode 100644 index 00000000..2bd5b56f --- /dev/null +++ b/dbconnect/sql.go @@ -0,0 +1,318 @@ +package dbconnect + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "net/url" + "reflect" + "strings" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "github.com/xo/dburl" + + // SQL drivers self-register with the database/sql package. + // https://github.com/golang/go/wiki/SQLDrivers + _ "github.com/denisenkom/go-mssqldb" + _ "github.com/go-sql-driver/mysql" + _ "github.com/mattn/go-sqlite3" + + "github.com/kshvakov/clickhouse" + "github.com/lib/pq" +) + +// SQLClient is a Client that talks to a SQL database. +type SQLClient struct { + Dialect string + driver *sqlx.DB +} + +// NewSQLClient creates a SQL client based on its URL scheme. +func NewSQLClient(ctx context.Context, originURL *url.URL) (Client, error) { + res, err := dburl.Parse(originURL.String()) + if err != nil { + helpText := fmt.Sprintf("supported drivers: %+q, see documentation for more details: %s", sql.Drivers(), "https://godoc.org/github.com/xo/dburl") + return nil, fmt.Errorf("could not parse sql database url '%s': %s\n%s", originURL, err.Error(), helpText) + } + + // Establishes the driver, but does not test the connection. + driver, err := sqlx.Open(res.Driver, res.DSN) + if err != nil { + return nil, fmt.Errorf("could not open sql driver %s: %s\n%s", res.Driver, err.Error(), res.DSN) + } + + // Closes the driver, will occur when the context finishes. + go func() { + <-ctx.Done() + driver.Close() + }() + + return &SQLClient{driver.DriverName(), driver}, nil +} + +// Ping verifies a connection to the database is still alive. +func (client *SQLClient) Ping(ctx context.Context) error { + return client.driver.PingContext(ctx) +} + +// Submit queries or executes a command to the SQL database. +func (client *SQLClient) Submit(ctx context.Context, cmd *Command) (interface{}, error) { + txx, err := cmd.ValidateSQL(client.Dialect) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(ctx, cmd.Timeout) + defer cancel() + + var res interface{} + + // Get the next available sql.Conn and submit the Command. + err = sqlConn(ctx, client.driver, txx, func(conn *sql.Conn) error { + stmt := cmd.Statement + args := cmd.Arguments.Positional + + if cmd.Mode == "query" { + res, err = sqlQuery(ctx, conn, stmt, args) + } else { + res, err = sqlExec(ctx, conn, stmt, args) + } + + return err + }) + + return res, err +} + +// ValidateSQL extends the contract of Command for SQL dialects: +// mode is conformed, arguments are []sql.NamedArg, and isolation is a sql.IsolationLevel. 
+// +// When the command should not be wrapped in a transaction, *sql.TxOptions and error will both be nil. +func (cmd *Command) ValidateSQL(dialect string) (*sql.TxOptions, error) { + err := cmd.Validate() + if err != nil { + return nil, err + } + + mode, err := sqlMode(cmd.Mode) + if err != nil { + return nil, err + } + + // Mutates Arguments to only use positional arguments with the type sql.NamedArg. + // This is a required by the sql.Driver before submitting arguments. + cmd.Arguments.sql(dialect) + + iso, err := sqlIsolation(cmd.Isolation) + if err != nil { + return nil, err + } + + // When isolation is out-of-range, this is indicative that no + // transaction should be executed and sql.TxOptions should be nil. + if iso < sql.LevelDefault { + return nil, nil + } + + // In query mode, execute the transaction in read-only, unless it's Microsoft SQL + // which does not support that type of transaction. + readOnly := mode == "query" && dialect != "mssql" + + return &sql.TxOptions{Isolation: iso, ReadOnly: readOnly}, nil +} + +// sqlConn gets the next available sql.Conn in the connection pool and runs a function to use it. +// +// If the transaction options are nil, run the useIt function outside a transaction. +// This is potentially an unsafe operation if the command does not clean up its state. +func sqlConn(ctx context.Context, driver *sqlx.DB, txx *sql.TxOptions, useIt func(*sql.Conn) error) error { + conn, err := driver.Conn(ctx) + if err != nil { + return err + } + defer conn.Close() + + // If transaction options are specified, begin and defer a rollback to catch errors. + var tx *sql.Tx + if txx != nil { + tx, err = conn.BeginTx(ctx, txx) + if err != nil { + return err + } + defer tx.Rollback() + } + + err = useIt(conn) + + // Check if useIt was successful and a transaction exists before committing. + if err == nil && tx != nil { + err = tx.Commit() + } + + return err +} + +// sqlQuery queries rows on a sql.Conn and returns an array of result objects. +func sqlQuery(ctx context.Context, conn *sql.Conn, stmt string, args []interface{}) ([]map[string]interface{}, error) { + rows, err := conn.QueryContext(ctx, stmt, args...) + if err == nil { + return sqlRows(rows) + } + return nil, err +} + +// sqlExec executes a command on a sql.Conn and returns the result of the operation. +func sqlExec(ctx context.Context, conn *sql.Conn, stmt string, args []interface{}) (sqlResult, error) { + exec, err := conn.ExecContext(ctx, stmt, args...) + if err == nil { + return sqlResultFrom(exec), nil + } + return sqlResult{}, err +} + +// sql mutates Arguments to contain a positional []sql.NamedArg. +// +// The actual return type is []interface{} due to the native Golang +// function signatures for sql.Exec and sql.Query being generic. +func (args *Arguments) sql(dialect string) { + result := args.Positional + + for i, val := range result { + result[i] = sqlArg("", val, dialect) + } + + for key, val := range args.Named { + result = append(result, sqlArg(key, val, dialect)) + } + + args.Positional = result + args.Named = map[string]interface{}{} +} + +// sqlArg creates a sql.NamedArg from a key-value pair and an optional dialect. +// +// Certain dialects will need to wrap objects, such as arrays, to conform its driver requirements. +func sqlArg(key, val interface{}, dialect string) sql.NamedArg { + switch reflect.ValueOf(val).Kind() { + + // PostgreSQL and Clickhouse require arrays to be wrapped before + // being inserted into the driver interface. 
+ case reflect.Slice, reflect.Array: + switch dialect { + case "postgres": + val = pq.Array(val) + case "clickhouse": + val = clickhouse.Array(val) + } + } + + return sql.Named(fmt.Sprint(key), val) +} + +// sqlIsolation tries to match a string to a sql.IsolationLevel. +func sqlIsolation(str string) (sql.IsolationLevel, error) { + if str == "none" { + return sql.IsolationLevel(-1), nil + } + + for iso := sql.LevelDefault; ; iso++ { + if iso > sql.LevelLinearizable { + return -1, fmt.Errorf("cannot provide an invalid sql isolation level: '%s'", str) + } + + if str == "" || strings.EqualFold(iso.String(), strings.ReplaceAll(str, "_", " ")) { + return iso, nil + } + } +} + +// sqlMode tries to match a string to a command mode: 'query' or 'exec' for now. +func sqlMode(str string) (string, error) { + switch str { + case "query", "exec": + return str, nil + default: + return "", fmt.Errorf("cannot provide invalid sql mode: '%s'", str) + } +} + +// sqlRows scans through a SQL result set and returns an array of objects. +func sqlRows(rows *sql.Rows) ([]map[string]interface{}, error) { + columns, err := rows.Columns() + if err != nil { + return nil, errors.Wrap(err, "could not extract columns from result") + } + defer rows.Close() + + types, err := rows.ColumnTypes() + if err != nil { + // Some drivers do not support type extraction, so fail silently and continue. + types = make([]*sql.ColumnType, len(columns)) + } + + values := make([]interface{}, len(columns)) + pointers := make([]interface{}, len(columns)) + + var results []map[string]interface{} + for rows.Next() { + for i := range columns { + pointers[i] = &values[i] + } + rows.Scan(pointers...) + + // Convert a row, an array of values, into an object where + // each key is the name of its respective column. + entry := make(map[string]interface{}) + for i, col := range columns { + entry[col] = sqlValue(values[i], types[i]) + } + results = append(results, entry) + } + + return results, nil +} + +// sqlValue handles special cases where sql.Rows does not return a "human-readable" object. +func sqlValue(val interface{}, col *sql.ColumnType) interface{} { + bytes, ok := val.([]byte) + if ok { + // Opportunistically check for embeded JSON and convert it to a first-class object. + var embeded interface{} + if json.Unmarshal(bytes, &embeded) == nil { + return embeded + } + + // STOR-604: investigate a way to coerce PostgreSQL arrays '{a, b, ...}' into JSON. + // Although easy with strings, it becomes more difficult with special types like INET[]. + + return string(bytes) + } + + return val +} + +// sqlResult is a thin wrapper around sql.Result. +type sqlResult struct { + LastInsertId int64 `json:"last_insert_id"` + RowsAffected int64 `json:"rows_affected"` +} + +// sqlResultFrom converts sql.Result into a JSON-marshable sqlResult. +func sqlResultFrom(res sql.Result) sqlResult { + insertID, errID := res.LastInsertId() + rowsAffected, errRows := res.RowsAffected() + + // If an error occurs when extracting the result, it is because the + // driver does not support that specific field. Instead of passing this + // to the user, omit the field in the response. 
+ if errID != nil { + insertID = -1 + } + if errRows != nil { + rowsAffected = -1 + } + + return sqlResult{insertID, rowsAffected} +} diff --git a/dbconnect/sql_test.go b/dbconnect/sql_test.go new file mode 100644 index 00000000..4445a784 --- /dev/null +++ b/dbconnect/sql_test.go @@ -0,0 +1,336 @@ +package dbconnect + +import ( + "context" + "database/sql" + "fmt" + "net/url" + "strings" + "testing" + "time" + + "github.com/kshvakov/clickhouse" + "github.com/lib/pq" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/stretchr/testify/assert" +) + +func TestNewSQLClient(t *testing.T) { + originURLs := []string{ + "postgres://localhost", + "cockroachdb://localhost:1337", + "postgresql://user:pass@127.0.0.1", + "mysql://localhost", + "clickhouse://127.0.0.1:9000/?debug", + "sqlite3::memory:", + "file:test.db?cache=shared", + } + + for _, originURL := range originURLs { + origin, _ := url.Parse(originURL) + _, err := NewSQLClient(context.Background(), origin) + + assert.NoError(t, err, originURL) + } + + originURLs = []string{ + "", + "/", + "http://localhost", + "coolthing://user:pass@127.0.0.1", + } + + for _, originURL := range originURLs { + origin, _ := url.Parse(originURL) + _, err := NewSQLClient(context.Background(), origin) + + assert.Error(t, err, originURL) + } +} + +func TestArgumentsSQL(t *testing.T) { + args := []Arguments{ + Arguments{ + Positional: []interface{}{ + "val", 10, 3.14, + }, + }, + Arguments{ + Named: map[string]interface{}{ + "key": time.Unix(0, 0), + }, + }, + } + + var nameType sql.NamedArg + for _, arg := range args { + arg.sql("") + for _, named := range arg.Positional { + assert.IsType(t, nameType, named) + } + } +} + +func TestSQLArg(t *testing.T) { + tests := []struct { + key interface{} + val interface{} + dialect string + arg sql.NamedArg + }{ + {"key", "val", "mssql", sql.Named("key", "val")}, + {0, 1, "sqlite3", sql.Named("0", 1)}, + {1, []string{"a", "b", "c"}, "postgres", sql.Named("1", pq.Array([]string{"a", "b", "c"}))}, + {"in", []uint{0, 1}, "clickhouse", sql.Named("in", clickhouse.Array([]uint{0, 1}))}, + {"", time.Unix(0, 0), "", sql.Named("", time.Unix(0, 0))}, + } + + for _, test := range tests { + arg := sqlArg(test.key, test.val, test.dialect) + assert.Equal(t, test.arg, arg, test.key) + } +} + +func TestSQLIsolation(t *testing.T) { + tests := []struct { + str string + iso sql.IsolationLevel + }{ + {"", sql.LevelDefault}, + {"DEFAULT", sql.LevelDefault}, + {"read_UNcommitted", sql.LevelReadUncommitted}, + {"serializable", sql.LevelSerializable}, + {"none", sql.IsolationLevel(-1)}, + {"SNAP shot", -2}, + {"blah", -2}, + } + + for _, test := range tests { + iso, err := sqlIsolation(test.str) + + if test.iso < -1 { + assert.Error(t, err, test.str) + } else { + assert.NoError(t, err) + assert.Equal(t, test.iso, iso, test.str) + } + } +} + +func TestSQLMode(t *testing.T) { + modes := []string{ + "query", + "exec", + } + + for _, mode := range modes { + actual, err := sqlMode(mode) + + assert.NoError(t, err) + assert.Equal(t, strings.ToLower(mode), actual, mode) + } + + modes = []string{ + "", + "blah", + } + + for _, mode := range modes { + _, err := sqlMode(mode) + + assert.Error(t, err) + } +} + +func helperRows(mockRows *sqlmock.Rows) *sql.Rows { + db, mock, _ := sqlmock.New() + mock.ExpectQuery("SELECT").WillReturnRows(mockRows) + rows, _ := db.Query("SELECT") + return rows +} + +func TestSQLRows(t *testing.T) { + actual, err := sqlRows(helperRows(sqlmock.NewRows( + []string{"name", "age", "dept"}). 
+ AddRow("alice", 19, "prod"))) + expected := []map[string]interface{}{map[string]interface{}{ + "name": "alice", + "age": int64(19), + "dept": "prod"}} + + assert.NoError(t, err) + assert.ElementsMatch(t, expected, actual) +} + +func TestSQLValue(t *testing.T) { + tests := []struct { + input interface{} + output interface{} + }{ + {"hello", "hello"}, + {1, 1}, + {false, false}, + {[]byte("random"), "random"}, + {[]byte("{\"json\":true}"), map[string]interface{}{"json": true}}, + {[]byte("[]"), []interface{}{}}, + } + + for _, test := range tests { + assert.Equal(t, test.output, sqlValue(test.input, nil), test.input) + } +} + +func TestSQLResultFrom(t *testing.T) { + res := sqlResultFrom(sqlmock.NewResult(1, 2)) + assert.Equal(t, sqlResult{1, 2}, res) + + res = sqlResultFrom(sqlmock.NewErrorResult(fmt.Errorf(""))) + assert.Equal(t, sqlResult{-1, -1}, res) +} + +func helperSQLite3(t *testing.T) (context.Context, Client) { + t.Helper() + + ctx := context.Background() + url, _ := url.Parse("file::memory:?cache=shared") + + sqlite3, err := NewSQLClient(ctx, url) + assert.NoError(t, err) + + return ctx, sqlite3 +} + +func TestPing(t *testing.T) { + ctx, sqlite3 := helperSQLite3(t) + err := sqlite3.Ping(ctx) + + assert.NoError(t, err) +} + +func TestSubmit(t *testing.T) { + ctx, sqlite3 := helperSQLite3(t) + + res, err := sqlite3.Submit(ctx, &Command{ + Statement: "CREATE TABLE t (a INTEGER, b FLOAT, c TEXT, d BLOB);", + Mode: "exec", + }) + assert.NoError(t, err) + assert.Equal(t, sqlResult{0, 0}, res) + + res, err = sqlite3.Submit(ctx, &Command{ + Statement: "SELECT * FROM t;", + Mode: "query", + }) + assert.NoError(t, err) + assert.Empty(t, res) + + res, err = sqlite3.Submit(ctx, &Command{ + Statement: "INSERT INTO t VALUES (?, ?, ?, ?);", + Mode: "exec", + Arguments: Arguments{ + Positional: []interface{}{ + 1, + 3.14, + "text", + "blob", + }, + }, + }) + assert.NoError(t, err) + assert.Equal(t, sqlResult{1, 1}, res) + + res, err = sqlite3.Submit(ctx, &Command{ + Statement: "UPDATE t SET c = NULL;", + Mode: "exec", + }) + assert.NoError(t, err) + assert.Equal(t, sqlResult{1, 1}, res) + + res, err = sqlite3.Submit(ctx, &Command{ + Statement: "SELECT * FROM t WHERE a = ?;", + Mode: "query", + Arguments: Arguments{ + Positional: []interface{}{1}, + }, + }) + assert.NoError(t, err) + assert.Len(t, res, 1) + + resf, ok := res.([]map[string]interface{}) + assert.True(t, ok) + assert.EqualValues(t, map[string]interface{}{ + "a": int64(1), + "b": 3.14, + "c": nil, + "d": "blob", + }, resf[0]) + + res, err = sqlite3.Submit(ctx, &Command{ + Statement: "DROP TABLE t;", + Mode: "exec", + }) + assert.NoError(t, err) + assert.Equal(t, sqlResult{1, 1}, res) +} + +func TestSubmitTransaction(t *testing.T) { + ctx, sqlite3 := helperSQLite3(t) + + res, err := sqlite3.Submit(ctx, &Command{ + Statement: "BEGIN;", + Mode: "exec", + }) + assert.Error(t, err) + assert.Empty(t, res) + + res, err = sqlite3.Submit(ctx, &Command{ + Statement: "BEGIN; CREATE TABLE tt (a INT); COMMIT;", + Mode: "exec", + Isolation: "none", + }) + assert.NoError(t, err) + assert.Equal(t, sqlResult{0, 0}, res) + + rows, err := sqlite3.Submit(ctx, &Command{ + Statement: "SELECT * FROM tt;", + Mode: "query", + Isolation: "repeatable_read", + }) + assert.NoError(t, err) + assert.Empty(t, rows) +} + +func TestSubmitTimeout(t *testing.T) { + ctx, sqlite3 := helperSQLite3(t) + + res, err := sqlite3.Submit(ctx, &Command{ + Statement: "SELECT * FROM t;", + Mode: "query", + Timeout: 1 * time.Nanosecond, + }) + assert.Error(t, err) + assert.Empty(t, 
res) +} + +func TestSubmitMode(t *testing.T) { + ctx, sqlite3 := helperSQLite3(t) + + res, err := sqlite3.Submit(ctx, &Command{ + Statement: "SELECT * FROM t;", + Mode: "notanoption", + }) + assert.Error(t, err) + assert.Empty(t, res) +} + +func TestSubmitEmpty(t *testing.T) { + ctx, sqlite3 := helperSQLite3(t) + + res, err := sqlite3.Submit(ctx, &Command{ + Statement: "; ; ; ;", + Mode: "query", + }) + assert.Error(t, err) + assert.Empty(t, res) +} diff --git a/dev.Dockerfile b/dev.Dockerfile new file mode 100644 index 00000000..dcf67365 --- /dev/null +++ b/dev.Dockerfile @@ -0,0 +1,4 @@ +FROM golang:1.12 as builder +WORKDIR /go/src/github.com/cloudflare/cloudflared/ +RUN apt-get update +COPY . . diff --git a/sshgen/sshgen.go b/sshgen/sshgen.go index 12538c84..ece05b9e 100644 --- a/sshgen/sshgen.go +++ b/sshgen/sshgen.go @@ -18,9 +18,9 @@ import ( "github.com/cloudflare/cloudflared/cmd/cloudflared/config" cfpath "github.com/cloudflare/cloudflared/cmd/cloudflared/path" - "github.com/coreos/go-oidc/jose" homedir "github.com/mitchellh/go-homedir" gossh "golang.org/x/crypto/ssh" + "gopkg.in/coreos/go-oidc.v1/jose" ) const ( diff --git a/sshgen/sshgen_test.go b/sshgen/sshgen_test.go index d7094d14..51f4ee7e 100644 --- a/sshgen/sshgen_test.go +++ b/sshgen/sshgen_test.go @@ -16,8 +16,8 @@ import ( "github.com/cloudflare/cloudflared/cmd/cloudflared/config" cfpath "github.com/cloudflare/cloudflared/cmd/cloudflared/path" - "github.com/coreos/go-oidc/jose" "github.com/stretchr/testify/assert" + "gopkg.in/coreos/go-oidc.v1/jose" ) const ( diff --git a/validation/validation.go b/validation/validation.go index 91e795fe..fb3fb5a2 100644 --- a/validation/validation.go +++ b/validation/validation.go @@ -1,6 +1,7 @@ package validation import ( + "context" "fmt" "net" "net/url" @@ -9,15 +10,21 @@ import ( "net/http" + "github.com/coreos/go-oidc" "github.com/pkg/errors" "golang.org/x/net/idna" ) -const defaultScheme = "http" +const ( + defaultScheme = "http" + accessDomain = "cloudflareaccess.com" + accessCertPath = "/cdn-cgi/access/certs" + accessJwtHeader = "Cf-access-jwt-assertion" +) var ( supportedProtocols = []string{"http", "https", "rdp"} - validationTimeout = time.Duration(30 * time.Second) + validationTimeout = time.Duration(30 * time.Second) ) func ValidateHostname(hostname string) (string, error) { @@ -197,3 +204,50 @@ func toggleProtocol(httpProtocol string) string { return httpProtocol } } + +// Access checks if a JWT from Cloudflare Access is valid. +type Access struct { + verifier *oidc.IDTokenVerifier +} + +func NewAccessValidator(ctx context.Context, domain, issuer, applicationAUD string) (*Access, error) { + domainURL, err := ValidateUrl(domain) + if err != nil { + return nil, err + } + + issuerURL, err := ValidateUrl(issuer) + if err != nil { + return nil, err + } + + // An issuerURL from Cloudflare Access will always use HTTPS. + issuerURL = strings.Replace(issuerURL, "http:", "https:", 1) + + keySet := oidc.NewRemoteKeySet(ctx, domainURL+accessCertPath) + return &Access{oidc.NewVerifier(issuerURL, keySet, &oidc.Config{ClientID: applicationAUD})}, nil +} + +func (a *Access) Validate(ctx context.Context, jwt string) error { + token, err := a.verifier.Verify(ctx, jwt) + + if err != nil { + return errors.Wrapf(err, "token is invalid: %s", jwt) + } + + // Perform extra sanity checks, just to be safe. 
+ + if token == nil { + return fmt.Errorf("token is nil: %s", jwt) + } + + if !strings.HasSuffix(token.Issuer, accessDomain) { + return fmt.Errorf("token has non-cloudflare issuer of %s: %s", token.Issuer, jwt) + } + + return nil +} + +func (a *Access) ValidateRequest(ctx context.Context, r *http.Request) error { + return a.Validate(ctx, r.Header.Get(accessJwtHeader)) +} diff --git a/validation/validation_test.go b/validation/validation_test.go index 88147d41..6a7ec48b 100644 --- a/validation/validation_test.go +++ b/validation/validation_test.go @@ -320,6 +320,39 @@ func TestValidateHTTPService_NonResponsiveOrigin(t *testing.T) { } } +func TestNewAccessValidatorOk(t *testing.T) { + ctx := context.Background() + url := "test.cloudflareaccess.com" + access, err := NewAccessValidator(ctx, url, url, "") + + assert.NoError(t, err) + assert.NotNil(t, access) + + assert.Error(t, access.Validate(ctx, "")) + assert.Error(t, access.Validate(ctx, "invalid")) + + req := httptest.NewRequest("GET", "https://test.cloudflareaccess.com", nil) + req.Header.Set(accessJwtHeader, "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c") + assert.Error(t, access.ValidateRequest(ctx, req)) +} + +func TestNewAccessValidatorErr(t *testing.T) { + ctx := context.Background() + + urls := []string{ + "", + "tcp://test.cloudflareaccess.com", + "wss://cloudflarenone.com", + } + + for _, url := range urls { + access, err := NewAccessValidator(ctx, url, url, "") + + assert.Error(t, err, url) + assert.Nil(t, access) + } +} + type testRoundTripper func(req *http.Request) (*http.Response, error) func (f testRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/civil/civil.go b/vendor/cloud.google.com/go/civil/civil.go new file mode 100644 index 00000000..29272ef2 --- /dev/null +++ b/vendor/cloud.google.com/go/civil/civil.go @@ -0,0 +1,277 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package civil + +import ( + "fmt" + "time" +) + +// A Date represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type Date struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). + Day int // Day of the month, starting at 1. +} + +// DateOf returns the Date in which a time occurs in that time's location. +func DateOf(t time.Time) Date { + var d Date + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseDate(s string) (Date, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return Date{}, err + } + return DateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d Date) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. +func (d Date) IsValid() bool { + return DateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. +// +// In is always consistent with time.Date, even when time.Date returns a time +// on a different day. For example, if loc is America/Indiana/Vincennes, then both +// time.Date(1955, time.May, 1, 0, 0, 0, 0, loc) +// and +// civil.Date{Year: 1955, Month: time.May, Day: 1}.In(loc) +// return 23:00:00 on April 30, 1955. +// +// In panics if loc is nil. +func (d Date) In(loc *time.Location) time.Time { + return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) +} + +// AddDays returns the date that is n days in the future. +// n can also be negative to go into the past. +func (d Date) AddDays(n int) Date { + return DateOf(d.In(time.UTC).AddDate(0, 0, n)) +} + +// DaysSince returns the signed number of days between the date and s, not including the end day. +// This is the inverse operation to AddDays. +func (d Date) DaysSince(s Date) (days int) { + // We convert to Unix time so we do not have to worry about leap seconds: + // Unix time increases by exactly 86400 seconds per day. + deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() + return int(deltaUnix / 86400) +} + +// Before reports whether d1 occurs before d2. +func (d1 Date) Before(d2 Date) bool { + if d1.Year != d2.Year { + return d1.Year < d2.Year + } + if d1.Month != d2.Month { + return d1.Month < d2.Month + } + return d1.Day < d2.Day +} + +// After reports whether d1 occurs after d2. +func (d1 Date) After(d2 Date) bool { + return d2.Before(d1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of d.String(). +func (d Date) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The date is expected to be a string in a format accepted by ParseDate. +func (d *Date) UnmarshalText(data []byte) error { + var err error + *d, err = ParseDate(string(data)) + return err +} + +// A Time represents a time with nanosecond precision. 
+// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +// +// This type exists to represent the TIME type in storage-based APIs like BigQuery. +// Most operations on Times are unlikely to be meaningful. Prefer the DateTime type. +type Time struct { + Hour int // The hour of the day in 24-hour format; range [0-23] + Minute int // The minute of the hour; range [0-59] + Second int // The second of the minute; range [0-59] + Nanosecond int // The nanosecond of the second; range [0-999999999] +} + +// TimeOf returns the Time representing the time of day in which a time occurs +// in that time's location. It ignores the date. +func TimeOf(t time.Time) Time { + var tm Time + tm.Hour, tm.Minute, tm.Second = t.Clock() + tm.Nanosecond = t.Nanosecond() + return tm +} + +// ParseTime parses a string and returns the time value it represents. +// ParseTime accepts an extended form of the RFC3339 partial-time format. After +// the HH:MM:SS part of the string, an optional fractional part may appear, +// consisting of a decimal point followed by one to nine decimal digits. +// (RFC3339 admits only one digit after the decimal point). +func ParseTime(s string) (Time, error) { + t, err := time.Parse("15:04:05.999999999", s) + if err != nil { + return Time{}, err + } + return TimeOf(t), nil +} + +// String returns the date in the format described in ParseTime. If Nanoseconds +// is zero, no fractional part will be generated. Otherwise, the result will +// end with a fractional part consisting of a decimal point and nine digits. +func (t Time) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) + if t.Nanosecond == 0 { + return s + } + return s + fmt.Sprintf(".%09d", t.Nanosecond) +} + +// IsValid reports whether the time is valid. +func (t Time) IsValid() bool { + // Construct a non-zero time. + tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) + return TimeOf(tm) == t +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of t.String(). +func (t Time) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The time is expected to be a string in a format accepted by ParseTime. +func (t *Time) UnmarshalText(data []byte) error { + var err error + *t, err = ParseTime(string(data)) + return err +} + +// A DateTime represents a date and time. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +type DateTime struct { + Date Date + Time Time +} + +// Note: We deliberately do not embed Date into DateTime, to avoid promoting AddDays and Sub. + +// DateTimeOf returns the DateTime in which a time occurs in that time's location. +func DateTimeOf(t time.Time) DateTime { + return DateTime{ + Date: DateOf(t), + Time: TimeOf(t), + } +} + +// ParseDateTime parses a string and returns the DateTime it represents. +// ParseDateTime accepts a variant of the RFC3339 date-time format that omits +// the time offset but includes an optional fractional time, as described in +// ParseTime. Informally, the accepted format is +// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] +// where the 'T' may be a lower-case 't'. 
+func ParseDateTime(s string) (DateTime, error) { + t, err := time.Parse("2006-01-02T15:04:05.999999999", s) + if err != nil { + t, err = time.Parse("2006-01-02t15:04:05.999999999", s) + if err != nil { + return DateTime{}, err + } + } + return DateTimeOf(t), nil +} + +// String returns the date in the format described in ParseDate. +func (dt DateTime) String() string { + return dt.Date.String() + "T" + dt.Time.String() +} + +// IsValid reports whether the datetime is valid. +func (dt DateTime) IsValid() bool { + return dt.Date.IsValid() && dt.Time.IsValid() +} + +// In returns the time corresponding to the DateTime in the given location. +// +// If the time is missing or ambigous at the location, In returns the same +// result as time.Date. For example, if loc is America/Indiana/Vincennes, then +// both +// time.Date(1955, time.May, 1, 0, 30, 0, 0, loc) +// and +// civil.DateTime{ +// civil.Date{Year: 1955, Month: time.May, Day: 1}}, +// civil.Time{Minute: 30}}.In(loc) +// return 23:30:00 on April 30, 1955. +// +// In panics if loc is nil. +func (dt DateTime) In(loc *time.Location) time.Time { + return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) +} + +// Before reports whether dt1 occurs before dt2. +func (dt1 DateTime) Before(dt2 DateTime) bool { + return dt1.In(time.UTC).Before(dt2.In(time.UTC)) +} + +// After reports whether dt1 occurs after dt2. +func (dt1 DateTime) After(dt2 DateTime) bool { + return dt2.Before(dt1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of dt.String(). +func (dt DateTime) MarshalText() ([]byte, error) { + return []byte(dt.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The datetime is expected to be a string in a format accepted by ParseDateTime +func (dt *DateTime) UnmarshalText(data []byte) error { + var err error + *dt, err = ParseDateTime(string(data)) + return err +} diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/.gitignore b/vendor/github.com/DATA-DOG/go-sqlmock/.gitignore new file mode 100644 index 00000000..e4001c08 --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/.gitignore @@ -0,0 +1,3 @@ +/examples/blog/blog +/examples/orders/orders +/examples/basic/basic diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/.travis.yml b/vendor/github.com/DATA-DOG/go-sqlmock/.travis.yml new file mode 100644 index 00000000..77ce421e --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/.travis.yml @@ -0,0 +1,25 @@ +language: go + +go_import_path: github.com/DATA-DOG/go-sqlmock + +go: + - 1.2.x + - 1.3.x + - 1.4 # has no cover tool for latest releases + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + +sudo: false + +script: + - go vet + - test -z "$(go fmt ./...)" # fail if not formatted properly + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/LICENSE b/vendor/github.com/DATA-DOG/go-sqlmock/LICENSE new file mode 100644 index 00000000..6ee063ce --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/LICENSE @@ -0,0 +1,28 @@ +The three clause BSD license (http://en.wikipedia.org/wiki/BSD_licenses) + +Copyright (c) 2013-2019, DATA-DOG team +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* The name DataDog.lt may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/README.md b/vendor/github.com/DATA-DOG/go-sqlmock/README.md new file mode 100644 index 00000000..5d8ad35f --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/README.md @@ -0,0 +1,259 @@ +[![Build Status](https://travis-ci.org/DATA-DOG/go-sqlmock.svg)](https://travis-ci.org/DATA-DOG/go-sqlmock) +[![GoDoc](https://godoc.org/github.com/DATA-DOG/go-sqlmock?status.svg)](https://godoc.org/github.com/DATA-DOG/go-sqlmock) +[![Go Report Card](https://goreportcard.com/badge/github.com/DATA-DOG/go-sqlmock)](https://goreportcard.com/report/github.com/DATA-DOG/go-sqlmock) +[![codecov.io](https://codecov.io/github/DATA-DOG/go-sqlmock/branch/master/graph/badge.svg)](https://codecov.io/github/DATA-DOG/go-sqlmock) + +# Sql driver mock for Golang + +**sqlmock** is a mock library implementing [sql/driver](https://godoc.org/database/sql/driver). Which has one and only +purpose - to simulate any **sql** driver behavior in tests, without needing a real database connection. It helps to +maintain correct **TDD** workflow. + +- this library is now complete and stable. (you may not find new changes for this reason) +- supports concurrency and multiple connections. +- supports **go1.8** Context related feature mocking and Named sql parameters. +- does not require any modifications to your source code. +- the driver allows to mock any sql driver method behavior. +- has strict by default expectation order matching. +- has no third party dependencies. + +**NOTE:** in **v1.2.0** **sqlmock.Rows** has changed to struct from interface, if you were using any type references to that +interface, you will need to switch it to a pointer struct type. Also, **sqlmock.Rows** were used to implement **driver.Rows** +interface, which was not required or useful for mocking and was removed. Hope it will not cause issues. + +## Install + + go get github.com/DATA-DOG/go-sqlmock + +## Documentation and Examples + +Visit [godoc](http://godoc.org/github.com/DATA-DOG/go-sqlmock) for general examples and public api reference. +See **.travis.yml** for supported **go** versions. 
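As a quick orientation before the fuller examples below, here is a minimal sketch of the workflow (the `users` table and query are hypothetical, and the library's default regexp-based query matcher is assumed):

``` go
package main

import (
	"fmt"

	"github.com/DATA-DOG/go-sqlmock"
)

func main() {
	// Open a stub connection; no real database is required.
	db, mock, err := sqlmock.New()
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Expect one query and return a single stubbed row.
	rows := sqlmock.NewRows([]string{"id", "name"}).AddRow(1, "alice")
	mock.ExpectQuery("SELECT (.+) FROM users").WillReturnRows(rows)

	// Exercise the code under test through the standard database/sql API.
	res, err := db.Query("SELECT id, name FROM users")
	if err != nil {
		panic(err)
	}
	defer res.Close()

	// Assert that every declared expectation was met.
	if err := mock.ExpectationsWereMet(); err != nil {
		fmt.Println("unfulfilled expectations:", err)
	}
}
```

By default, expectations are matched in the order they are declared, which keeps failures easy to trace back to the statement that diverged.
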
+Different use case, is to functionally test with a real database - [go-txdb](https://github.com/DATA-DOG/go-txdb) +all database related actions are isolated within a single transaction so the database can remain in the same state. + +See implementation examples: + +- [blog API server](https://github.com/DATA-DOG/go-sqlmock/tree/master/examples/blog) +- [the same orders example](https://github.com/DATA-DOG/go-sqlmock/tree/master/examples/orders) + +### Something you may want to test, assuming you use the [go-mysql-driver](https://github.com/go-sql-driver/mysql) + +``` go +package main + +import ( + "database/sql" + + _ "github.com/go-sql-driver/mysql" +) + +func recordStats(db *sql.DB, userID, productID int64) (err error) { + tx, err := db.Begin() + if err != nil { + return + } + + defer func() { + switch err { + case nil: + err = tx.Commit() + default: + tx.Rollback() + } + }() + + if _, err = tx.Exec("UPDATE products SET views = views + 1"); err != nil { + return + } + if _, err = tx.Exec("INSERT INTO product_viewers (user_id, product_id) VALUES (?, ?)", userID, productID); err != nil { + return + } + return +} + +func main() { + // @NOTE: the real connection is not required for tests + db, err := sql.Open("mysql", "root@/blog") + if err != nil { + panic(err) + } + defer db.Close() + + if err = recordStats(db, 1 /*some user id*/, 5 /*some product id*/); err != nil { + panic(err) + } +} +``` + +### Tests with sqlmock + +``` go +package main + +import ( + "fmt" + "testing" + + "github.com/DATA-DOG/go-sqlmock" +) + +// a successful case +func TestShouldUpdateStats(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) + } + defer db.Close() + + mock.ExpectBegin() + mock.ExpectExec("UPDATE products").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectExec("INSERT INTO product_viewers").WithArgs(2, 3).WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit() + + // now we execute our method + if err = recordStats(db, 2, 3); err != nil { + t.Errorf("error was not expected while updating stats: %s", err) + } + + // we make sure that all expectations were met + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +// a failing test case +func TestShouldRollbackStatUpdatesOnFailure(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("an error '%s' was not expected when opening a stub database connection", err) + } + defer db.Close() + + mock.ExpectBegin() + mock.ExpectExec("UPDATE products").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectExec("INSERT INTO product_viewers"). + WithArgs(2, 3). + WillReturnError(fmt.Errorf("some error")) + mock.ExpectRollback() + + // now we execute our method + if err = recordStats(db, 2, 3); err == nil { + t.Errorf("was expecting an error, but there was none") + } + + // we make sure that all expectations were met + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} +``` + +## Customize SQL query matching + +There were plenty of requests from users regarding SQL query string validation or different matching option. +We have now implemented the `QueryMatcher` interface, which can be passed through an option when calling +`sqlmock.New` or `sqlmock.NewWithDSN`. + +This now allows to include some library, which would allow for example to parse and validate `mysql` SQL AST. 
+And create a custom QueryMatcher in order to validate SQL in sophisticated ways. + +By default, **sqlmock** is preserving backward compatibility and default query matcher is `sqlmock.QueryMatcherRegexp` +which uses expected SQL string as a regular expression to match incoming query string. There is an equality matcher: +`QueryMatcherEqual` which will do a full case sensitive match. + +In order to customize the QueryMatcher, use the following: + +``` go + db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) +``` + +The query matcher can be fully customized based on user needs. **sqlmock** will not +provide a standard sql parsing matchers, since various drivers may not follow the same SQL standard. + +## Matching arguments like time.Time + +There may be arguments which are of `struct` type and cannot be compared easily by value like `time.Time`. In this case +**sqlmock** provides an [Argument](https://godoc.org/github.com/DATA-DOG/go-sqlmock#Argument) interface which +can be used in more sophisticated matching. Here is a simple example of time argument matching: + +``` go +type AnyTime struct{} + +// Match satisfies sqlmock.Argument interface +func (a AnyTime) Match(v driver.Value) bool { + _, ok := v.(time.Time) + return ok +} + +func TestAnyTimeArgument(t *testing.T) { + t.Parallel() + db, mock, err := New() + if err != nil { + t.Errorf("an error '%s' was not expected when opening a stub database connection", err) + } + defer db.Close() + + mock.ExpectExec("INSERT INTO users"). + WithArgs("john", AnyTime{}). + WillReturnResult(NewResult(1, 1)) + + _, err = db.Exec("INSERT INTO users(name, created_at) VALUES (?, ?)", "john", time.Now()) + if err != nil { + t.Errorf("error '%s' was not expected, while inserting a row", err) + } + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} +``` + +It only asserts that argument is of `time.Time` type. + +## Run tests + + go test -race + +## Change Log + +- **2019-02-13** - added `go.mod` removed the references and suggestions using `gopkg.in`. +- **2018-12-11** - added expectation of Rows to be closed, while mocking expected query. +- **2018-12-11** - introduced an option to provide **QueryMatcher** in order to customize SQL query matching. +- **2017-09-01** - it is now possible to expect that prepared statement will be closed, + using **ExpectedPrepare.WillBeClosed**. +- **2017-02-09** - implemented support for **go1.8** features. **Rows** interface was changed to struct + but contains all methods as before and should maintain backwards compatibility. **ExpectedQuery.WillReturnRows** may now + accept multiple row sets. +- **2016-11-02** - `db.Prepare()` was not validating expected prepare SQL + query. It should still be validated even if Exec or Query is not + executed on that prepared statement. +- **2016-02-23** - added **sqlmock.AnyArg()** function to provide any kind + of argument matcher. +- **2016-02-23** - convert expected arguments to driver.Value as natural + driver does, the change may affect time.Time comparison and will be + stricter. See [issue](https://github.com/DATA-DOG/go-sqlmock/issues/31). +- **2015-08-27** - **v1** api change, concurrency support, all known issues fixed. 
+- **2014-08-16** instead of **panic** during reflect type mismatch when comparing query arguments - now return error +- **2014-08-14** added **sqlmock.NewErrorResult** which gives an option to return driver.Result with errors for +interface methods, see [issue](https://github.com/DATA-DOG/go-sqlmock/issues/5) +- **2014-05-29** allow to match arguments in more sophisticated ways, by providing an **sqlmock.Argument** interface +- **2014-04-21** introduce **sqlmock.New()** to open a mock database connection for tests. This method +calls sql.DB.Ping to ensure that connection is open, see [issue](https://github.com/DATA-DOG/go-sqlmock/issues/4). +This way on Close it will surely assert if all expectations are met, even if database was not triggered at all. +The old way is still available, but it is advisable to call db.Ping manually before asserting with db.Close. +- **2014-02-14** RowsFromCSVString is now a part of Rows interface named as FromCSVString. +It has changed to allow more ways to construct rows and to easily extend this API in future. +See [issue 1](https://github.com/DATA-DOG/go-sqlmock/issues/1) +**RowsFromCSVString** is deprecated and will be removed in future + +## Contributions + +Feel free to open a pull request. Note, if you wish to contribute an extension to public (exported methods or types) - +please open an issue before, to discuss whether these changes can be accepted. All backward incompatible changes are +and will be treated cautiously + +## License + +The [three clause BSD license](http://en.wikipedia.org/wiki/BSD_licenses) + diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/argument.go b/vendor/github.com/DATA-DOG/go-sqlmock/argument.go new file mode 100644 index 00000000..7727481a --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/argument.go @@ -0,0 +1,24 @@ +package sqlmock + +import "database/sql/driver" + +// Argument interface allows to match +// any argument in specific way when used with +// ExpectedQuery and ExpectedExec expectations. +type Argument interface { + Match(driver.Value) bool +} + +// AnyArg will return an Argument which can +// match any kind of arguments. +// +// Useful for time.Time or similar kinds of arguments. +func AnyArg() Argument { + return anyArgument{} +} + +type anyArgument struct{} + +func (a anyArgument) Match(_ driver.Value) bool { + return true +} diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/driver.go b/vendor/github.com/DATA-DOG/go-sqlmock/driver.go new file mode 100644 index 00000000..802f8fbe --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/driver.go @@ -0,0 +1,81 @@ +package sqlmock + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "sync" +) + +var pool *mockDriver + +func init() { + pool = &mockDriver{ + conns: make(map[string]*sqlmock), + } + sql.Register("sqlmock", pool) +} + +type mockDriver struct { + sync.Mutex + counter int + conns map[string]*sqlmock +} + +func (d *mockDriver) Open(dsn string) (driver.Conn, error) { + d.Lock() + defer d.Unlock() + + c, ok := d.conns[dsn] + if !ok { + return c, fmt.Errorf("expected a connection to be available, but it is not") + } + + c.opened++ + return c, nil +} + +// New creates sqlmock database connection and a mock to manage expectations. +// Accepts options, like ValueConverterOption, to use a ValueConverter from +// a specific driver. +// Pings db so that all expectations could be +// asserted. 
+func New(options ...func(*sqlmock) error) (*sql.DB, Sqlmock, error) { + pool.Lock() + dsn := fmt.Sprintf("sqlmock_db_%d", pool.counter) + pool.counter++ + + smock := &sqlmock{dsn: dsn, drv: pool, ordered: true} + pool.conns[dsn] = smock + pool.Unlock() + + return smock.open(options) +} + +// NewWithDSN creates sqlmock database connection with a specific DSN +// and a mock to manage expectations. +// Accepts options, like ValueConverterOption, to use a ValueConverter from +// a specific driver. +// Pings db so that all expectations could be asserted. +// +// This method is introduced because of sql abstraction +// libraries, which do not provide a way to initialize +// with sql.DB instance. For example GORM library. +// +// Note, it will error if attempted to create with an +// already used dsn +// +// It is not recommended to use this method, unless you +// really need it and there is no other way around. +func NewWithDSN(dsn string, options ...func(*sqlmock) error) (*sql.DB, Sqlmock, error) { + pool.Lock() + if _, ok := pool.conns[dsn]; ok { + pool.Unlock() + return nil, nil, fmt.Errorf("cannot create a new mock database with the same dsn: %s", dsn) + } + smock := &sqlmock{dsn: dsn, drv: pool, ordered: true} + pool.conns[dsn] = smock + pool.Unlock() + + return smock.open(options) +} diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/expectations.go b/vendor/github.com/DATA-DOG/go-sqlmock/expectations.go new file mode 100644 index 00000000..38c0d170 --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/expectations.go @@ -0,0 +1,355 @@ +package sqlmock + +import ( + "database/sql/driver" + "fmt" + "strings" + "sync" + "time" +) + +// an expectation interface +type expectation interface { + fulfilled() bool + Lock() + Unlock() + String() string +} + +// common expectation struct +// satisfies the expectation interface +type commonExpectation struct { + sync.Mutex + triggered bool + err error +} + +func (e *commonExpectation) fulfilled() bool { + return e.triggered +} + +// ExpectedClose is used to manage *sql.DB.Close expectation +// returned by *Sqlmock.ExpectClose. +type ExpectedClose struct { + commonExpectation +} + +// WillReturnError allows to set an error for *sql.DB.Close action +func (e *ExpectedClose) WillReturnError(err error) *ExpectedClose { + e.err = err + return e +} + +// String returns string representation +func (e *ExpectedClose) String() string { + msg := "ExpectedClose => expecting database Close" + if e.err != nil { + msg += fmt.Sprintf(", which should return error: %s", e.err) + } + return msg +} + +// ExpectedBegin is used to manage *sql.DB.Begin expectation +// returned by *Sqlmock.ExpectBegin. +type ExpectedBegin struct { + commonExpectation + delay time.Duration +} + +// WillReturnError allows to set an error for *sql.DB.Begin action +func (e *ExpectedBegin) WillReturnError(err error) *ExpectedBegin { + e.err = err + return e +} + +// String returns string representation +func (e *ExpectedBegin) String() string { + msg := "ExpectedBegin => expecting database transaction Begin" + if e.err != nil { + msg += fmt.Sprintf(", which should return error: %s", e.err) + } + return msg +} + +// WillDelayFor allows to specify duration for which it will delay +// result. May be used together with Context +func (e *ExpectedBegin) WillDelayFor(duration time.Duration) *ExpectedBegin { + e.delay = duration + return e +} + +// ExpectedCommit is used to manage *sql.Tx.Commit expectation +// returned by *Sqlmock.ExpectCommit. 
+type ExpectedCommit struct { + commonExpectation +} + +// WillReturnError allows to set an error for *sql.Tx.Close action +func (e *ExpectedCommit) WillReturnError(err error) *ExpectedCommit { + e.err = err + return e +} + +// String returns string representation +func (e *ExpectedCommit) String() string { + msg := "ExpectedCommit => expecting transaction Commit" + if e.err != nil { + msg += fmt.Sprintf(", which should return error: %s", e.err) + } + return msg +} + +// ExpectedRollback is used to manage *sql.Tx.Rollback expectation +// returned by *Sqlmock.ExpectRollback. +type ExpectedRollback struct { + commonExpectation +} + +// WillReturnError allows to set an error for *sql.Tx.Rollback action +func (e *ExpectedRollback) WillReturnError(err error) *ExpectedRollback { + e.err = err + return e +} + +// String returns string representation +func (e *ExpectedRollback) String() string { + msg := "ExpectedRollback => expecting transaction Rollback" + if e.err != nil { + msg += fmt.Sprintf(", which should return error: %s", e.err) + } + return msg +} + +// ExpectedQuery is used to manage *sql.DB.Query, *dql.DB.QueryRow, *sql.Tx.Query, +// *sql.Tx.QueryRow, *sql.Stmt.Query or *sql.Stmt.QueryRow expectations. +// Returned by *Sqlmock.ExpectQuery. +type ExpectedQuery struct { + queryBasedExpectation + rows driver.Rows + delay time.Duration + rowsMustBeClosed bool + rowsWereClosed bool +} + +// WithArgs will match given expected args to actual database query arguments. +// if at least one argument does not match, it will return an error. For specific +// arguments an sqlmock.Argument interface can be used to match an argument. +func (e *ExpectedQuery) WithArgs(args ...driver.Value) *ExpectedQuery { + e.args = args + return e +} + +// RowsWillBeClosed expects this query rows to be closed. +func (e *ExpectedQuery) RowsWillBeClosed() *ExpectedQuery { + e.rowsMustBeClosed = true + return e +} + +// WillReturnError allows to set an error for expected database query +func (e *ExpectedQuery) WillReturnError(err error) *ExpectedQuery { + e.err = err + return e +} + +// WillDelayFor allows to specify duration for which it will delay +// result. May be used together with Context +func (e *ExpectedQuery) WillDelayFor(duration time.Duration) *ExpectedQuery { + e.delay = duration + return e +} + +// String returns string representation +func (e *ExpectedQuery) String() string { + msg := "ExpectedQuery => expecting Query, QueryContext or QueryRow which:" + msg += "\n - matches sql: '" + e.expectSQL + "'" + + if len(e.args) == 0 { + msg += "\n - is without arguments" + } else { + msg += "\n - is with arguments:\n" + for i, arg := range e.args { + msg += fmt.Sprintf(" %d - %+v\n", i, arg) + } + msg = strings.TrimSpace(msg) + } + + if e.rows != nil { + msg += fmt.Sprintf("\n - %s", e.rows) + } + + if e.err != nil { + msg += fmt.Sprintf("\n - should return error: %s", e.err) + } + + return msg +} + +// ExpectedExec is used to manage *sql.DB.Exec, *sql.Tx.Exec or *sql.Stmt.Exec expectations. +// Returned by *Sqlmock.ExpectExec. +type ExpectedExec struct { + queryBasedExpectation + result driver.Result + delay time.Duration +} + +// WithArgs will match given expected args to actual database exec operation arguments. +// if at least one argument does not match, it will return an error. For specific +// arguments an sqlmock.Argument interface can be used to match an argument. 
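+// For example (an illustrative sketch, the table name is made up), sqlmock.AnyArg()
+// is an Argument that accepts any value:
+//
+//	mock.ExpectExec("INSERT INTO users").
+//		WithArgs("john", sqlmock.AnyArg()).
+//		WillReturnResult(sqlmock.NewResult(1, 1))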
+func (e *ExpectedExec) WithArgs(args ...driver.Value) *ExpectedExec { + e.args = args + return e +} + +// WillReturnError allows to set an error for expected database exec action +func (e *ExpectedExec) WillReturnError(err error) *ExpectedExec { + e.err = err + return e +} + +// WillDelayFor allows to specify duration for which it will delay +// result. May be used together with Context +func (e *ExpectedExec) WillDelayFor(duration time.Duration) *ExpectedExec { + e.delay = duration + return e +} + +// String returns string representation +func (e *ExpectedExec) String() string { + msg := "ExpectedExec => expecting Exec or ExecContext which:" + msg += "\n - matches sql: '" + e.expectSQL + "'" + + if len(e.args) == 0 { + msg += "\n - is without arguments" + } else { + msg += "\n - is with arguments:\n" + var margs []string + for i, arg := range e.args { + margs = append(margs, fmt.Sprintf(" %d - %+v", i, arg)) + } + msg += strings.Join(margs, "\n") + } + + if e.result != nil { + res, _ := e.result.(*result) + msg += "\n - should return Result having:" + msg += fmt.Sprintf("\n LastInsertId: %d", res.insertID) + msg += fmt.Sprintf("\n RowsAffected: %d", res.rowsAffected) + if res.err != nil { + msg += fmt.Sprintf("\n Error: %s", res.err) + } + } + + if e.err != nil { + msg += fmt.Sprintf("\n - should return error: %s", e.err) + } + + return msg +} + +// WillReturnResult arranges for an expected Exec() to return a particular +// result, there is sqlmock.NewResult(lastInsertID int64, affectedRows int64) method +// to build a corresponding result. Or if actions needs to be tested against errors +// sqlmock.NewErrorResult(err error) to return a given error. +func (e *ExpectedExec) WillReturnResult(result driver.Result) *ExpectedExec { + e.result = result + return e +} + +// ExpectedPrepare is used to manage *sql.DB.Prepare or *sql.Tx.Prepare expectations. +// Returned by *Sqlmock.ExpectPrepare. +type ExpectedPrepare struct { + commonExpectation + mock *sqlmock + expectSQL string + statement driver.Stmt + closeErr error + mustBeClosed bool + wasClosed bool + delay time.Duration +} + +// WillReturnError allows to set an error for the expected *sql.DB.Prepare or *sql.Tx.Prepare action. +func (e *ExpectedPrepare) WillReturnError(err error) *ExpectedPrepare { + e.err = err + return e +} + +// WillReturnCloseError allows to set an error for this prepared statement Close action +func (e *ExpectedPrepare) WillReturnCloseError(err error) *ExpectedPrepare { + e.closeErr = err + return e +} + +// WillDelayFor allows to specify duration for which it will delay +// result. May be used together with Context +func (e *ExpectedPrepare) WillDelayFor(duration time.Duration) *ExpectedPrepare { + e.delay = duration + return e +} + +// WillBeClosed expects this prepared statement to +// be closed. +func (e *ExpectedPrepare) WillBeClosed() *ExpectedPrepare { + e.mustBeClosed = true + return e +} + +// ExpectQuery allows to expect Query() or QueryRow() on this prepared statement. +// This method is convenient in order to prevent duplicating sql query string matching. +func (e *ExpectedPrepare) ExpectQuery() *ExpectedQuery { + eq := &ExpectedQuery{} + eq.expectSQL = e.expectSQL + eq.converter = e.mock.converter + e.mock.expected = append(e.mock.expected, eq) + return eq +} + +// ExpectExec allows to expect Exec() on this prepared statement. +// This method is convenient in order to prevent duplicating sql query string matching. 
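+// For example (a sketch, query and arguments are illustrative):
+//
+//	prep := mock.ExpectPrepare("INSERT INTO users")
+//	prep.ExpectExec().
+//		WithArgs("john").
+//		WillReturnResult(sqlmock.NewResult(1, 1))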
+func (e *ExpectedPrepare) ExpectExec() *ExpectedExec { + eq := &ExpectedExec{} + eq.expectSQL = e.expectSQL + eq.converter = e.mock.converter + e.mock.expected = append(e.mock.expected, eq) + return eq +} + +// String returns string representation +func (e *ExpectedPrepare) String() string { + msg := "ExpectedPrepare => expecting Prepare statement which:" + msg += "\n - matches sql: '" + e.expectSQL + "'" + + if e.err != nil { + msg += fmt.Sprintf("\n - should return error: %s", e.err) + } + + if e.closeErr != nil { + msg += fmt.Sprintf("\n - should return error on Close: %s", e.closeErr) + } + + return msg +} + +// query based expectation +// adds a query matching logic +type queryBasedExpectation struct { + commonExpectation + expectSQL string + converter driver.ValueConverter + args []driver.Value +} + +func (e *queryBasedExpectation) attemptArgMatch(args []namedValue) (err error) { + // catch panic + defer func() { + if e := recover(); e != nil { + _, ok := e.(error) + if !ok { + err = fmt.Errorf(e.(string)) + } + } + }() + + err = e.argsMatches(args) + return +} diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/expectations_before_go18.go b/vendor/github.com/DATA-DOG/go-sqlmock/expectations_before_go18.go new file mode 100644 index 00000000..e368e040 --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/expectations_before_go18.go @@ -0,0 +1,52 @@ +// +build !go1.8 + +package sqlmock + +import ( + "database/sql/driver" + "fmt" + "reflect" +) + +// WillReturnRows specifies the set of resulting rows that will be returned +// by the triggered query +func (e *ExpectedQuery) WillReturnRows(rows *Rows) *ExpectedQuery { + e.rows = &rowSets{sets: []*Rows{rows}, ex: e} + return e +} + +func (e *queryBasedExpectation) argsMatches(args []namedValue) error { + if nil == e.args { + return nil + } + if len(args) != len(e.args) { + return fmt.Errorf("expected %d, but got %d arguments", len(e.args), len(args)) + } + for k, v := range args { + // custom argument matcher + matcher, ok := e.args[k].(Argument) + if ok { + // @TODO: does it make sense to pass value instead of named value? 
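+			// a custom Argument (for example AnyArg, or the AnyTime matcher from the
+			// README) decides the match on its own and skips the value conversion
+			// and deep-equality checks below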
+ if !matcher.Match(v.Value) { + return fmt.Errorf("matcher %T could not match %d argument %T - %+v", matcher, k, args[k], args[k]) + } + continue + } + + dval := e.args[k] + // convert to driver converter + darg, err := e.converter.ConvertValue(dval) + if err != nil { + return fmt.Errorf("could not convert %d argument %T - %+v to driver value: %s", k, e.args[k], e.args[k], err) + } + + if !driver.IsValue(darg) { + return fmt.Errorf("argument %d: non-subset type %T returned from Value", k, darg) + } + + if !reflect.DeepEqual(darg, v.Value) { + return fmt.Errorf("argument %d expected [%T - %+v] does not match actual [%T - %+v]", k, darg, darg, v.Value, v.Value) + } + } + return nil +} diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/expectations_go18.go b/vendor/github.com/DATA-DOG/go-sqlmock/expectations_go18.go new file mode 100644 index 00000000..2d5ccba0 --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/expectations_go18.go @@ -0,0 +1,66 @@ +// +build go1.8 + +package sqlmock + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "reflect" +) + +// WillReturnRows specifies the set of resulting rows that will be returned +// by the triggered query +func (e *ExpectedQuery) WillReturnRows(rows ...*Rows) *ExpectedQuery { + sets := make([]*Rows, len(rows)) + for i, r := range rows { + sets[i] = r + } + e.rows = &rowSets{sets: sets, ex: e} + return e +} + +func (e *queryBasedExpectation) argsMatches(args []namedValue) error { + if nil == e.args { + return nil + } + if len(args) != len(e.args) { + return fmt.Errorf("expected %d, but got %d arguments", len(e.args), len(args)) + } + // @TODO should we assert either all args are named or ordinal? + for k, v := range args { + // custom argument matcher + matcher, ok := e.args[k].(Argument) + if ok { + if !matcher.Match(v.Value) { + return fmt.Errorf("matcher %T could not match %d argument %T - %+v", matcher, k, args[k], args[k]) + } + continue + } + + dval := e.args[k] + if named, isNamed := dval.(sql.NamedArg); isNamed { + dval = named.Value + if v.Name != named.Name { + return fmt.Errorf("named argument %d: name: \"%s\" does not match expected: \"%s\"", k, v.Name, named.Name) + } + } else if k+1 != v.Ordinal { + return fmt.Errorf("argument %d: ordinal position: %d does not match expected: %d", k, k+1, v.Ordinal) + } + + // convert to driver converter + darg, err := e.converter.ConvertValue(dval) + if err != nil { + return fmt.Errorf("could not convert %d argument %T - %+v to driver value: %s", k, e.args[k], e.args[k], err) + } + + if !driver.IsValue(darg) { + return fmt.Errorf("argument %d: non-subset type %T returned from Value", k, darg) + } + + if !reflect.DeepEqual(darg, v.Value) { + return fmt.Errorf("argument %d expected [%T - %+v] does not match actual [%T - %+v]", k, darg, darg, v.Value, v.Value) + } + } + return nil +} diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/go.mod b/vendor/github.com/DATA-DOG/go-sqlmock/go.mod new file mode 100644 index 00000000..eaf8a5ae --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/go.mod @@ -0,0 +1 @@ +module github.com/DATA-DOG/go-sqlmock diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/options.go b/vendor/github.com/DATA-DOG/go-sqlmock/options.go new file mode 100644 index 00000000..29053eee --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/options.go @@ -0,0 +1,22 @@ +package sqlmock + +import "database/sql/driver" + +// ValueConverterOption allows to create a sqlmock connection +// with a custom ValueConverter to support drivers with special data types. 
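+// For example (a sketch; myConverter stands for any driver.ValueConverter
+// implementation and is a made-up name):
+//
+//	db, mock, err := sqlmock.New(sqlmock.ValueConverterOption(myConverter))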
+func ValueConverterOption(converter driver.ValueConverter) func(*sqlmock) error { + return func(s *sqlmock) error { + s.converter = converter + return nil + } +} + +// QueryMatcherOption allows to customize SQL query matcher +// and match SQL query strings in more sophisticated ways. +// The default QueryMatcher is QueryMatcherRegexp. +func QueryMatcherOption(queryMatcher QueryMatcher) func(*sqlmock) error { + return func(s *sqlmock) error { + s.queryMatcher = queryMatcher + return nil + } +} diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/query.go b/vendor/github.com/DATA-DOG/go-sqlmock/query.go new file mode 100644 index 00000000..47d3796c --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/query.go @@ -0,0 +1,68 @@ +package sqlmock + +import ( + "fmt" + "regexp" + "strings" +) + +var re = regexp.MustCompile("\\s+") + +// strip out new lines and trim spaces +func stripQuery(q string) (s string) { + return strings.TrimSpace(re.ReplaceAllString(q, " ")) +} + +// QueryMatcher is an SQL query string matcher interface, +// which can be used to customize validation of SQL query strings. +// As an example, external library could be used to build +// and validate SQL ast, columns selected. +// +// sqlmock can be customized to implement a different QueryMatcher +// configured through an option when sqlmock.New or sqlmock.NewWithDSN +// is called, default QueryMatcher is QueryMatcherRegexp. +type QueryMatcher interface { + + // Match expected SQL query string without whitespace to + // actual SQL. + Match(expectedSQL, actualSQL string) error +} + +// QueryMatcherFunc type is an adapter to allow the use of +// ordinary functions as QueryMatcher. If f is a function +// with the appropriate signature, QueryMatcherFunc(f) is a +// QueryMatcher that calls f. +type QueryMatcherFunc func(expectedSQL, actualSQL string) error + +// Match implements the QueryMatcher +func (f QueryMatcherFunc) Match(expectedSQL, actualSQL string) error { + return f(expectedSQL, actualSQL) +} + +// QueryMatcherRegexp is the default SQL query matcher +// used by sqlmock. It parses expectedSQL to a regular +// expression and attempts to match actualSQL. +var QueryMatcherRegexp QueryMatcher = QueryMatcherFunc(func(expectedSQL, actualSQL string) error { + expect := stripQuery(expectedSQL) + actual := stripQuery(actualSQL) + re, err := regexp.Compile(expect) + if err != nil { + return err + } + if !re.MatchString(actual) { + return fmt.Errorf(`could not match actual sql: "%s" with expected regexp "%s"`, actual, re.String()) + } + return nil +}) + +// QueryMatcherEqual is the SQL query matcher +// which simply tries a case sensitive match of +// expected and actual SQL strings without whitespace. 
+var QueryMatcherEqual QueryMatcher = QueryMatcherFunc(func(expectedSQL, actualSQL string) error { + expect := stripQuery(expectedSQL) + actual := stripQuery(actualSQL) + if actual != expect { + return fmt.Errorf(`actual sql: "%s" does not equal to expected "%s"`, actual, expect) + } + return nil +}) diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/result.go b/vendor/github.com/DATA-DOG/go-sqlmock/result.go new file mode 100644 index 00000000..a63e72ba --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/result.go @@ -0,0 +1,39 @@ +package sqlmock + +import ( + "database/sql/driver" +) + +// Result satisfies sql driver Result, which +// holds last insert id and rows affected +// by Exec queries +type result struct { + insertID int64 + rowsAffected int64 + err error +} + +// NewResult creates a new sql driver Result +// for Exec based query mocks. +func NewResult(lastInsertID int64, rowsAffected int64) driver.Result { + return &result{ + insertID: lastInsertID, + rowsAffected: rowsAffected, + } +} + +// NewErrorResult creates a new sql driver Result +// which returns an error given for both interface methods +func NewErrorResult(err error) driver.Result { + return &result{ + err: err, + } +} + +func (r *result) LastInsertId() (int64, error) { + return r.insertID, r.err +} + +func (r *result) RowsAffected() (int64, error) { + return r.rowsAffected, r.err +} diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/rows.go b/vendor/github.com/DATA-DOG/go-sqlmock/rows.go new file mode 100644 index 00000000..4dcd65c2 --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/rows.go @@ -0,0 +1,176 @@ +package sqlmock + +import ( + "database/sql/driver" + "encoding/csv" + "fmt" + "io" + "strings" +) + +// CSVColumnParser is a function which converts trimmed csv +// column string to a []byte representation. Currently +// transforms NULL to nil +var CSVColumnParser = func(s string) []byte { + switch { + case strings.ToLower(s) == "null": + return nil + } + return []byte(s) +} + +type rowSets struct { + sets []*Rows + pos int + ex *ExpectedQuery +} + +func (rs *rowSets) Columns() []string { + return rs.sets[rs.pos].cols +} + +func (rs *rowSets) Close() error { + rs.ex.rowsWereClosed = true + return rs.sets[rs.pos].closeErr +} + +// advances to next row +func (rs *rowSets) Next(dest []driver.Value) error { + r := rs.sets[rs.pos] + r.pos++ + if r.pos > len(r.rows) { + return io.EOF // per interface spec + } + + for i, col := range r.rows[r.pos-1] { + dest[i] = col + } + + return r.nextErr[r.pos-1] +} + +// transforms to debuggable printable string +func (rs *rowSets) String() string { + if rs.empty() { + return "with empty rows" + } + + msg := "should return rows:\n" + if len(rs.sets) == 1 { + for n, row := range rs.sets[0].rows { + msg += fmt.Sprintf(" row %d - %+v\n", n, row) + } + return strings.TrimSpace(msg) + } + for i, set := range rs.sets { + msg += fmt.Sprintf(" result set: %d\n", i) + for n, row := range set.rows { + msg += fmt.Sprintf(" row %d - %+v\n", n, row) + } + } + return strings.TrimSpace(msg) +} + +func (rs *rowSets) empty() bool { + for _, set := range rs.sets { + if len(set.rows) > 0 { + return false + } + } + return true +} + +// Rows is a mocked collection of rows to +// return for Query result +type Rows struct { + converter driver.ValueConverter + cols []string + rows [][]driver.Value + pos int + nextErr map[int]error + closeErr error +} + +// NewRows allows Rows to be created from a +// sql driver.Value slice or from the CSV string and +// to be used as sql driver.Rows. 
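+//
+// For example (column names and values are illustrative):
+//
+//	rows := sqlmock.NewRows([]string{"id", "name"}).
+//		AddRow(1, "john").
+//		AddRow(2, "jane")
+//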
+// Use Sqlmock.NewRows instead if using a custom converter +func NewRows(columns []string) *Rows { + return &Rows{ + cols: columns, + nextErr: make(map[int]error), + converter: driver.DefaultParameterConverter, + } +} + +// CloseError allows to set an error +// which will be returned by rows.Close +// function. +// +// The close error will be triggered only in cases +// when rows.Next() EOF was not yet reached, that is +// a default sql library behavior +func (r *Rows) CloseError(err error) *Rows { + r.closeErr = err + return r +} + +// RowError allows to set an error +// which will be returned when a given +// row number is read +func (r *Rows) RowError(row int, err error) *Rows { + r.nextErr[row] = err + return r +} + +// AddRow composed from database driver.Value slice +// return the same instance to perform subsequent actions. +// Note that the number of values must match the number +// of columns +func (r *Rows) AddRow(values ...driver.Value) *Rows { + if len(values) != len(r.cols) { + panic("Expected number of values to match number of columns") + } + + row := make([]driver.Value, len(r.cols)) + for i, v := range values { + // Convert user-friendly values (such as int or driver.Valuer) + // to database/sql native value (driver.Value such as int64) + var err error + v, err = r.converter.ConvertValue(v) + if err != nil { + panic(fmt.Errorf( + "row #%d, column #%d (%q) type %T: %s", + len(r.rows)+1, i, r.cols[i], values[i], err, + )) + } + + row[i] = v + } + + r.rows = append(r.rows, row) + return r +} + +// FromCSVString build rows from csv string. +// return the same instance to perform subsequent actions. +// Note that the number of values must match the number +// of columns +func (r *Rows) FromCSVString(s string) *Rows { + res := strings.NewReader(strings.TrimSpace(s)) + csvReader := csv.NewReader(res) + + for { + res, err := csvReader.Read() + if err != nil || res == nil { + break + } + + row := make([]driver.Value, len(r.cols)) + for i, v := range res { + row[i] = CSVColumnParser(strings.TrimSpace(v)) + } + r.rows = append(r.rows, row) + } + return r +} diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/rows_go18.go b/vendor/github.com/DATA-DOG/go-sqlmock/rows_go18.go new file mode 100644 index 00000000..4ecf84e7 --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/rows_go18.go @@ -0,0 +1,20 @@ +// +build go1.8 + +package sqlmock + +import "io" + +// Implement the "RowsNextResultSet" interface +func (rs *rowSets) HasNextResultSet() bool { + return rs.pos+1 < len(rs.sets) +} + +// Implement the "RowsNextResultSet" interface +func (rs *rowSets) NextResultSet() error { + if !rs.HasNextResultSet() { + return io.EOF + } + + rs.pos++ + return nil +} diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/sqlmock.go b/vendor/github.com/DATA-DOG/go-sqlmock/sqlmock.go new file mode 100644 index 00000000..4896307d --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/sqlmock.go @@ -0,0 +1,589 @@ +/* +Package sqlmock is a mock library implementing sql driver. Which has one and only +purpose - to simulate any sql driver behavior in tests, without needing a real +database connection. It helps to maintain correct **TDD** workflow. + +It does not require any modifications to your source code in order to test +and mock database operations. Supports concurrency and multiple database mocking. + +The driver allows to mock any sql driver method behavior. 
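+
+A typical flow (sketch): open a mock connection with New, declare expectations
+on the returned Sqlmock (ExpectQuery, ExpectExec, ExpectBegin and so on), run
+the code under test against the returned *sql.DB, and finally call
+ExpectationsWereMet to verify that every expectation was triggered.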
+*/ +package sqlmock + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "time" +) + +// Sqlmock interface serves to create expectations +// for any kind of database action in order to mock +// and test real database behavior. +type Sqlmock interface { + + // ExpectClose queues an expectation for this database + // action to be triggered. the *ExpectedClose allows + // to mock database response + ExpectClose() *ExpectedClose + + // ExpectationsWereMet checks whether all queued expectations + // were met in order. If any of them was not met - an error is returned. + ExpectationsWereMet() error + + // ExpectPrepare expects Prepare() to be called with expectedSQL query. + // the *ExpectedPrepare allows to mock database response. + // Note that you may expect Query() or Exec() on the *ExpectedPrepare + // statement to prevent repeating expectedSQL + ExpectPrepare(expectedSQL string) *ExpectedPrepare + + // ExpectQuery expects Query() or QueryRow() to be called with expectedSQL query. + // the *ExpectedQuery allows to mock database response. + ExpectQuery(expectedSQL string) *ExpectedQuery + + // ExpectExec expects Exec() to be called with expectedSQL query. + // the *ExpectedExec allows to mock database response + ExpectExec(expectedSQL string) *ExpectedExec + + // ExpectBegin expects *sql.DB.Begin to be called. + // the *ExpectedBegin allows to mock database response + ExpectBegin() *ExpectedBegin + + // ExpectCommit expects *sql.Tx.Commit to be called. + // the *ExpectedCommit allows to mock database response + ExpectCommit() *ExpectedCommit + + // ExpectRollback expects *sql.Tx.Rollback to be called. + // the *ExpectedRollback allows to mock database response + ExpectRollback() *ExpectedRollback + + // MatchExpectationsInOrder gives an option whether to match all + // expectations in the order they were set or not. + // + // By default it is set to - true. But if you use goroutines + // to parallelize your query executation, that option may + // be handy. + // + // This option may be turned on anytime during tests. As soon + // as it is switched to false, expectations will be matched + // in any order. Or otherwise if switched to true, any unmatched + // expectations will be expected in order + MatchExpectationsInOrder(bool) + + // NewRows allows Rows to be created from a + // sql driver.Value slice or from the CSV string and + // to be used as sql driver.Rows. + NewRows(columns []string) *Rows +} + +type sqlmock struct { + ordered bool + dsn string + opened int + drv *mockDriver + converter driver.ValueConverter + queryMatcher QueryMatcher + + expected []expectation +} + +func (c *sqlmock) open(options []func(*sqlmock) error) (*sql.DB, Sqlmock, error) { + db, err := sql.Open("sqlmock", c.dsn) + if err != nil { + return db, c, err + } + for _, option := range options { + err := option(c) + if err != nil { + return db, c, err + } + } + if c.converter == nil { + c.converter = driver.DefaultParameterConverter + } + if c.queryMatcher == nil { + c.queryMatcher = QueryMatcherRegexp + } + return db, c, db.Ping() +} + +func (c *sqlmock) ExpectClose() *ExpectedClose { + e := &ExpectedClose{} + c.expected = append(c.expected, e) + return e +} + +func (c *sqlmock) MatchExpectationsInOrder(b bool) { + c.ordered = b +} + +// Close a mock database driver connection. It may or may not +// be called depending on the circumstances, but if it is called +// there must be an *ExpectedClose expectation satisfied. 
+// meets http://golang.org/pkg/database/sql/driver/#Conn interface +func (c *sqlmock) Close() error { + c.drv.Lock() + defer c.drv.Unlock() + + c.opened-- + if c.opened == 0 { + delete(c.drv.conns, c.dsn) + } + + var expected *ExpectedClose + var fulfilled int + var ok bool + for _, next := range c.expected { + next.Lock() + if next.fulfilled() { + next.Unlock() + fulfilled++ + continue + } + + if expected, ok = next.(*ExpectedClose); ok { + break + } + + next.Unlock() + if c.ordered { + return fmt.Errorf("call to database Close, was not expected, next expectation is: %s", next) + } + } + + if expected == nil { + msg := "call to database Close was not expected" + if fulfilled == len(c.expected) { + msg = "all expectations were already fulfilled, " + msg + } + return fmt.Errorf(msg) + } + + expected.triggered = true + expected.Unlock() + return expected.err +} + +func (c *sqlmock) ExpectationsWereMet() error { + for _, e := range c.expected { + e.Lock() + fulfilled := e.fulfilled() + e.Unlock() + + if !fulfilled { + return fmt.Errorf("there is a remaining expectation which was not matched: %s", e) + } + + // for expected prepared statement check whether it was closed if expected + if prep, ok := e.(*ExpectedPrepare); ok { + if prep.mustBeClosed && !prep.wasClosed { + return fmt.Errorf("expected prepared statement to be closed, but it was not: %s", prep) + } + } + + // must check whether all expected queried rows are closed + if query, ok := e.(*ExpectedQuery); ok { + if query.rowsMustBeClosed && !query.rowsWereClosed { + return fmt.Errorf("expected query rows to be closed, but it was not: %s", query) + } + } + } + return nil +} + +// Begin meets http://golang.org/pkg/database/sql/driver/#Conn interface +func (c *sqlmock) Begin() (driver.Tx, error) { + ex, err := c.begin() + if ex != nil { + time.Sleep(ex.delay) + } + if err != nil { + return nil, err + } + + return c, nil +} + +func (c *sqlmock) begin() (*ExpectedBegin, error) { + var expected *ExpectedBegin + var ok bool + var fulfilled int + for _, next := range c.expected { + next.Lock() + if next.fulfilled() { + next.Unlock() + fulfilled++ + continue + } + + if expected, ok = next.(*ExpectedBegin); ok { + break + } + + next.Unlock() + if c.ordered { + return nil, fmt.Errorf("call to database transaction Begin, was not expected, next expectation is: %s", next) + } + } + if expected == nil { + msg := "call to database transaction Begin was not expected" + if fulfilled == len(c.expected) { + msg = "all expectations were already fulfilled, " + msg + } + return nil, fmt.Errorf(msg) + } + + expected.triggered = true + expected.Unlock() + + return expected, expected.err +} + +func (c *sqlmock) ExpectBegin() *ExpectedBegin { + e := &ExpectedBegin{} + c.expected = append(c.expected, e) + return e +} + +// Exec meets http://golang.org/pkg/database/sql/driver/#Execer +func (c *sqlmock) Exec(query string, args []driver.Value) (driver.Result, error) { + namedArgs := make([]namedValue, len(args)) + for i, v := range args { + namedArgs[i] = namedValue{ + Ordinal: i + 1, + Value: v, + } + } + + ex, err := c.exec(query, namedArgs) + if ex != nil { + time.Sleep(ex.delay) + } + if err != nil { + return nil, err + } + + return ex.result, nil +} + +func (c *sqlmock) exec(query string, args []namedValue) (*ExpectedExec, error) { + var expected *ExpectedExec + var fulfilled int + var ok bool + for _, next := range c.expected { + next.Lock() + if next.fulfilled() { + next.Unlock() + fulfilled++ + continue + } + + if c.ordered { + if expected, ok = 
next.(*ExpectedExec); ok { + break + } + next.Unlock() + return nil, fmt.Errorf("call to ExecQuery '%s' with args %+v, was not expected, next expectation is: %s", query, args, next) + } + if exec, ok := next.(*ExpectedExec); ok { + if err := c.queryMatcher.Match(exec.expectSQL, query); err != nil { + next.Unlock() + continue + } + + if err := exec.attemptArgMatch(args); err == nil { + expected = exec + break + } + } + next.Unlock() + } + if expected == nil { + msg := "call to ExecQuery '%s' with args %+v was not expected" + if fulfilled == len(c.expected) { + msg = "all expectations were already fulfilled, " + msg + } + return nil, fmt.Errorf(msg, query, args) + } + defer expected.Unlock() + + if err := c.queryMatcher.Match(expected.expectSQL, query); err != nil { + return nil, fmt.Errorf("ExecQuery: %v", err) + } + + if err := expected.argsMatches(args); err != nil { + return nil, fmt.Errorf("ExecQuery '%s', arguments do not match: %s", query, err) + } + + expected.triggered = true + if expected.err != nil { + return expected, expected.err // mocked to return error + } + + if expected.result == nil { + return nil, fmt.Errorf("ExecQuery '%s' with args %+v, must return a database/sql/driver.Result, but it was not set for expectation %T as %+v", query, args, expected, expected) + } + + return expected, nil +} + +func (c *sqlmock) ExpectExec(expectedSQL string) *ExpectedExec { + e := &ExpectedExec{} + e.expectSQL = expectedSQL + e.converter = c.converter + c.expected = append(c.expected, e) + return e +} + +// Prepare meets http://golang.org/pkg/database/sql/driver/#Conn interface +func (c *sqlmock) Prepare(query string) (driver.Stmt, error) { + ex, err := c.prepare(query) + if ex != nil { + time.Sleep(ex.delay) + } + if err != nil { + return nil, err + } + + return &statement{c, ex, query}, nil +} + +func (c *sqlmock) prepare(query string) (*ExpectedPrepare, error) { + var expected *ExpectedPrepare + var fulfilled int + var ok bool + + for _, next := range c.expected { + next.Lock() + if next.fulfilled() { + next.Unlock() + fulfilled++ + continue + } + + if c.ordered { + if expected, ok = next.(*ExpectedPrepare); ok { + break + } + + next.Unlock() + return nil, fmt.Errorf("call to Prepare statement with query '%s', was not expected, next expectation is: %s", query, next) + } + + if pr, ok := next.(*ExpectedPrepare); ok { + if err := c.queryMatcher.Match(pr.expectSQL, query); err == nil { + expected = pr + break + } + } + next.Unlock() + } + + if expected == nil { + msg := "call to Prepare '%s' query was not expected" + if fulfilled == len(c.expected) { + msg = "all expectations were already fulfilled, " + msg + } + return nil, fmt.Errorf(msg, query) + } + defer expected.Unlock() + if err := c.queryMatcher.Match(expected.expectSQL, query); err != nil { + return nil, fmt.Errorf("Prepare: %v", err) + } + + expected.triggered = true + return expected, expected.err +} + +func (c *sqlmock) ExpectPrepare(expectedSQL string) *ExpectedPrepare { + e := &ExpectedPrepare{expectSQL: expectedSQL, mock: c} + c.expected = append(c.expected, e) + return e +} + +type namedValue struct { + Name string + Ordinal int + Value driver.Value +} + +// Query meets http://golang.org/pkg/database/sql/driver/#Queryer +func (c *sqlmock) Query(query string, args []driver.Value) (driver.Rows, error) { + namedArgs := make([]namedValue, len(args)) + for i, v := range args { + namedArgs[i] = namedValue{ + Ordinal: i + 1, + Value: v, + } + } + + ex, err := c.query(query, namedArgs) + if ex != nil { + time.Sleep(ex.delay) + } + 
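+	// any configured delay has already been applied; now surface either the
+	// matching error or the expectation's rows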
if err != nil { + return nil, err + } + + return ex.rows, nil +} + +func (c *sqlmock) query(query string, args []namedValue) (*ExpectedQuery, error) { + var expected *ExpectedQuery + var fulfilled int + var ok bool + for _, next := range c.expected { + next.Lock() + if next.fulfilled() { + next.Unlock() + fulfilled++ + continue + } + + if c.ordered { + if expected, ok = next.(*ExpectedQuery); ok { + break + } + next.Unlock() + return nil, fmt.Errorf("call to Query '%s' with args %+v, was not expected, next expectation is: %s", query, args, next) + } + if qr, ok := next.(*ExpectedQuery); ok { + if err := c.queryMatcher.Match(qr.expectSQL, query); err != nil { + next.Unlock() + continue + } + if err := qr.attemptArgMatch(args); err == nil { + expected = qr + break + } + } + next.Unlock() + } + + if expected == nil { + msg := "call to Query '%s' with args %+v was not expected" + if fulfilled == len(c.expected) { + msg = "all expectations were already fulfilled, " + msg + } + return nil, fmt.Errorf(msg, query, args) + } + + defer expected.Unlock() + + if err := c.queryMatcher.Match(expected.expectSQL, query); err != nil { + return nil, fmt.Errorf("Query: %v", err) + } + + if err := expected.argsMatches(args); err != nil { + return nil, fmt.Errorf("Query '%s', arguments do not match: %s", query, err) + } + + expected.triggered = true + if expected.err != nil { + return expected, expected.err // mocked to return error + } + + if expected.rows == nil { + return nil, fmt.Errorf("Query '%s' with args %+v, must return a database/sql/driver.Rows, but it was not set for expectation %T as %+v", query, args, expected, expected) + } + return expected, nil +} + +func (c *sqlmock) ExpectQuery(expectedSQL string) *ExpectedQuery { + e := &ExpectedQuery{} + e.expectSQL = expectedSQL + e.converter = c.converter + c.expected = append(c.expected, e) + return e +} + +func (c *sqlmock) ExpectCommit() *ExpectedCommit { + e := &ExpectedCommit{} + c.expected = append(c.expected, e) + return e +} + +func (c *sqlmock) ExpectRollback() *ExpectedRollback { + e := &ExpectedRollback{} + c.expected = append(c.expected, e) + return e +} + +// Commit meets http://golang.org/pkg/database/sql/driver/#Tx +func (c *sqlmock) Commit() error { + var expected *ExpectedCommit + var fulfilled int + var ok bool + for _, next := range c.expected { + next.Lock() + if next.fulfilled() { + next.Unlock() + fulfilled++ + continue + } + + if expected, ok = next.(*ExpectedCommit); ok { + break + } + + next.Unlock() + if c.ordered { + return fmt.Errorf("call to Commit transaction, was not expected, next expectation is: %s", next) + } + } + if expected == nil { + msg := "call to Commit transaction was not expected" + if fulfilled == len(c.expected) { + msg = "all expectations were already fulfilled, " + msg + } + return fmt.Errorf(msg) + } + + expected.triggered = true + expected.Unlock() + return expected.err +} + +// Rollback meets http://golang.org/pkg/database/sql/driver/#Tx +func (c *sqlmock) Rollback() error { + var expected *ExpectedRollback + var fulfilled int + var ok bool + for _, next := range c.expected { + next.Lock() + if next.fulfilled() { + next.Unlock() + fulfilled++ + continue + } + + if expected, ok = next.(*ExpectedRollback); ok { + break + } + + next.Unlock() + if c.ordered { + return fmt.Errorf("call to Rollback transaction, was not expected, next expectation is: %s", next) + } + } + if expected == nil { + msg := "call to Rollback transaction was not expected" + if fulfilled == len(c.expected) { + msg = "all expectations 
were already fulfilled, " + msg + } + return fmt.Errorf(msg) + } + + expected.triggered = true + expected.Unlock() + return expected.err +} + +// NewRows allows Rows to be created from a +// sql driver.Value slice or from the CSV string and +// to be used as sql driver.Rows. +func (c *sqlmock) NewRows(columns []string) *Rows { + r := NewRows(columns) + r.converter = c.converter + return r +} diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/sqlmock_go18.go b/vendor/github.com/DATA-DOG/go-sqlmock/sqlmock_go18.go new file mode 100644 index 00000000..0afb2968 --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/sqlmock_go18.go @@ -0,0 +1,121 @@ +// +build go1.8 + +package sqlmock + +import ( + "context" + "database/sql/driver" + "errors" + "time" +) + +// ErrCancelled defines an error value, which can be expected in case of +// such cancellation error. +var ErrCancelled = errors.New("canceling query due to user request") + +// Implement the "QueryerContext" interface +func (c *sqlmock) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + namedArgs := make([]namedValue, len(args)) + for i, nv := range args { + namedArgs[i] = namedValue(nv) + } + + ex, err := c.query(query, namedArgs) + if ex != nil { + select { + case <-time.After(ex.delay): + if err != nil { + return nil, err + } + return ex.rows, nil + case <-ctx.Done(): + return nil, ErrCancelled + } + } + + return nil, err +} + +// Implement the "ExecerContext" interface +func (c *sqlmock) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + namedArgs := make([]namedValue, len(args)) + for i, nv := range args { + namedArgs[i] = namedValue(nv) + } + + ex, err := c.exec(query, namedArgs) + if ex != nil { + select { + case <-time.After(ex.delay): + if err != nil { + return nil, err + } + return ex.result, nil + case <-ctx.Done(): + return nil, ErrCancelled + } + } + + return nil, err +} + +// Implement the "ConnBeginTx" interface +func (c *sqlmock) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + ex, err := c.begin() + if ex != nil { + select { + case <-time.After(ex.delay): + if err != nil { + return nil, err + } + return c, nil + case <-ctx.Done(): + return nil, ErrCancelled + } + } + + return nil, err +} + +// Implement the "ConnPrepareContext" interface +func (c *sqlmock) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + ex, err := c.prepare(query) + if ex != nil { + select { + case <-time.After(ex.delay): + if err != nil { + return nil, err + } + return &statement{c, ex, query}, nil + case <-ctx.Done(): + return nil, ErrCancelled + } + } + + return nil, err +} + +// Implement the "Pinger" interface +// for now we do not have a Ping expectation +// may be something for the future +func (c *sqlmock) Ping(ctx context.Context) error { + return nil +} + +// Implement the "StmtExecContext" interface +func (stmt *statement) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + return stmt.conn.ExecContext(ctx, stmt.query, args) +} + +// Implement the "StmtQueryContext" interface +func (stmt *statement) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + return stmt.conn.QueryContext(ctx, stmt.query, args) +} + +// @TODO maybe add ExpectedBegin.WithOptions(driver.TxOptions) + +// CheckNamedValue meets https://golang.org/pkg/database/sql/driver/#NamedValueChecker +func (c *sqlmock) CheckNamedValue(nv *driver.NamedValue) (err error) { + 
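+	// run every named value through the configured converter
+	// (driver.DefaultParameterConverter unless ValueConverterOption was used)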
nv.Value, err = c.converter.ConvertValue(nv.Value) + return err +} diff --git a/vendor/github.com/DATA-DOG/go-sqlmock/statement.go b/vendor/github.com/DATA-DOG/go-sqlmock/statement.go new file mode 100644 index 00000000..570efd99 --- /dev/null +++ b/vendor/github.com/DATA-DOG/go-sqlmock/statement.go @@ -0,0 +1,28 @@ +package sqlmock + +import ( + "database/sql/driver" +) + +type statement struct { + conn *sqlmock + ex *ExpectedPrepare + query string +} + +func (stmt *statement) Close() error { + stmt.ex.wasClosed = true + return stmt.ex.closeErr +} + +func (stmt *statement) NumInput() int { + return -1 +} + +func (stmt *statement) Exec(args []driver.Value) (driver.Result, error) { + return stmt.conn.Exec(stmt.query, args) +} + +func (stmt *statement) Query(args []driver.Value) (driver.Rows, error) { + return stmt.conn.Query(stmt.query, args) +} diff --git a/vendor/github.com/elgs/gosqljson/.gitignore b/vendor/github.com/cloudflare/golz4/.gitignore similarity index 93% rename from vendor/github.com/elgs/gosqljson/.gitignore rename to vendor/github.com/cloudflare/golz4/.gitignore index 5c0b00b8..00268614 100644 --- a/vendor/github.com/elgs/gosqljson/.gitignore +++ b/vendor/github.com/cloudflare/golz4/.gitignore @@ -20,6 +20,3 @@ _cgo_export.* _testmain.go *.exe -*.test - -.DS_Store \ No newline at end of file diff --git a/vendor/github.com/cloudflare/golz4/LICENSE b/vendor/github.com/cloudflare/golz4/LICENSE new file mode 100644 index 00000000..1579e81a --- /dev/null +++ b/vendor/github.com/cloudflare/golz4/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 CloudFlare, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +* Neither the name of the CloudFlare, Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/cloudflare/golz4/Makefile b/vendor/github.com/cloudflare/golz4/Makefile new file mode 100644 index 00000000..2296d80e --- /dev/null +++ b/vendor/github.com/cloudflare/golz4/Makefile @@ -0,0 +1,14 @@ +GCFLAGS := +LDFLAGS := + +.PHONY: install +install: + @go install -v . + +.PHONY: test +test: + @go test -gcflags='$(GCFLAGS)' -ldflags='$(LDFLAGS)' . + +.PHONY: bench +bench: + @go test -gcflags='$(GCFLAGS)' -ldflags='$(LDFLAGS)' -bench . 
diff --git a/vendor/github.com/cloudflare/golz4/README.md b/vendor/github.com/cloudflare/golz4/README.md new file mode 100644 index 00000000..e1bdb26e --- /dev/null +++ b/vendor/github.com/cloudflare/golz4/README.md @@ -0,0 +1,4 @@ +golz4 +===== + +Golang interface to LZ4 compression diff --git a/vendor/github.com/cloudflare/golz4/doc.go b/vendor/github.com/cloudflare/golz4/doc.go new file mode 100644 index 00000000..4876be87 --- /dev/null +++ b/vendor/github.com/cloudflare/golz4/doc.go @@ -0,0 +1,4 @@ +// Package lz4 implements compression using lz4.c and lz4hc.c +// +// Copyright (c) 2013 CloudFlare, Inc. +package lz4 diff --git a/vendor/github.com/cloudflare/golz4/lz4.go b/vendor/github.com/cloudflare/golz4/lz4.go new file mode 100644 index 00000000..f9abcb2d --- /dev/null +++ b/vendor/github.com/cloudflare/golz4/lz4.go @@ -0,0 +1,55 @@ +package lz4 + +// #cgo CFLAGS: -O3 +// #include "src/lz4.h" +// #include "src/lz4.c" +import "C" + +import ( + "errors" + "fmt" + "unsafe" +) + +// p gets a char pointer to the first byte of a []byte slice +func p(in []byte) *C.char { + if len(in) == 0 { + return (*C.char)(unsafe.Pointer(nil)) + } + return (*C.char)(unsafe.Pointer(&in[0])) +} + +// clen gets the length of a []byte slice as a char * +func clen(s []byte) C.int { + return C.int(len(s)) +} + +// Uncompress with a known output size. len(out) should be equal to +// the length of the uncompressed out. +func Uncompress(in, out []byte) (error) { + if int(C.LZ4_decompress_safe(p(in), p(out), clen(in), clen(out))) < 0 { + return errors.New("Malformed compression stream") + } + + return nil +} + +// CompressBound calculates the size of the output buffer needed by +// Compress. This is based on the following macro: +// +// #define LZ4_COMPRESSBOUND(isize) +// ((unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16) +func CompressBound(in []byte) int { + return len(in) + ((len(in) / 255) + 16) +} + +// Compress compresses in and puts the content in out. len(out) +// should have enough space for the compressed data (use CompressBound +// to calculate). Returns the number of bytes in the out slice. +func Compress(in, out []byte) (outSize int, err error) { + outSize = int(C.LZ4_compress_limitedOutput(p(in), p(out), clen(in), clen(out))) + if outSize == 0 { + err = fmt.Errorf("insufficient space for compression") + } + return +} diff --git a/vendor/github.com/cloudflare/golz4/lz4_hc.go b/vendor/github.com/cloudflare/golz4/lz4_hc.go new file mode 100644 index 00000000..9779352c --- /dev/null +++ b/vendor/github.com/cloudflare/golz4/lz4_hc.go @@ -0,0 +1,38 @@ +package lz4 + +// #cgo CFLAGS: -O3 +// #include "src/lz4hc.h" +// #include "src/lz4hc.c" +import "C" + +import ( + "fmt" +) + +// CompressHC compresses in and puts the content in out. len(out) +// should have enough space for the compressed data (use CompressBound +// to calculate). Returns the number of bytes in the out slice. Determines +// the compression level automatically. +func CompressHC(in, out []byte) (int, error) { + // 0 automatically sets the compression level. + return CompressHCLevel(in, out, 0) +} + +// CompressHCLevel compresses in at the given compression level and puts the +// content in out. len(out) should have enough space for the compressed data +// (use CompressBound to calculate). Returns the number of bytes in the out +// slice. To automatically choose the compression level, use 0. Otherwise, use +// any value in the inclusive range 1 (worst) through 16 (best). 
Most +// applications will prefer CompressHC. +func CompressHCLevel(in, out []byte, level int) (outSize int, err error) { + // LZ4HC does not handle empty buffers. Pass through to Compress. + if len(in) == 0 || len(out) == 0 { + return Compress(in, out) + } + + outSize = int(C.LZ4_compressHC2_limitedOutput(p(in), p(out), clen(in), clen(out), C.int(level))) + if outSize == 0 { + err = fmt.Errorf("insufficient space for compression") + } + return +} diff --git a/vendor/github.com/cloudflare/golz4/sample.txt b/vendor/github.com/cloudflare/golz4/sample.txt new file mode 100644 index 00000000..3bb27fb7 --- /dev/null +++ b/vendor/github.com/cloudflare/golz4/sample.txt @@ -0,0 +1,143 @@ +CANTO I + + +IN the midway of this our mortal life, +I found me in a gloomy wood, astray +Gone from the path direct: and e'en to tell +It were no easy task, how savage wild +That forest, how robust and rough its growth, +Which to remember only, my dismay +Renews, in bitterness not far from death. +Yet to discourse of what there good befell, +All else will I relate discover'd there. +How first I enter'd it I scarce can say, +Such sleepy dullness in that instant weigh'd +My senses down, when the true path I left, +But when a mountain's foot I reach'd, where clos'd +The valley, that had pierc'd my heart with dread, +I look'd aloft, and saw his shoulders broad +Already vested with that planet's beam, +Who leads all wanderers safe through every way. + +Then was a little respite to the fear, +That in my heart's recesses deep had lain, +All of that night, so pitifully pass'd: +And as a man, with difficult short breath, +Forespent with toiling, 'scap'd from sea to shore, +Turns to the perilous wide waste, and stands +At gaze; e'en so my spirit, that yet fail'd +Struggling with terror, turn'd to view the straits, +That none hath pass'd and liv'd. My weary frame +After short pause recomforted, again +I journey'd on over that lonely steep, + +The hinder foot still firmer. Scarce the ascent +Began, when, lo! a panther, nimble, light, +And cover'd with a speckled skin, appear'd, +Nor, when it saw me, vanish'd, rather strove +To check my onward going; that ofttimes +With purpose to retrace my steps I turn'd. + +The hour was morning's prime, and on his way +Aloft the sun ascended with those stars, +That with him rose, when Love divine first mov'd +Those its fair works: so that with joyous hope +All things conspir'd to fill me, the gay skin +Of that swift animal, the matin dawn +And the sweet season. Soon that joy was chas'd, +And by new dread succeeded, when in view +A lion came, 'gainst me, as it appear'd, + +With his head held aloft and hunger-mad, +That e'en the air was fear-struck. A she-wolf +Was at his heels, who in her leanness seem'd +Full of all wants, and many a land hath made +Disconsolate ere now. She with such fear +O'erwhelmed me, at the sight of her appall'd, +That of the height all hope I lost. As one, +Who with his gain elated, sees the time +When all unwares is gone, he inwardly +Mourns with heart-griping anguish; such was I, +Haunted by that fell beast, never at peace, +Who coming o'er against me, by degrees +Impell'd me where the sun in silence rests. + +While to the lower space with backward step +I fell, my ken discern'd the form one of one, +Whose voice seem'd faint through long disuse of speech. +When him in that great desert I espied, +"Have mercy on me!" cried I out aloud, +"Spirit! or living man! what e'er thou be!" 
+ +He answer'd: "Now not man, man once I was, +And born of Lombard parents, Mantuana both +By country, when the power of Julius yet +Was scarcely firm. At Rome my life was past +Beneath the mild Augustus, in the time +Of fabled deities and false. A bard +Was I, and made Anchises' upright son +The subject of my song, who came from Troy, +When the flames prey'd on Ilium's haughty towers. +But thou, say wherefore to such perils past +Return'st thou? wherefore not this pleasant mount +Ascendest, cause and source of all delight?" +"And art thou then that Virgil, that well-spring, +From which such copious floods of eloquence +Have issued?" I with front abash'd replied. +"Glory and light of all the tuneful train! +May it avail me that I long with zeal +Have sought thy volume, and with love immense +Have conn'd it o'er. My master thou and guide! +Thou he from whom alone I have deriv'd +That style, which for its beauty into fame +Exalts me. See the beast, from whom I fled. +O save me from her, thou illustrious sage!" + +"For every vein and pulse throughout my frame +She hath made tremble." He, soon as he saw +That I was weeping, answer'd, "Thou must needs +Another way pursue, if thou wouldst 'scape +From out that savage wilderness. This beast, +At whom thou criest, her way will suffer none +To pass, and no less hindrance makes than death: +So bad and so accursed in her kind, +That never sated is her ravenous will, +Still after food more craving than before. +To many an animal in wedlock vile +She fastens, and shall yet to many more, +Until that greyhound come, who shall destroy +Her with sharp pain. He will not life support +By earth nor its base metals, but by love, +Wisdom, and virtue, and his land shall be +The land 'twixt either Feltro. In his might +Shall safety to Italia's plains arise, +For whose fair realm, Camilla, virgin pure, +Nisus, Euryalus, and Turnus fell. +He with incessant chase through every town +Shall worry, until he to hell at length +Restore her, thence by envy first let loose. +I for thy profit pond'ring now devise, +That thou mayst follow me, and I thy guide +Will lead thee hence through an eternal space, +Where thou shalt hear despairing shrieks, and see +Spirits of old tormented, who invoke +A second death; and those next view, who dwell +Content in fire, for that they hope to come, +Whene'er the time may be, among the blest, +Into whose regions if thou then desire +T' ascend, a spirit worthier then I +Must lead thee, in whose charge, when I depart, +Thou shalt be left: for that Almighty King, +Who reigns above, a rebel to his law, +Adjudges me, and therefore hath decreed, +That to his city none through me should come. +He in all parts hath sway; there rules, there holds +His citadel and throne. O happy those, +Whom there he chooses!" I to him in few: +"Bard! by that God, whom thou didst not adore, +I do beseech thee (that this ill and worse +I may escape) to lead me, where thou saidst, +That I Saint Peter's gate may view, and those +Who as thou tell'st, are in such dismal plight." + +Onward he mov'd, I close his steps pursu'd. 
diff --git a/vendor/github.com/coreos/go-oidc/.gitignore b/vendor/github.com/coreos/go-oidc/.gitignore new file mode 100644 index 00000000..c96f2f47 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/.gitignore @@ -0,0 +1,2 @@ +/bin +/gopath diff --git a/vendor/github.com/coreos/go-oidc/.travis.yml b/vendor/github.com/coreos/go-oidc/.travis.yml new file mode 100644 index 00000000..6ff9dd96 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - "1.9" + - "1.10" + +install: + - go get -v -t github.com/coreos/go-oidc/... + - go get golang.org/x/tools/cmd/cover + - go get github.com/golang/lint/golint + +script: + - ./test + +notifications: + email: false diff --git a/vendor/github.com/coreos/go-oidc/CONTRIBUTING.md b/vendor/github.com/coreos/go-oidc/CONTRIBUTING.md new file mode 100644 index 00000000..6662073a --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/CONTRIBUTING.md @@ -0,0 +1,71 @@ +# How to Contribute + +CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via +GitHub pull requests. This document outlines some of the conventions on +development workflow, commit message formatting, contact points and other +resources to make it easier to get your contribution accepted. + +# Certificate of Origin + +By contributing to this project you agree to the Developer Certificate of +Origin (DCO). This document was created by the Linux Kernel community and is a +simple statement that you, as a contributor, have the legal right to make the +contribution. See the [DCO](DCO) file for details. + +# Email and Chat + +The project currently uses the general CoreOS email list and IRC channel: +- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev) +- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org + +Please avoid emailing maintainers found in the MAINTAINERS file directly. They +are very busy and read the mailing lists. + +## Getting Started + +- Fork the repository on GitHub +- Read the [README](README.md) for build and test instructions +- Play with the project, submit bugs, submit patches! + +## Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). +- Push your changes to a topic branch in your fork of the repository. +- Make sure the tests pass, and add any new tests as appropriate. +- Submit a pull request to the original repository. + +Thanks for your contributions! + +### Format of the Commit Message + +We follow a rough convention for commit messages that is designed to answer two +questions: what changed and why. The subject line should feature the what and +the body of the commit should describe the why. + +``` +scripts: add the test-cluster command + +this uses tmux to setup a test cluster that you can easily kill and +start for debugging. + +Fixes #38 +``` + +The format can be described more formally as follows: + +``` +: + + + +