TUN-1099: Bring back changes in 2018.10.1
parent 995e773096
commit ca9902a8d1

@@ -56,12 +56,34 @@
  ]
  revision = "992e7928c7c258628d2b13b769acc86781b9faea"

[[projects]]
  branch = "master"
  name = "github.com/coreos/go-oidc"
  packages = [
    "http",
    "jose",
    "key",
    "oauth2",
    "oidc"
  ]
  revision = "a93f71fdfe73d2c0f5413c0565eea0af6523a6df"

[[projects]]
  name = "github.com/coreos/go-systemd"
  packages = ["daemon"]
  revision = "39ca1b05acc7ad1220e09f133283b8859a8b71ab"
  version = "v17"

[[projects]]
  name = "github.com/coreos/pkg"
  packages = [
    "health",
    "httputil",
    "timeutil"
  ]
  revision = "97fdf19511ea361ae1c100dd393cc47f8dcfa1e1"
  version = "v4"

[[projects]]
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]

@@ -153,6 +175,12 @@
  packages = ["go/otgrpc"]
  revision = "8e809c8a86450a29b90dcc9efbf062d0fe6d9746"

[[projects]]
  name = "github.com/jonboulle/clockwork"
  packages = ["."]
  revision = "2eee05ed794112d45db504eb05aa693efd2b8b09"
  version = "v0.1.0"

[[projects]]
  branch = "master"
  name = "github.com/lib/pq"

@@ -282,8 +310,14 @@
  branch = "master"
  name = "golang.org/x/crypto"
  packages = [
    "curve25519",
    "ed25519",
    "ed25519/internal/edwards25519",
    "internal/subtle",
    "nacl/box",
    "nacl/secretbox",
    "poly1305",
    "salsa20/salsa",
    "ssh/terminal"
  ]
  revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602"

@@ -427,6 +461,6 @@
[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "9aac6a0260101988582bfa55d39559e53cf8ccd7e37caad3350aeea768f5bdb2"
  inputs-digest = "ee681bef3527e49801c841e313f98b40116eafe8b60be21273956eeb96487486"
  solver-name = "gps-cdcl"
  solver-version = 1
@@ -48,6 +48,7 @@
[[constraint]]
  name = "zombiezen.com/go/capnproto2"
  source = "https://github.com/zombiezen/go-capnproto2"
  version = "2.17.1"

[[constraint]]
  name = "github.com/gorilla/websocket"

@@ -64,3 +65,11 @@
[[override]]
  name = "github.com/mholt/caddy"
  branch = "master"

[[constraint]]
  branch = "master"
  name = "github.com/coreos/go-oidc"

[[constraint]]
  branch = "master"
  name = "golang.org/x/crypto"
Makefile
@@ -55,3 +55,7 @@ release: bin/equinox
bin/equinox:
	mkdir -p bin
	curl -s https://bin.equinox.io/c/75JtLRTsJ3n/release-tool-beta-$(EQUINOX_PLATFORM).tgz | tar xz -C bin/

.PHONY: tunnel-deps
tunnel-deps:
	capnp compile -ogo -I ./tunnelrpc tunnelrpc/tunnelrpc.capnp
@@ -0,0 +1,199 @@
package access

import (
	"errors"
	"fmt"
	"net/url"
	"os"

	"github.com/cloudflare/cloudflared/cmd/cloudflared/shell"
	"golang.org/x/net/idna"

	"github.com/cloudflare/cloudflared/log"
	raven "github.com/getsentry/raven-go"
	cli "gopkg.in/urfave/cli.v2"
)

const sentryDSN = "https://56a9c9fa5c364ab28f34b14f35ea0f1b@sentry.io/189878"

// Flags return the global flags for Access related commands (hopefully none)
func Flags() []cli.Flag {
	return []cli.Flag{} // no flags yet.
}

// Commands returns all the Access related subcommands
func Commands() []*cli.Command {
	return []*cli.Command{
		{
			Name:     "access",
			Category: "Access (BETA)",
			Usage:    "access <subcommand>",
			Description: `(BETA) Cloudflare Access protects internal resources by securing, authenticating and monitoring access
per-user and by application. With Cloudflare Access, only authenticated users with the required permissions are
able to reach sensitive resources. The commands provided here allow you to interact with Access protected
applications from the command line. This feature is considered beta. Your feedback is greatly appreciated!
https://cfl.re/CLIAuthBeta`,
			Subcommands: []*cli.Command{
				{
					Name:   "login",
					Action: login,
					Usage:  "login <url of access application>",
					Description: `The login subcommand initiates an authentication flow with your identity provider.
The subcommand will launch a browser. For headless systems, a url is provided.
Once authenticated with your identity provider, the login command will generate a JSON Web Token (JWT)
scoped to your identity, the application you intend to reach, and valid for a session duration set by your
administrator. cloudflared stores the token in local storage.`,
					Flags: []cli.Flag{
						&cli.StringFlag{
							Name:   "url",
							Hidden: true,
						},
					},
				},
				{
					Name:   "curl",
					Action: curl,
					Usage:  "curl <args>",
					Description: `The curl subcommand wraps curl and automatically injects the JWT into a cf-access-token
header when using curl to reach an application behind Access.`,
					ArgsUsage:       "allow-request will allow the curl request to continue even if the jwt is not present.",
					SkipFlagParsing: true,
				},
				{
					Name:        "token",
					Action:      token,
					Usage:       "token -app=<url of access application>",
					ArgsUsage:   "url of Access application",
					Description: `The token subcommand produces a JWT which can be used to authenticate requests.`,
					Flags: []cli.Flag{
						&cli.StringFlag{
							Name: "app",
						},
					},
				},
			},
		},
	}
}

// login pops up the browser window to do the actual login and JWT generation
func login(c *cli.Context) error {
	raven.SetDSN(sentryDSN)
	logger := log.CreateLogger()
	args := c.Args()
	appURL, err := url.Parse(args.First())
	if args.Len() < 1 || err != nil {
		logger.Errorf("Please provide the url of the Access application\n")
		return err
	}
	if _, err := fetchToken(c, appURL); err != nil {
		logger.Errorf("Failed to fetch token: %s\n", err)
		return err
	}

	return nil
}

// curl provides a wrapper around curl, passing Access JWT along in request
func curl(c *cli.Context) error {
	raven.SetDSN(sentryDSN)
	logger := log.CreateLogger()
	args := c.Args()
	if args.Len() < 1 {
		logger.Error("Please provide the access app and command you wish to run.")
		return errors.New("incorrect args")
	}

	cmdArgs, appURL, allowRequest, err := buildCurlCmdArgs(args.Slice())
	if err != nil {
		return err
	}

	token, err := getTokenIfExists(appURL)
	if err != nil || token == "" {
		if allowRequest {
			logger.Warn("You don't have an Access token set. Please run access token <access application> to fetch one.")
			return shell.Run("curl", cmdArgs...)
		}
		token, err = fetchToken(c, appURL)
		if err != nil {
			logger.Error("Failed to refresh token: ", err)
			return err
		}
	}

	cmdArgs = append(cmdArgs, "-H")
	cmdArgs = append(cmdArgs, fmt.Sprintf("cf-access-token: %s", token))
	return shell.Run("curl", cmdArgs...)
}

// token dumps provided token to stdout
func token(c *cli.Context) error {
	raven.SetDSN(sentryDSN)
	appURL, err := url.Parse(c.String("app"))
	if err != nil || c.NumFlags() < 1 {
		fmt.Fprintln(os.Stderr, "Please provide a url.")
		return err
	}
	token, err := getTokenIfExists(appURL)
	if err != nil || token == "" {
		fmt.Fprintln(os.Stderr, "Unable to find token for provided application. Please run token command to generate token.")
		return err
	}

	if _, err := fmt.Fprint(os.Stdout, token); err != nil {
		fmt.Fprintln(os.Stderr, "Failed to write token to stdout.")
		return err
	}
	return nil
}

// processURL will preprocess the string (parse to a url, convert to punycode, etc).
func processURL(s string) (*url.URL, error) {
	u, err := url.ParseRequestURI(s)
	if err != nil {
		return nil, err
	}
	host, err := idna.ToASCII(u.Hostname())
	if err != nil { // we fail to convert to punycode, just return the url we parsed.
		return u, nil
	}
	if u.Port() != "" {
		u.Host = fmt.Sprintf("%s:%s", host, u.Port())
	} else {
		u.Host = host
	}

	return u, nil
}

// buildCurlCmdArgs will build the curl cmd args
func buildCurlCmdArgs(cmdArgs []string) ([]string, *url.URL, bool, error) {
	allowRequest, iAllowRequest := false, 0
	var appURL *url.URL
	for i, arg := range cmdArgs {
		if arg == "-allow-request" || arg == "-ar" {
			iAllowRequest = i
			allowRequest = true
		}

		u, err := processURL(arg)
		if err == nil {
			appURL = u
			cmdArgs[i] = appURL.String()
		}
	}

	if appURL == nil {
		logger.Error("Please provide a valid URL.")
		return cmdArgs, appURL, allowRequest, errors.New("invalid url")
	}

	if allowRequest {
		// remove from cmdArgs
		cmdArgs[iAllowRequest] = cmdArgs[len(cmdArgs)-1]
		cmdArgs = cmdArgs[:len(cmdArgs)-1]
	}

	return cmdArgs, appURL, allowRequest, nil
}
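The curl wrapper above normalizes any URL argument to punycode (processURL) and then appends the Access JWT as a cf-access-token header. Below is a small, self-contained sketch of that preprocessing step; the hostname and token value are hypothetical placeholders, and the real command is assembled by buildCurlCmdArgs and executed through shell.Run.

package main

import (
	"fmt"
	"net/url"

	"golang.org/x/net/idna"
)

// normalizeURL mirrors what processURL does: parse the argument and convert the
// hostname to punycode so curl receives an ASCII-safe host.
func normalizeURL(s string) (*url.URL, error) {
	u, err := url.ParseRequestURI(s)
	if err != nil {
		return nil, err
	}
	host, err := idna.ToASCII(u.Hostname())
	if err != nil {
		return u, nil // fall back to the parsed URL unchanged
	}
	if u.Port() != "" {
		u.Host = fmt.Sprintf("%s:%s", host, u.Port())
	} else {
		u.Host = host
	}
	return u, nil
}

func main() {
	u, err := normalizeURL("https://bücher.example.com/app") // hypothetical Access application
	if err != nil {
		panic(err)
	}
	token := "<jwt from cloudflared access login>" // placeholder, not a real token
	// The wrapper would effectively invoke:
	//   curl -H "cf-access-token: <jwt>" https://xn--bcher-kva.example.com/app
	fmt.Println(u.String(), "-H", fmt.Sprintf("cf-access-token: %s", token))
}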
@@ -0,0 +1,90 @@
package access

import (
	"fmt"
	"io/ioutil"
	"net/url"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
	"github.com/cloudflare/cloudflared/cmd/cloudflared/transfer"
	"github.com/cloudflare/cloudflared/log"
	"github.com/coreos/go-oidc/jose"
	"github.com/coreos/go-oidc/oidc"
	homedir "github.com/mitchellh/go-homedir"
	cli "gopkg.in/urfave/cli.v2"
)

var logger = log.CreateLogger()

// fetchToken will either load a stored token or generate a new one
func fetchToken(c *cli.Context, appURL *url.URL) (string, error) {
	if token, err := getTokenIfExists(appURL); token != "" && err == nil {
		fmt.Fprintf(os.Stdout, "You have an existing token:\n\n%s\n\n", token)
		return token, nil
	}

	path, err := generateFilePathForTokenURL(appURL)
	if err != nil {
		return "", err
	}

	// this weird parameter is the resource name (token) and the key/value
	// we want to send to the transfer service. the key is token and the value
	// is blank (basically just the id generated in the transfer service)
	const resourceName, key, value = "token", "token", ""
	token, err := transfer.Run(c, appURL, resourceName, key, value, path, true)
	if err != nil {
		return "", err
	}

	fmt.Fprintf(os.Stdout, "Successfully fetched your token:\n\n%s\n\n", string(token))
	return string(token), nil
}

// getTokenIfExists will return the token from local storage if it exists
func getTokenIfExists(url *url.URL) (string, error) {
	path, err := generateFilePathForTokenURL(url)
	if err != nil {
		return "", err
	}
	content, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	token, err := jose.ParseJWT(string(content))
	if err != nil {
		return "", err
	}

	claims, err := token.Claims()
	if err != nil {
		return "", err
	}
	ident, err := oidc.IdentityFromClaims(claims)
	if err == nil && ident.ExpiresAt.After(time.Now()) {
		return token.Encode(), nil
	}
	return "", err
}

// generateFilePathForTokenURL will return a filepath for given access application url
func generateFilePathForTokenURL(url *url.URL) (string, error) {
	configPath, err := homedir.Expand(config.DefaultConfigDirs[0])
	if err != nil {
		return "", err
	}
	ok, err := config.FileExists(configPath)
	if !ok && err == nil {
		// create config directory if doesn't already exist
		err = os.Mkdir(configPath, 0700)
	}
	if err != nil {
		return "", err
	}
	name := strings.Replace(fmt.Sprintf("%s%s-token", url.Hostname(), url.EscapedPath()), "/", "-", -1)
	return filepath.Join(configPath, name), nil
}
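getTokenIfExists only treats a stored token as usable while its expiry claim is in the future. Here is a minimal sketch of that validity check, reusing the same go-oidc calls as the function above; the token path is a hypothetical example, since the real one comes from generateFilePathForTokenURL.

package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"github.com/coreos/go-oidc/jose"
	"github.com/coreos/go-oidc/oidc"
)

func main() {
	// Hypothetical path; the real code derives it per application URL.
	raw, err := ioutil.ReadFile("/home/user/.cloudflared/app.example.com-token")
	if err != nil {
		panic(err)
	}
	jwt, err := jose.ParseJWT(string(raw))
	if err != nil {
		panic(err)
	}
	claims, err := jwt.Claims()
	if err != nil {
		panic(err)
	}
	ident, err := oidc.IdentityFromClaims(claims)
	if err != nil {
		panic(err)
	}
	if ident.ExpiresAt.After(time.Now()) {
		fmt.Println("token still valid:", jwt.Encode())
	} else {
		fmt.Println("token expired; run cloudflared access login again")
	}
}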
@@ -0,0 +1,62 @@
package config

import (
	"os"
	"path/filepath"

	homedir "github.com/mitchellh/go-homedir"
	"gopkg.in/urfave/cli.v2"
	"gopkg.in/urfave/cli.v2/altsrc"
)

var (
	// File names from which we attempt to read configuration.
	DefaultConfigFiles = []string{"config.yml", "config.yaml"}

	// Launchd doesn't set root env variables, so there is default
	// Windows default config dir was ~/cloudflare-warp in documentation; let's keep it compatible
	DefaultConfigDirs = []string{"~/.cloudflared", "~/.cloudflare-warp", "~/cloudflare-warp", "/usr/local/etc/cloudflared", "/etc/cloudflared"}
)

const DefaultCredentialFile = "cert.pem"

// FileExists checks to see if a file exist at the provided path.
func FileExists(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		if os.IsNotExist(err) {
			// ignore missing files
			return false, nil
		}
		return false, err
	}
	f.Close()
	return true, nil
}

// FindInputSourceContext pulls the input source from the config flag.
func FindInputSourceContext(context *cli.Context) (altsrc.InputSourceContext, error) {
	if context.String("config") != "" {
		return altsrc.NewYamlSourceFromFile(context.String("config"))
	}
	return nil, nil
}

// FindDefaultConfigPath returns the first path that contains a config file.
// If none of the combination of DefaultConfigDirs and DefaultConfigFiles
// contains a config file, return empty string.
func FindDefaultConfigPath() string {
	for _, configDir := range DefaultConfigDirs {
		for _, configFile := range DefaultConfigFiles {
			dirPath, err := homedir.Expand(configDir)
			if err != nil {
				continue
			}
			path := filepath.Join(dirPath, configFile)
			if ok, _ := FileExists(path); ok {
				return path
			}
		}
	}
	return ""
}
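A short usage sketch of these helpers from another package, assuming the same import path the Access commands use; it simply reports which default config file, if any, would be picked up.

package main

import (
	"fmt"

	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
)

func main() {
	// Walks ~/.cloudflared, ~/.cloudflare-warp, ~/cloudflare-warp,
	// /usr/local/etc/cloudflared and /etc/cloudflared looking for
	// config.yml or config.yaml.
	path := config.FindDefaultConfigPath()
	if path == "" {
		fmt.Println("no config file found in the default directories")
		return
	}
	ok, err := config.FileExists(path)
	fmt.Println(path, ok, err)
}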
@@ -0,0 +1,176 @@
// Package encrypter is suitable for encrypting messages you would like to securely share between two points.
// Useful for providing end to end encryption (E2EE). It uses Box (NaCl) for encrypting the messages.
// tldr is it uses Elliptic Curves (Curve25519) for the keys, XSalsa20 and Poly1305 for encryption.
// You can read more here https://godoc.org/golang.org/x/crypto/nacl/box.
//
// msg := []byte("super safe message.")
// alice, err := New("alice_priv_key.pem", "alice_pub_key.pem")
// if err != nil {
// 	log.Fatal(err)
// }
//
// bob, err := New("bob_priv_key.pem", "bob_pub_key.pem")
// if err != nil {
// 	log.Fatal(err)
// }
// encrypted, err := alice.Encrypt(msg, bob.PublicKey())
// if err != nil {
// 	log.Fatal(err)
// }
//
// data, err := bob.Decrypt(encrypted, alice.PublicKey())
// if err != nil {
// 	log.Fatal(err)
// }
// fmt.Println(string(data))
package encrypter

import (
	"bytes"
	"crypto/rand"
	"encoding/base64"
	"encoding/pem"
	"errors"
	"io"
	"os"

	"golang.org/x/crypto/nacl/box"
)

// Encrypter represents a keypair value with auxiliary functions to make
// doing encryption and decryption easier
type Encrypter struct {
	privateKey *[32]byte
	publicKey  *[32]byte
}

// New returns a new encrypter with initialized keypair
func New(privateKey, publicKey string) (*Encrypter, error) {
	e := &Encrypter{}
	pubKey, key, err := e.fetchOrGenerateKeys(privateKey, publicKey)
	if err != nil {
		return nil, err
	}
	e.privateKey, e.publicKey = key, pubKey
	return e, nil
}

// PublicKey returns a base64 encoded public key. Useful for transport (like in HTTP requests)
func (e *Encrypter) PublicKey() string {
	return base64.URLEncoding.EncodeToString(e.publicKey[:])
}

// Decrypt data that was encrypted using our publicKey. It will use our privateKey and the sender's publicKey to decrypt
// data is an encrypted buffer of data, mostly like from the Encrypt function. Messages contain the nonce data on the front
// of the message.
// senderPublicKey is a base64 encoded version of the sender's public key (most likely from the PublicKey function).
// The return value is the decrypted buffer or an error.
func (e *Encrypter) Decrypt(data []byte, senderPublicKey string) ([]byte, error) {
	var decryptNonce [24]byte
	copy(decryptNonce[:], data[:24]) // we pull the nonce from the front of the actual message.
	pubKey, err := e.decodePublicKey(senderPublicKey)
	if err != nil {
		return nil, err
	}
	decrypted, ok := box.Open(nil, data[24:], &decryptNonce, pubKey, e.privateKey)
	if !ok {
		return nil, errors.New("failed to decrypt message")
	}
	return decrypted, nil
}

// Encrypt data using our privateKey and the recipient publicKey
// data is a buffer of data that we would like to encrypt. Messages will have the nonce added to front
// as they have to unique for each message shared.
// recipientPublicKey is a base64 encoded version of the sender's public key (most likely from the PublicKey function).
// The return value is the encrypted buffer or an error.
func (e *Encrypter) Encrypt(data []byte, recipientPublicKey string) ([]byte, error) {
	var nonce [24]byte
	if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
		return nil, err
	}

	pubKey, err := e.decodePublicKey(recipientPublicKey)
	if err != nil {
		return nil, err
	}
	// This encrypts msg and adds the nonce to the front of the message, since the nonce has to be
	// the same for encrypting and decrypting
	return box.Seal(nonce[:], data, &nonce, pubKey, e.privateKey), nil
}

// WriteKeys keys will take the currently initialized keypair and write them to provided filenames
func (e *Encrypter) WriteKeys(privateKey, publicKey string) error {
	if err := e.writeKey(e.privateKey[:], "BOX PRIVATE KEY", privateKey); err != nil {
		return err
	}
	return e.writeKey(e.publicKey[:], "PUBLIC KEY", publicKey)
}

// fetchOrGenerateKeys will either load or create a keypair if it doesn't exist
func (e *Encrypter) fetchOrGenerateKeys(privateKey, publicKey string) (*[32]byte, *[32]byte, error) {
	key, err := e.fetchKey(privateKey)
	if os.IsNotExist(err) {
		return box.GenerateKey(rand.Reader)
	} else if err != nil {
		return nil, nil, err
	}

	pub, err := e.fetchKey(publicKey)
	if os.IsNotExist(err) {
		return box.GenerateKey(rand.Reader)
	} else if err != nil {
		return nil, nil, err
	}
	return pub, key, nil
}

// writeKey will write a key to disk in DER format (it's a standard pem key)
func (e *Encrypter) writeKey(key []byte, pemType, filename string) error {
	data := pem.EncodeToMemory(&pem.Block{
		Type:  pemType,
		Bytes: key,
	})

	f, err := os.Create(filename)
	if err != nil {
		return err
	}

	_, err = f.Write(data)
	if err != nil {
		return err
	}

	return nil
}

// fetchKey will load a a DER formatted key from disk
func (e *Encrypter) fetchKey(filename string) (*[32]byte, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	buf := new(bytes.Buffer)
	io.Copy(buf, f)

	p, _ := pem.Decode(buf.Bytes())
	if p == nil {
		return nil, errors.New("Failed to decode key")
	}
	var newKey [32]byte
	copy(newKey[:], p.Bytes)

	return &newKey, nil
}

// decodePublicKey will base64 decode the provided key to the box representation
func (e *Encrypter) decodePublicKey(key string) (*[32]byte, error) {
	pub, err := base64.URLEncoding.DecodeString(key)
	if err != nil {
		return nil, err
	}
	var newKey [32]byte
	copy(newKey[:], pub)
	return &newKey, nil
}
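The package comment above already shows the high-level API; for clarity, this standalone sketch demonstrates the wire format Encrypt and Decrypt agree on (a 24-byte random nonce prepended to the NaCl Box ciphertext), using golang.org/x/crypto/nacl/box directly with throwaway keys instead of PEM files.

package main

import (
	"crypto/rand"
	"fmt"
	"io"

	"golang.org/x/crypto/nacl/box"
)

func main() {
	// Two fresh keypairs stand in for the PEM files New() would load or create.
	alicePub, alicePriv, _ := box.GenerateKey(rand.Reader)
	bobPub, bobPriv, _ := box.GenerateKey(rand.Reader)

	// Encrypt: random 24-byte nonce, then Seal with the nonce as the output
	// prefix, the same layout Encrypt() produces.
	var nonce [24]byte
	if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
		panic(err)
	}
	msg := []byte("super safe message.")
	sealed := box.Seal(nonce[:], msg, &nonce, bobPub, alicePriv)

	// Decrypt: peel the nonce off the front, then Open the rest, as Decrypt() does.
	var decryptNonce [24]byte
	copy(decryptNonce[:], sealed[:24])
	plain, ok := box.Open(nil, sealed[24:], &decryptNonce, alicePub, bobPriv)
	fmt.Println(ok, string(plain))
}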
@@ -7,6 +7,7 @@ import (
	"os"
	"path/filepath"

	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
	cli "gopkg.in/urfave/cli.v2"
)

@@ -30,7 +31,13 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
	app.Run(os.Args)
}

const serviceConfigDir = "/etc/cloudflared"

// The directory and files that are used by the service.
// These are hard-coded in the templates below.
const (
	serviceConfigDir      = "/etc/cloudflared"
	serviceConfigFile     = "config.yml"
	serviceCredentialFile = "cert.pem"
)

var systemdTemplates = []ServiceTemplate{
	{
@@ -174,6 +181,23 @@ func isSystemd() bool {
	return false
}

func copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile string) error {
	if err := ensureConfigDirExists(serviceConfigDir); err != nil {
		return err
	}
	srcCredentialPath := filepath.Join(userConfigDir, userCredentialFile)
	destCredentialPath := filepath.Join(serviceConfigDir, serviceCredentialFile)
	if err := copyCredential(srcCredentialPath, destCredentialPath); err != nil {
		return err
	}
	srcConfigPath := filepath.Join(userConfigDir, userConfigFile)
	destConfigPath := filepath.Join(serviceConfigDir, serviceConfigFile)
	if err := copyConfig(srcConfigPath, destConfigPath); err != nil {
		return err
	}
	return nil
}

func installLinuxService(c *cli.Context) error {
	etPath, err := os.Executable()
	if err != nil {

@@ -181,11 +205,12 @@ func installLinuxService(c *cli.Context) error {
	}
	templateArgs := ServiceTemplateArgs{Path: etPath}

	defaultConfigDir := filepath.Dir(c.String("config"))
	defaultConfigFile := filepath.Base(c.String("config"))
	if err = copyCredentials(serviceConfigDir, defaultConfigDir, defaultConfigFile, defaultCredentialFile); err != nil {
	userConfigDir := filepath.Dir(c.String("config"))
	userConfigFile := filepath.Base(c.String("config"))
	userCredentialFile := config.DefaultCredentialFile
	if err = copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile); err != nil {
		logger.WithError(err).Infof("Failed to copy user configuration. Before running the service, ensure that %s contains two files, %s and %s",
			serviceConfigDir, defaultCredentialFile, defaultConfigFiles[0])
			serviceConfigDir, serviceCredentialFile, serviceConfigFile)
		return err
	}
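copyUserConfiguration copies the user's cert.pem and config.yml into /etc/cloudflared before the service starts. Below is a simplified, hypothetical sketch of that copy step using only the standard library; the real code goes through this package's openFile helper and its own error reporting, and the paths shown are illustrative.

package main

import (
	"io"
	"os"
	"path/filepath"
)

// copyFile is a simplified stand-in for copyCredential/copyConfig: it copies
// src into dst, creating dst with owner-only permissions.
func copyFile(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, in)
	return err
}

func main() {
	userDir := "/home/user/.cloudflared" // hypothetical user config dir
	_ = copyFile(filepath.Join(userDir, "cert.pem"), "/etc/cloudflared/cert.pem")
	_ = copyFile(filepath.Join(userDir, "config.yml"), "/etc/cloudflared/config.yml")
}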
@ -1,194 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base32"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
cli "gopkg.in/urfave/cli.v2"
|
||||
)
|
||||
|
||||
const baseLoginURL = "https://dash.cloudflare.com/warp"
|
||||
const baseCertStoreURL = "https://login.cloudflarewarp.com"
|
||||
const clientTimeout = time.Minute * 20
|
||||
|
||||
func login(c *cli.Context) error {
|
||||
configPath, err := homedir.Expand(defaultConfigDirs[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ok, err := fileExists(configPath)
|
||||
if !ok && err == nil {
|
||||
// create config directory if doesn't already exist
|
||||
err = os.Mkdir(configPath, 0700)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path := filepath.Join(configPath, defaultCredentialFile)
|
||||
fileInfo, err := os.Stat(path)
|
||||
if err == nil && fileInfo.Size() > 0 {
|
||||
fmt.Fprintf(os.Stderr, `You have an existing certificate at %s which login would overwrite.
|
||||
If this is intentional, please move or delete that file then run this command again.
|
||||
`, path)
|
||||
return nil
|
||||
}
|
||||
if err != nil && err.(*os.PathError).Err != syscall.ENOENT {
|
||||
return err
|
||||
}
|
||||
|
||||
// for local debugging
|
||||
baseURL := baseCertStoreURL
|
||||
if c.IsSet("url") {
|
||||
baseURL = c.String("url")
|
||||
}
|
||||
// Generate a random post URL
|
||||
certURL := baseURL + generateRandomPath()
|
||||
loginURL, err := url.Parse(baseLoginURL)
|
||||
if err != nil {
|
||||
// shouldn't happen, URL is hardcoded
|
||||
return err
|
||||
}
|
||||
loginURL.RawQuery = "callback=" + url.QueryEscape(certURL)
|
||||
|
||||
err = open(loginURL.String())
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, `Please open the following URL and log in with your Cloudflare account:
|
||||
|
||||
%s
|
||||
|
||||
Leave cloudflared running to install the certificate automatically.
|
||||
`, loginURL.String())
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, `A browser window should have opened at the following URL:
|
||||
|
||||
%s
|
||||
|
||||
If the browser failed to open, open it yourself and visit the URL above.
|
||||
|
||||
`, loginURL.String())
|
||||
}
|
||||
|
||||
if download(certURL, path) {
|
||||
fmt.Fprintf(os.Stderr, `You have successfully logged in.
|
||||
If you wish to copy your credentials to a server, they have been saved to:
|
||||
%s
|
||||
`, path)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, `Failed to write the certificate due to the following error:
|
||||
%v
|
||||
|
||||
Your browser will download the certificate instead. You will have to manually
|
||||
copy it to the following path:
|
||||
|
||||
%s
|
||||
|
||||
`, err, path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// generateRandomPath generates a random URL to associate with the certificate.
|
||||
func generateRandomPath() string {
|
||||
randomBytes := make([]byte, 40)
|
||||
_, err := rand.Read(randomBytes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return "/" + base32.StdEncoding.EncodeToString(randomBytes)
|
||||
}
|
||||
|
||||
// open opens the specified URL in the default browser of the user.
|
||||
func open(url string) error {
|
||||
var cmd string
|
||||
var args []string
|
||||
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
cmd = "cmd"
|
||||
args = []string{"/c", "start"}
|
||||
case "darwin":
|
||||
cmd = "open"
|
||||
default: // "linux", "freebsd", "openbsd", "netbsd"
|
||||
cmd = "xdg-open"
|
||||
}
|
||||
args = append(args, url)
|
||||
return exec.Command(cmd, args...).Start()
|
||||
}
|
||||
|
||||
func download(certURL, filePath string) bool {
|
||||
client := &http.Client{Timeout: clientTimeout}
|
||||
// attempt a (long-running) certificate get
|
||||
for i := 0; i < 20; i++ {
|
||||
ok, err := tryDownload(client, certURL, filePath)
|
||||
if ok {
|
||||
putSuccess(client, certURL)
|
||||
return true
|
||||
}
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Error fetching certificate")
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func tryDownload(client *http.Client, certURL, filePath string) (ok bool, err error) {
|
||||
resp, err := client.Get(certURL)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode == 404 {
|
||||
return false, nil
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
return false, fmt.Errorf("Unexpected HTTP error code %d", resp.StatusCode)
|
||||
}
|
||||
if resp.Header.Get("Content-Type") != "application/x-pem-file" {
|
||||
return false, fmt.Errorf("Unexpected content type %s", resp.Header.Get("Content-Type"))
|
||||
}
|
||||
// write response
|
||||
file, err := os.Create(filePath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer file.Close()
|
||||
written, err := io.Copy(file, resp.Body)
|
||||
switch {
|
||||
case err != nil:
|
||||
return false, err
|
||||
case resp.ContentLength != written && resp.ContentLength != -1:
|
||||
return false, fmt.Errorf("Short read (%d bytes) from server while writing certificate", written)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func putSuccess(client *http.Client, certURL string) {
|
||||
// indicate success to the relay server
|
||||
req, err := http.NewRequest("PUT", certURL+"/ok", nil)
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("HTTP request error")
|
||||
return
|
||||
}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("HTTP error")
|
||||
return
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
logger.Errorf("Unexpected HTTP error code %d", resp.StatusCode)
|
||||
}
|
||||
}
|
|
@ -2,48 +2,37 @@ package main
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime/trace"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/cloudflared/cmd/sqlgateway"
|
||||
"github.com/cloudflare/cloudflared/hello"
|
||||
"github.com/cloudflare/cloudflared/metrics"
|
||||
"github.com/cloudflare/cloudflared/origin"
|
||||
"github.com/cloudflare/cloudflared/tunneldns"
|
||||
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
|
||||
"github.com/cloudflare/cloudflared/cmd/cloudflared/access"
|
||||
"github.com/cloudflare/cloudflared/cmd/cloudflared/tunnel"
|
||||
"github.com/cloudflare/cloudflared/cmd/cloudflared/updater"
|
||||
"github.com/cloudflare/cloudflared/log"
|
||||
"github.com/cloudflare/cloudflared/metrics"
|
||||
|
||||
"github.com/getsentry/raven-go"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"gopkg.in/urfave/cli.v2"
|
||||
"gopkg.in/urfave/cli.v2/altsrc"
|
||||
|
||||
"github.com/coreos/go-systemd/daemon"
|
||||
"github.com/facebookgo/grace/gracenet"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
sentryDSN = "https://56a9c9fa5c364ab28f34b14f35ea0f1b:3e8827f6f9f740738eb11138f7bebb68@sentry.io/189878"
|
||||
developerPortal = "https://developers.cloudflare.com/argo-tunnel"
|
||||
quickStartUrl = developerPortal + "/quickstart/quickstart/"
|
||||
serviceUrl = developerPortal + "/reference/service/"
|
||||
argumentsUrl = developerPortal + "/reference/arguments/"
|
||||
licenseUrl = developerPortal + "/licence/"
|
||||
)
|
||||
|
||||
var (
|
||||
Version = "DEV"
|
||||
BuildTime = "unknown"
|
||||
logger = log.CreateLogger()
|
||||
)
|
||||
|
||||
func main() {
|
||||
metrics.RegisterBuildInfo(BuildTime, Version)
|
||||
raven.SetDSN(sentryDSN)
|
||||
raven.SetRelease(Version)
|
||||
|
||||
// Force shutdown channel used by the app. When closed, app must terminate.
|
||||
|
@ -55,588 +44,63 @@ func main() {
|
|||
|
||||
app := &cli.App{}
|
||||
app.Name = "cloudflared"
|
||||
app.Usage = "Cloudflare's command-line tool and agent"
|
||||
app.ArgsUsage = "origin-url"
|
||||
app.Copyright = fmt.Sprintf(`(c) %d Cloudflare Inc.
|
||||
Use is subject to the license agreement at %s`, time.Now().Year(), licenseUrl)
|
||||
app.Usage = "Cloudflare reverse tunnelling proxy agent"
|
||||
app.ArgsUsage = "origin-url"
|
||||
app.Version = fmt.Sprintf("%s (built %s)", Version, BuildTime)
|
||||
app.Description = `A reverse tunnel proxy agent that connects to Cloudflare's infrastructure.
|
||||
Upon connecting, you are assigned a unique subdomain on cftunnel.com.
|
||||
You need to specify a hostname on a zone you control.
|
||||
A DNS record will be created to CNAME your hostname to the unique subdomain on cftunnel.com.
|
||||
app.Description = `cloudflared connects your machine or user identity to Cloudflare's global network.
|
||||
You can use it to authenticate a session to reach an API behind Access, route web traffic to this machine,
|
||||
and configure access control.`
|
||||
app.Flags = flags()
|
||||
app.Action = action(Version, shutdownC, graceShutdownC)
|
||||
app.Before = tunnel.Before
|
||||
app.Commands = commands()
|
||||
|
||||
Requests made to Cloudflare's servers for your hostname will be proxied
|
||||
through the tunnel to your local webserver.`
|
||||
app.Flags = []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "config",
|
||||
Usage: "Specifies a config file in YAML format.",
|
||||
Value: findDefaultConfigPath(),
|
||||
},
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "autoupdate-freq",
|
||||
Usage: "Autoupdate frequency. Default is 24h.",
|
||||
Value: time.Hour * 24,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "no-autoupdate",
|
||||
Usage: "Disable periodic check for updates, restarting the server with the new version.",
|
||||
Value: false,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "is-autoupdated",
|
||||
Usage: "Signal the new process that Argo Tunnel client has been autoupdated",
|
||||
Value: false,
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
|
||||
Name: "edge",
|
||||
Usage: "Address of the Cloudflare tunnel server.",
|
||||
EnvVars: []string{"TUNNEL_EDGE"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "cacert",
|
||||
Usage: "Certificate Authority authenticating the Cloudflare tunnel connection.",
|
||||
EnvVars: []string{"TUNNEL_CACERT"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "no-tls-verify",
|
||||
Usage: "Disables TLS verification of the certificate presented by your origin. Will allow any certificate from the origin to be accepted. Note: The connection from your machine to Cloudflare's Edge is still encrypted.",
|
||||
EnvVars: []string{"NO_TLS_VERIFY"},
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "origincert",
|
||||
Usage: "Path to the certificate generated for your origin when you run cloudflared login.",
|
||||
EnvVars: []string{"TUNNEL_ORIGIN_CERT"},
|
||||
Value: findDefaultOriginCertPath(),
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "origin-ca-pool",
|
||||
Usage: "Path to the CA for the certificate of your origin. This option should be used only if your certificate is not signed by Cloudflare.",
|
||||
EnvVars: []string{"TUNNEL_ORIGIN_CA_POOL"},
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "url",
|
||||
Value: "https://localhost:8080",
|
||||
Usage: "Connect to the local webserver at `URL`.",
|
||||
EnvVars: []string{"TUNNEL_URL"},
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "hostname",
|
||||
Usage: "Set a hostname on a Cloudflare zone to route traffic through this tunnel.",
|
||||
EnvVars: []string{"TUNNEL_HOSTNAME"},
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "origin-server-name",
|
||||
Usage: "Hostname on the origin server certificate.",
|
||||
EnvVars: []string{"TUNNEL_ORIGIN_SERVER_NAME"},
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "id",
|
||||
Usage: "A unique identifier used to tie connections to this tunnel instance.",
|
||||
EnvVars: []string{"TUNNEL_ID"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "lb-pool",
|
||||
Usage: "The name of a (new/existing) load balancing pool to add this origin to.",
|
||||
EnvVars: []string{"TUNNEL_LB_POOL"},
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "api-key",
|
||||
Usage: "This parameter has been deprecated since version 2017.10.1.",
|
||||
EnvVars: []string{"TUNNEL_API_KEY"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "api-email",
|
||||
Usage: "This parameter has been deprecated since version 2017.10.1.",
|
||||
EnvVars: []string{"TUNNEL_API_EMAIL"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "api-ca-key",
|
||||
Usage: "This parameter has been deprecated since version 2017.10.1.",
|
||||
EnvVars: []string{"TUNNEL_API_CA_KEY"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "metrics",
|
||||
Value: "localhost:",
|
||||
Usage: "Listen address for metrics reporting.",
|
||||
EnvVars: []string{"TUNNEL_METRICS"},
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "metrics-update-freq",
|
||||
Usage: "Frequency to update tunnel metrics",
|
||||
Value: time.Second * 5,
|
||||
EnvVars: []string{"TUNNEL_METRICS_UPDATE_FREQ"},
|
||||
}),
|
||||
altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
|
||||
Name: "tag",
|
||||
Usage: "Custom tags used to identify this tunnel, in format `KEY=VALUE`. Multiple tags may be specified",
|
||||
EnvVars: []string{"TUNNEL_TAG"},
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "heartbeat-interval",
|
||||
Usage: "Minimum idle time before sending a heartbeat.",
|
||||
Value: time.Second * 5,
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewUint64Flag(&cli.Uint64Flag{
|
||||
Name: "heartbeat-count",
|
||||
Usage: "Minimum number of unacked heartbeats to send before closing the connection.",
|
||||
Value: 5,
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "loglevel",
|
||||
Value: "info",
|
||||
Usage: "Application logging level {panic, fatal, error, warn, info, debug}",
|
||||
EnvVars: []string{"TUNNEL_LOGLEVEL"},
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "proto-loglevel",
|
||||
Value: "warn",
|
||||
Usage: "Protocol logging level {panic, fatal, error, warn, info, debug}",
|
||||
EnvVars: []string{"TUNNEL_PROTO_LOGLEVEL"},
|
||||
}),
|
||||
altsrc.NewUintFlag(&cli.UintFlag{
|
||||
Name: "retries",
|
||||
Value: 5,
|
||||
Usage: "Maximum number of retries for connection/protocol errors.",
|
||||
EnvVars: []string{"TUNNEL_RETRIES"},
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "hello-world",
|
||||
Value: false,
|
||||
Usage: "Run Hello World Server",
|
||||
EnvVars: []string{"TUNNEL_HELLO_WORLD"},
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "pidfile",
|
||||
Usage: "Write the application's PID to this file after first successful connection.",
|
||||
EnvVars: []string{"TUNNEL_PIDFILE"},
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "logfile",
|
||||
Usage: "Save application log to this file for reporting issues.",
|
||||
EnvVars: []string{"TUNNEL_LOGFILE"},
|
||||
}),
|
||||
altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "ha-connections",
|
||||
Value: 4,
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "proxy-connect-timeout",
|
||||
Usage: "HTTP proxy timeout for establishing a new connection",
|
||||
Value: time.Second * 30,
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "proxy-tls-timeout",
|
||||
Usage: "HTTP proxy timeout for completing a TLS handshake",
|
||||
Value: time.Second * 10,
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "proxy-tcp-keepalive",
|
||||
Usage: "HTTP proxy TCP keepalive duration",
|
||||
Value: time.Second * 30,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "proxy-no-happy-eyeballs",
|
||||
Usage: "HTTP proxy should disable \"happy eyeballs\" for IPv4/v6 fallback",
|
||||
}),
|
||||
altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "proxy-keepalive-connections",
|
||||
Usage: "HTTP proxy maximum keepalive connection pool size",
|
||||
Value: 100,
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "proxy-keepalive-timeout",
|
||||
Usage: "HTTP proxy timeout for closing an idle connection",
|
||||
Value: time.Second * 90,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "proxy-dns",
|
||||
Usage: "Run a DNS over HTTPS proxy server.",
|
||||
EnvVars: []string{"TUNNEL_DNS"},
|
||||
}),
|
||||
altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "proxy-dns-port",
|
||||
Value: 53,
|
||||
Usage: "Listen on given port for the DNS over HTTPS proxy server.",
|
||||
EnvVars: []string{"TUNNEL_DNS_PORT"},
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "proxy-dns-address",
|
||||
Usage: "Listen address for the DNS over HTTPS proxy server.",
|
||||
Value: "localhost",
|
||||
EnvVars: []string{"TUNNEL_DNS_ADDRESS"},
|
||||
}),
|
||||
altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
|
||||
Name: "proxy-dns-upstream",
|
||||
Usage: "Upstream endpoint URL, you can specify multiple endpoints for redundancy.",
|
||||
Value: cli.NewStringSlice("https://1.1.1.1/dns-query", "https://1.0.0.1/dns-query"),
|
||||
EnvVars: []string{"TUNNEL_DNS_UPSTREAM"},
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "grace-period",
|
||||
Usage: "Duration to accept new requests after cloudflared receives first SIGINT/SIGTERM. A second SIGINT/SIGTERM will force cloudflared to shutdown immediately.",
|
||||
Value: time.Second * 30,
|
||||
EnvVars: []string{"TUNNEL_GRACE_PERIOD"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewUintFlag(&cli.UintFlag{
|
||||
Name: "compression-quality",
|
||||
Value: 0,
|
||||
Usage: "Use cross-stream compression instead HTTP compression. 0-off, 1-low, 2-medium, >=3-high",
|
||||
EnvVars: []string{"TUNNEL_COMPRESSION_LEVEL"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "no-chunked-encoding",
|
||||
Usage: "Disables chunked transfer encoding; useful if you are running a WSGI server.",
|
||||
EnvVars: []string{"TUNNEL_NO_CHUNKED_ENCODING"},
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "trace-output",
|
||||
Usage: "Name of trace output file, generated when cloudflared stops.",
|
||||
EnvVars: []string{"TUNNEL_TRACE_OUTPUT"},
|
||||
}),
|
||||
}
|
||||
app.Action = func(c *cli.Context) (err error) {
|
||||
tags := make(map[string]string)
|
||||
tags["hostname"] = c.String("hostname")
|
||||
raven.SetTagsContext(tags)
|
||||
raven.CapturePanic(func() { err = startServer(c, shutdownC, graceShutdownC) }, nil)
|
||||
if err != nil {
|
||||
raven.CaptureError(err, nil)
|
||||
}
|
||||
return err
|
||||
}
|
||||
app.Before = func(context *cli.Context) error {
|
||||
if context.String("config") == "" {
|
||||
logger.Warnf("Cannot determine default configuration path. No file %v in %v", defaultConfigFiles, defaultConfigDirs)
|
||||
}
|
||||
inputSource, err := findInputSourceContext(context)
|
||||
if err != nil {
|
||||
logger.WithError(err).Infof("Cannot load configuration from %s", context.String("config"))
|
||||
return err
|
||||
} else if inputSource != nil {
|
||||
err := altsrc.ApplyInputSourceValues(context, inputSource, app.Flags)
|
||||
if err != nil {
|
||||
logger.WithError(err).Infof("Cannot apply configuration from %s", context.String("config"))
|
||||
return err
|
||||
}
|
||||
logger.Infof("Applied configuration from %s", context.String("config"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
app.Commands = []*cli.Command{
|
||||
{
|
||||
Name: "update",
|
||||
Action: update,
|
||||
Usage: "Update the agent if a new version exists",
|
||||
ArgsUsage: " ",
|
||||
Description: `Looks for a new version on the offical download server.
|
||||
If a new version exists, updates the agent binary and quits.
|
||||
Otherwise, does nothing.
|
||||
|
||||
To determine if an update happened in a script, check for error code 64.`,
|
||||
},
|
||||
{
|
||||
Name: "login",
|
||||
Action: login,
|
||||
Usage: "Generate a configuration file with your login details",
|
||||
ArgsUsage: " ",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "url",
|
||||
Hidden: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "hello",
|
||||
Action: helloWorld,
|
||||
Usage: "Run a simple \"Hello World\" server for testing Argo Tunnel.",
|
||||
Flags: []cli.Flag{
|
||||
&cli.IntFlag{
|
||||
Name: "port",
|
||||
Usage: "Listen on the selected port.",
|
||||
Value: 8080,
|
||||
},
|
||||
},
|
||||
ArgsUsage: " ", // can't be the empty string or we get the default output
|
||||
},
|
||||
{
|
||||
Name: "proxy-dns",
|
||||
Action: tunneldns.Run,
|
||||
Usage: "Run a DNS over HTTPS proxy server.",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "metrics",
|
||||
Value: "localhost:",
|
||||
Usage: "Listen address for metrics reporting.",
|
||||
EnvVars: []string{"TUNNEL_METRICS"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "address",
|
||||
Usage: "Listen address for the DNS over HTTPS proxy server.",
|
||||
Value: "localhost",
|
||||
EnvVars: []string{"TUNNEL_DNS_ADDRESS"},
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "port",
|
||||
Usage: "Listen on given port for the DNS over HTTPS proxy server.",
|
||||
Value: 53,
|
||||
EnvVars: []string{"TUNNEL_DNS_PORT"},
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "upstream",
|
||||
Usage: "Upstream endpoint URL, you can specify multiple endpoints for redundancy.",
|
||||
Value: cli.NewStringSlice("https://1.1.1.1/dns-query", "https://1.0.0.1/dns-query"),
|
||||
EnvVars: []string{"TUNNEL_DNS_UPSTREAM"},
|
||||
},
|
||||
},
|
||||
ArgsUsage: " ", // can't be the empty string or we get the default output
|
||||
},
|
||||
{
|
||||
Name: "db",
|
||||
Action: func(c *cli.Context) error {
|
||||
tags := make(map[string]string)
|
||||
tags["hostname"] = c.String("hostname")
|
||||
raven.SetTagsContext(tags)
|
||||
|
||||
fmt.Printf("\nSQL Database Password: ")
|
||||
pass, err := terminal.ReadPassword(int(syscall.Stdin))
|
||||
if err != nil {
|
||||
logger.Error(err)
|
||||
}
|
||||
|
||||
go sqlgateway.StartProxy(c, logger, string(pass))
|
||||
|
||||
raven.CapturePanic(func() { err = startServer(c, shutdownC, graceShutdownC) }, nil)
|
||||
if err != nil {
|
||||
raven.CaptureError(err, nil)
|
||||
}
|
||||
return err
|
||||
},
|
||||
Before: func(c *cli.Context) error {
|
||||
if c.String("config") == "" {
|
||||
logger.Warnf("Cannot determine default configuration path. No file %v in %v", defaultConfigFiles, defaultConfigDirs)
|
||||
}
|
||||
inputSource, err := findInputSourceContext(c)
|
||||
if err != nil {
|
||||
logger.WithError(err).Infof("Cannot load configuration from %s", c.String("config"))
|
||||
return err
|
||||
} else if inputSource != nil {
|
||||
err := altsrc.ApplyInputSourceValues(c, inputSource, app.Flags)
|
||||
if err != nil {
|
||||
logger.WithError(err).Infof("Cannot apply configuration from %s", c.String("config"))
|
||||
return err
|
||||
}
|
||||
logger.Infof("Applied configuration from %s", c.String("config"))
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Usage: "SQL Gateway is an SQL over HTTP reverse proxy",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "db",
|
||||
Value: true,
|
||||
Usage: "Enable the SQL Gateway Proxy",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "address",
|
||||
Value: "",
|
||||
Usage: "Database connection string: db://user:pass",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
tunnel.Init(Version, shutdownC, graceShutdownC) // we need this to support the tunnel sub command...
|
||||
runApp(app, shutdownC, graceShutdownC)
|
||||
}
|
||||
|
||||
func startServer(c *cli.Context, shutdownC, graceShutdownC chan struct{}) error {
|
||||
var wg sync.WaitGroup
|
||||
listeners := gracenet.Net{}
|
||||
errC := make(chan error)
|
||||
connectedSignal := make(chan struct{})
|
||||
dnsReadySignal := make(chan struct{})
|
||||
func commands() []*cli.Command {
|
||||
cmds := []*cli.Command{
|
||||
{
|
||||
Name: "update",
|
||||
Action: updater.Update,
|
||||
Usage: "Update the agent if a new version exists",
|
||||
ArgsUsage: " ",
|
||||
Description: `Looks for a new version on the official download server.
|
||||
If a new version exists, updates the agent binary and quits.
|
||||
Otherwise, does nothing.
|
||||
|
||||
// check whether client provides enough flags or env variables. If not, print help.
|
||||
if ok := enoughOptionsSet(c); !ok {
|
||||
To determine if an update happened in a script, check for error code 64.`,
|
||||
},
|
||||
}
|
||||
cmds = append(cmds, tunnel.Commands()...)
|
||||
cmds = append(cmds, access.Commands()...)
|
||||
return cmds
|
||||
}
|
||||
|
||||
func flags() []cli.Flag {
|
||||
flags := tunnel.Flags()
|
||||
return append(flags, access.Flags()...)
|
||||
}
|
||||
|
||||
func action(version string, shutdownC, graceShutdownC chan struct{}) cli.ActionFunc {
|
||||
return func(c *cli.Context) (err error) {
|
||||
if isRunningFromTerminal() {
|
||||
logger.Error("Use of cloudflared without commands is deprecated.")
|
||||
cli.ShowAppHelp(c)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := configMainLogger(c); err != nil {
|
||||
return errors.Wrap(err, "Error configuring logger")
|
||||
}
|
||||
|
||||
protoLogger, err := configProtoLogger(c)
|
||||
tags := make(map[string]string)
|
||||
tags["hostname"] = c.String("hostname")
|
||||
raven.SetTagsContext(tags)
|
||||
raven.CapturePanic(func() { err = tunnel.StartServer(c, version, shutdownC, graceShutdownC) }, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error configuring protocol logger")
|
||||
raven.CaptureError(err, nil)
|
||||
}
|
||||
|
||||
if c.IsSet("trace-output") {
|
||||
tmpTraceFile, err := ioutil.TempFile("", "trace")
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Failed to create new temporary file to save trace output")
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := tmpTraceFile.Close(); err != nil {
|
||||
logger.WithError(err).Error("Failed to close trace output file %s", tmpTraceFile.Name())
|
||||
}
|
||||
if err := os.Rename(tmpTraceFile.Name(), c.String("trace-output")); err != nil {
|
||||
logger.WithError(err).Errorf("Failed to rename temporary trace output file %s to %s", tmpTraceFile.Name(), c.String("trace-output"))
|
||||
} else {
|
||||
os.Remove(tmpTraceFile.Name())
|
||||
}
|
||||
}()
|
||||
|
||||
if err := trace.Start(tmpTraceFile); err != nil {
|
||||
logger.WithError(err).Error("Failed to start trace")
|
||||
return errors.Wrap(err, "Error starting tracing")
|
||||
}
|
||||
defer trace.Stop()
|
||||
}
|
||||
|
||||
if c.String("logfile") != "" {
|
||||
if err := initLogFile(c, logger, protoLogger); err != nil {
|
||||
logger.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := handleDeprecatedOptions(c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buildInfo := origin.GetBuildInfo()
|
||||
logger.Infof("Build info: %+v", *buildInfo)
|
||||
logger.Infof("Version %s", Version)
|
||||
logClientOptions(c)
|
||||
|
||||
if c.IsSet("proxy-dns") {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
errC <- runDNSProxyServer(c, dnsReadySignal, shutdownC)
|
||||
}()
|
||||
} else {
|
||||
close(dnsReadySignal)
|
||||
}
|
||||
|
||||
// Wait for proxy-dns to come up (if used)
|
||||
<-dnsReadySignal
|
||||
|
||||
// update needs to be after DNS proxy is up to resolve equinox server address
|
||||
if isAutoupdateEnabled(c) {
|
||||
logger.Infof("Autoupdate frequency is set to %v", c.Duration("autoupdate-freq"))
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
errC <- autoupdate(c.Duration("autoupdate-freq"), &listeners, shutdownC)
|
||||
}()
|
||||
}
|
||||
|
||||
metricsListener, err := listeners.Listen("tcp", c.String("metrics"))
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Error opening metrics server listener")
|
||||
return errors.Wrap(err, "Error opening metrics server listener")
|
||||
}
|
||||
defer metricsListener.Close()
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
errC <- metrics.ServeMetrics(metricsListener, shutdownC, logger)
|
||||
}()
|
||||
|
||||
go notifySystemd(connectedSignal)
|
||||
if c.IsSet("pidfile") {
|
||||
go writePidFile(connectedSignal, c.String("pidfile"))
|
||||
}
|
||||
|
||||
// Serve DNS proxy stand-alone if no hostname or tag or app is going to run
|
||||
if dnsProxyStandAlone(c) {
|
||||
close(connectedSignal)
|
||||
// no grace period, handle SIGINT/SIGTERM immediately
|
||||
return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, 0)
|
||||
}
|
||||
|
||||
if c.IsSet("hello-world") {
|
||||
helloListener, err := hello.CreateTLSListener("127.0.0.1:")
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Cannot start Hello World Server")
|
||||
return errors.Wrap(err, "Cannot start Hello World Server")
|
||||
}
|
||||
defer helloListener.Close()
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
hello.StartHelloWorldServer(logger, helloListener, shutdownC)
|
||||
}()
|
||||
c.Set("url", "https://"+helloListener.Addr().String())
|
||||
}
|
||||
|
||||
tunnelConfig, err := prepareTunnelConfig(c, buildInfo, logger, protoLogger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
errC <- origin.StartTunnelDaemon(tunnelConfig, graceShutdownC, connectedSignal)
|
||||
}()
|
||||
|
||||
return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, c.Duration("grace-period"))
|
||||
}
|
||||
|
||||
func waitToShutdown(wg *sync.WaitGroup,
|
||||
errC chan error,
|
||||
shutdownC, graceShutdownC chan struct{},
|
||||
gracePeriod time.Duration,
|
||||
) error {
|
||||
var err error
|
||||
if gracePeriod > 0 {
|
||||
err = waitForSignalWithGraceShutdown(errC, shutdownC, graceShutdownC, gracePeriod)
|
||||
} else {
|
||||
err = waitForSignal(errC, shutdownC)
|
||||
close(graceShutdownC)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Quitting due to error")
|
||||
} else {
|
||||
logger.Info("Quitting...")
|
||||
}
|
||||
// Wait for clean exit, discarding all errors
|
||||
go func() {
|
||||
for range errC {
|
||||
}
|
||||
}()
|
||||
wg.Wait()
|
||||
return err
|
||||
}
|
||||
|
||||
func notifySystemd(waitForSignal chan struct{}) {
|
||||
<-waitForSignal
|
||||
daemon.SdNotify(false, "READY=1")
|
||||
}
|
||||
|
||||
func writePidFile(waitForSignal chan struct{}, pidFile string) {
|
||||
<-waitForSignal
|
||||
file, err := os.Create(pidFile)
|
||||
if err != nil {
|
||||
logger.WithError(err).Errorf("Unable to write pid to %s", pidFile)
|
||||
}
|
||||
defer file.Close()
|
||||
fmt.Fprintf(file, "%d", os.Getpid())
|
||||
}
|
||||
|
||||
func userHomeDir() (string, error) {
|
||||
|
@ -651,3 +115,7 @@ func userHomeDir() (string, error) {
|
|||
}
|
||||
return homeDir, nil
|
||||
}
|
||||
|
||||
func isRunningFromTerminal() bool {
|
||||
return terminal.IsTerminal(int(os.Stdout.Fd()))
|
||||
}
|
||||
|
|
|
@@ -8,9 +8,9 @@ import (
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"text/template"

	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
	"github.com/mitchellh/go-homedir"
)

@@ -94,7 +94,7 @@ func runCommand(command string, args ...string) error {
}

func ensureConfigDirExists(configDir string) error {
	ok, err := fileExists(configDir)
	ok, err := config.FileExists(configDir)
	if !ok && err == nil {
		err = os.Mkdir(configDir, 0700)
	}

@@ -119,8 +119,7 @@ func openFile(path string, create bool) (file *os.File, exists bool, err error)
	return file, false, err
}

func copyCertificate(srcConfigDir, destConfigDir, credentialFile string) error {
	destCredentialPath := filepath.Join(destConfigDir, credentialFile)
func copyCredential(srcCredentialPath, destCredentialPath string) error {
	destFile, exists, err := openFile(destCredentialPath, true)
	if err != nil {
		return err

@@ -130,7 +129,6 @@ func copyCertificate(srcConfigDir, destConfigDir, credentialFile string) error {
	}
	defer destFile.Close()

	srcCredentialPath := filepath.Join(srcConfigDir, credentialFile)
	srcFile, _, err := openFile(srcCredentialPath, false)
	if err != nil {
		return err

@@ -146,17 +144,8 @@ func copyCertificate(srcConfigDir, destConfigDir, credentialFile string) error {
	return nil
}

func copyCredentials(serviceConfigDir, defaultConfigDir, defaultConfigFile, defaultCredentialFile string) error {
	if err := ensureConfigDirExists(serviceConfigDir); err != nil {
		return err
	}

	if err := copyCertificate(defaultConfigDir, serviceConfigDir, defaultCredentialFile); err != nil {
		return err
	}

func copyConfig(srcConfigPath, destConfigPath string) error {
	// Copy or create config
	destConfigPath := filepath.Join(serviceConfigDir, defaultConfigFile)
	destFile, exists, err := openFile(destConfigPath, true)
	if err != nil {
		logger.WithError(err).Infof("cannot open %s", destConfigPath)

@@ -167,7 +156,6 @@ func copyCredentials(serviceConfigDir, defaultConfigDir, defaultConfigFile, defa
	}
	defer destFile.Close()

	srcConfigPath := filepath.Join(defaultConfigDir, defaultConfigFile)
	srcFile, _, err := openFile(srcConfigPath, false)
	if err != nil {
		fmt.Println("Your service needs a config file that at least specifies the hostname option.")
@ -0,0 +1,47 @@
package shell

import (
    "io"
    "os"
    "os/exec"
    "runtime"
)

// OpenBrowser opens the specified URL in the default browser of the user
func OpenBrowser(url string) error {
    var cmd string
    var args []string

    switch runtime.GOOS {
    case "windows":
        cmd = "cmd"
        args = []string{"/c", "start"}
    case "darwin":
        cmd = "open"
    default: // "linux", "freebsd", "openbsd", "netbsd"
        cmd = "xdg-open"
    }
    args = append(args, url)
    return exec.Command(cmd, args...).Start()
}

// Run will kick off a shell task and pipe the results to the respective std pipes
func Run(cmd string, args ...string) error {
    c := exec.Command(cmd, args...)
    stderr, err := c.StderrPipe()
    if err != nil {
        return err
    }
    go func() {
        io.Copy(os.Stderr, stderr)
    }()

    stdout, err := c.StdoutPipe()
    if err != nil {
        return err
    }
    go func() {
        io.Copy(os.Stdout, stdout)
    }()
    return c.Run()
}
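
A minimal sketch of how the new shell helpers are meant to be called (illustrative only, not part of this commit; the URL and command below are placeholders):

package main

import (
    "log"

    "github.com/cloudflare/cloudflared/cmd/cloudflared/shell"
)

func main() {
    // Open a page in the user's default browser (placeholder URL).
    if err := shell.OpenBrowser("https://example.com"); err != nil {
        log.Printf("could not open browser: %v", err)
    }

    // Run a command and stream its stdout/stderr to this process's pipes.
    if err := shell.Run("echo", "hello from cloudflared"); err != nil {
        log.Printf("command failed: %v", err)
    }
}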
@ -0,0 +1,171 @@
|
|||
package transfer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/cloudflared/cmd/cloudflared/encrypter"
|
||||
"github.com/cloudflare/cloudflared/cmd/cloudflared/shell"
|
||||
"github.com/cloudflare/cloudflared/log"
|
||||
cli "gopkg.in/urfave/cli.v2"
|
||||
)
|
||||
|
||||
const (
|
||||
baseStoreURL = "https://login.cloudflarewarp.com/"
|
||||
clientTimeout = time.Second * 60
|
||||
)
|
||||
|
||||
var logger = log.CreateLogger()
|
||||
|
||||
// Run does the transfer "dance", with the end result of downloading the requested resource.
// Run encapsulates the shared business logic needed to request a resource (token/cert/etc.)
// from the transfer service (loginhelper).
// The "dance" consists of building an HTTP request, opening it in a browser, and waiting for
// the user to complete an action, while long polling in the background until that action is
// completed and the resource can be downloaded.
|
||||
func Run(c *cli.Context, transferURL *url.URL, resourceName, key, value, path string, shouldEncrypt bool) ([]byte, error) {
|
||||
encrypterClient, err := encrypter.New("cloudflared_priv.pem", "cloudflared_pub.pem")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
requestURL, err := buildRequestURL(transferURL, key, value+encrypterClient.PublicKey(), shouldEncrypt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = shell.OpenBrowser(requestURL)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stdout, "Please open the following URL and log in with your Cloudflare account:\n\n%s\n\nLeave cloudflared running to download the %s automatically.\n", resourceName, requestURL)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stdout, "A browser window should have opened at the following URL:\n\n%s\n\nIf the browser failed to open, open it yourself and visit the URL above.\n", requestURL)
|
||||
}
|
||||
|
||||
// for local debugging
|
||||
baseURL := baseStoreURL
|
||||
if c.IsSet("url") {
|
||||
baseURL = c.String("url")
|
||||
}
|
||||
|
||||
var resourceData []byte
|
||||
|
||||
if shouldEncrypt {
|
||||
buf, key, err := transferRequest(baseURL + filepath.Join("transfer", encrypterClient.PublicKey()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
decodedBuf, err := base64.StdEncoding.DecodeString(string(buf))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
decrypted, err := encrypterClient.Decrypt(decodedBuf, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resourceData = decrypted
|
||||
} else {
|
||||
buf, _, err := transferRequest(baseURL + filepath.Join(encrypterClient.PublicKey()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resourceData = buf
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(path, resourceData, 0600); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resourceData, nil
|
||||
}
|
||||
|
||||
// buildRequestURL creates a URL suitable for requesting a resource transfer.
// It returns a URL constructed from the base URL and the query key/value provided.
// If follow is true, redirects are followed and the final URL is returned.
|
||||
func buildRequestURL(baseURL *url.URL, key, value string, follow bool) (string, error) {
|
||||
q := baseURL.Query()
|
||||
q.Set(key, value)
|
||||
baseURL.RawQuery = q.Encode()
|
||||
if !follow {
|
||||
return baseURL.String(), nil
|
||||
}
|
||||
|
||||
response, err := http.Get(baseURL.String())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return response.Request.URL.String(), nil
|
||||
|
||||
}
|
||||
|
||||
// transferRequest downloads the requested resource from the request URL
|
||||
func transferRequest(requestURL string) ([]byte, string, error) {
|
||||
client := &http.Client{Timeout: clientTimeout}
|
||||
const pollAttempts = 10
|
||||
// we do "long polling" on the endpoint to get the resource.
|
||||
for i := 0; i < pollAttempts; i++ {
|
||||
buf, key, err := poll(client, requestURL)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
} else if len(buf) > 0 {
|
||||
if err := putSuccess(client, requestURL); err != nil {
|
||||
logger.WithError(err).Error("Failed to update resource success")
|
||||
}
|
||||
return buf, key, nil
|
||||
}
|
||||
}
|
||||
return nil, "", errors.New("Failed to fetch resource")
|
||||
}
|
||||
|
||||
// poll the endpoint for the requested resource, waiting for user interaction
|
||||
func poll(client *http.Client, requestURL string) ([]byte, string, error) {
|
||||
resp, err := client.Get(requestURL)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// ignore everything other than server errors as the resource
|
||||
// may not exist until the user does the interaction
|
||||
if resp.StatusCode >= 500 {
|
||||
return nil, "", fmt.Errorf("error on request %d", resp.StatusCode)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
logger.Info("Waiting for login...")
|
||||
return nil, "", nil
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
if _, err := io.Copy(buf, resp.Body); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return buf.Bytes(), resp.Header.Get("service-public-key"), nil
|
||||
}
|
||||
|
||||
// putSuccess tells the server we successfully downloaded the resource
|
||||
func putSuccess(client *http.Client, requestURL string) error {
|
||||
req, err := http.NewRequest("PUT", requestURL+"/ok", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
return fmt.Errorf("HTTP Response Status Code: %d", resp.StatusCode)
|
||||
}
|
||||
return nil
|
||||
}
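
The transfer flow above is exercised by commands elsewhere in this change; a minimal sketch of a caller, assuming a urfave/cli context and a placeholder destination path (the dashboard and callback URLs mirror the constants used by the login command later in this commit):

package sketch

import (
    "net/url"

    "github.com/cloudflare/cloudflared/cmd/cloudflared/transfer"
    cli "gopkg.in/urfave/cli.v2"
)

// fetchCert is an illustrative sketch (not part of this commit) of driving the
// transfer "dance": it opens the dashboard login page in a browser and long
// polls the transfer service until the certificate can be written to certPath.
func fetchCert(c *cli.Context, certPath string) error {
    loginURL, err := url.Parse("https://dash.cloudflare.com/argotunnel")
    if err != nil {
        return err
    }
    // resourceName "cert", query key "callback", and the callback store URL
    // mirror how the login command in this commit calls transfer.Run.
    _, err = transfer.Run(c, loginURL, "cert", "callback", "https://login.cloudflarewarp.com/", certPath, false)
    return err
}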
@ -0,0 +1,658 @@
|
|||
package tunnel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime/trace"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/getsentry/raven-go"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
|
||||
"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
|
||||
"github.com/cloudflare/cloudflared/cmd/cloudflared/updater"
|
||||
"github.com/cloudflare/cloudflared/cmd/sqlgateway"
|
||||
"github.com/cloudflare/cloudflared/hello"
|
||||
"github.com/cloudflare/cloudflared/metrics"
|
||||
"github.com/cloudflare/cloudflared/origin"
|
||||
"github.com/cloudflare/cloudflared/tunneldns"
|
||||
"github.com/coreos/go-systemd/daemon"
|
||||
"github.com/facebookgo/grace/gracenet"
|
||||
"github.com/pkg/errors"
|
||||
cli "gopkg.in/urfave/cli.v2"
|
||||
"gopkg.in/urfave/cli.v2/altsrc"
|
||||
)
|
||||
|
||||
const sentryDSN = "https://56a9c9fa5c364ab28f34b14f35ea0f1b:3e8827f6f9f740738eb11138f7bebb68@sentry.io/189878"
|
||||
|
||||
var (
|
||||
shutdownC chan struct{}
|
||||
graceShutdownC chan struct{}
|
||||
version string
|
||||
)
|
||||
|
||||
func Flags() []cli.Flag {
|
||||
return tunnelFlags(true)
|
||||
}
|
||||
|
||||
func Commands() []*cli.Command {
|
||||
cmds := []*cli.Command{
|
||||
{
|
||||
Name: "login",
|
||||
Action: login,
|
||||
Usage: "Generate a configuration file with your login details",
|
||||
ArgsUsage: " ",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "url",
|
||||
Hidden: true,
|
||||
},
|
||||
},
|
||||
Hidden: true,
|
||||
},
|
||||
{
|
||||
Name: "hello",
|
||||
Action: helloWorld,
|
||||
Usage: "Run a simple \"Hello World\" server for testing Argo Tunnel.",
|
||||
Flags: []cli.Flag{
|
||||
&cli.IntFlag{
|
||||
Name: "port",
|
||||
Usage: "Listen on the selected port.",
|
||||
Value: 8080,
|
||||
},
|
||||
},
|
||||
ArgsUsage: " ", // can't be the empty string or we get the default output
|
||||
Hidden: true,
|
||||
},
|
||||
{
|
||||
Name: "proxy-dns",
|
||||
Action: tunneldns.Run,
|
||||
Usage: "Run a DNS over HTTPS proxy server.",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "metrics",
|
||||
Value: "localhost:",
|
||||
Usage: "Listen address for metrics reporting.",
|
||||
EnvVars: []string{"TUNNEL_METRICS"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "address",
|
||||
Usage: "Listen address for the DNS over HTTPS proxy server.",
|
||||
Value: "localhost",
|
||||
EnvVars: []string{"TUNNEL_DNS_ADDRESS"},
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "port",
|
||||
Usage: "Listen on given port for the DNS over HTTPS proxy server.",
|
||||
Value: 53,
|
||||
EnvVars: []string{"TUNNEL_DNS_PORT"},
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "upstream",
|
||||
Usage: "Upstream endpoint URL, you can specify multiple endpoints for redundancy.",
|
||||
Value: cli.NewStringSlice("https://1.1.1.1/dns-query", "https://1.0.0.1/dns-query"),
|
||||
EnvVars: []string{"TUNNEL_DNS_UPSTREAM"},
|
||||
},
|
||||
},
|
||||
ArgsUsage: " ", // can't be the empty string or we get the default output
|
||||
Hidden: false,
|
||||
},
|
||||
{
|
||||
Name: "db",
|
||||
Action: func(c *cli.Context) error {
|
||||
tags := make(map[string]string)
|
||||
tags["hostname"] = c.String("hostname")
|
||||
raven.SetTagsContext(tags)
|
||||
|
||||
fmt.Printf("\nSQL Database Password: ")
|
||||
pass, err := terminal.ReadPassword(int(syscall.Stdin))
|
||||
if err != nil {
|
||||
logger.Error(err)
|
||||
}
|
||||
|
||||
go sqlgateway.StartProxy(c, logger, string(pass))
|
||||
|
||||
raven.CapturePanic(func() { err = tunnel(c) }, nil)
|
||||
if err != nil {
|
||||
raven.CaptureError(err, nil)
|
||||
}
|
||||
return err
|
||||
},
|
||||
Before: Before,
|
||||
Usage: "SQL Gateway is an SQL over HTTP reverse proxy",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "db",
|
||||
Value: true,
|
||||
Usage: "Enable the SQL Gateway Proxy",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "address",
|
||||
Value: "",
|
||||
Usage: "Database connection string: db://user:pass",
|
||||
},
|
||||
},
|
||||
Hidden: true,
|
||||
},
|
||||
}
|
||||
|
||||
var subcommands []*cli.Command
|
||||
for _, cmd := range cmds {
|
||||
c := *cmd
|
||||
c.Hidden = false
|
||||
subcommands = append(subcommands, &c)
|
||||
}
|
||||
|
||||
cmds = append(cmds, &cli.Command{
|
||||
Name: "tunnel",
|
||||
Action: tunnel,
|
||||
Before: Before,
|
||||
Category: "Tunnel",
|
||||
Usage: "Make a locally-running web service accessible over the internet using Argo Tunnel.",
|
||||
ArgsUsage: "[origin-url]",
|
||||
Description: `Argo Tunnel asks you to specify a hostname on a Cloudflare-powered
|
||||
domain you control and a local address. Traffic from that hostname is routed
|
||||
(optionally via a Cloudflare Load Balancer) to this machine and appears on the
|
||||
specified port where it can be served.
|
||||
|
||||
This feature requires your Cloudflare account be subscribed to the Argo Smart Routing feature.
|
||||
|
||||
To use, begin by calling login to download a certificate:
|
||||
|
||||
cloudflared tunnel login
|
||||
|
||||
With your certificate installed you can then launch your first tunnel,
|
||||
replacing my.site.com with a subdomain of your site:
|
||||
|
||||
cloudflared tunnel --hostname my.site.com --url http://localhost:8080
|
||||
|
||||
If you have a web server running on port 8080 (in this example), it will be available on
|
||||
the internet!`,
|
||||
Subcommands: subcommands,
|
||||
Flags: tunnelFlags(false),
|
||||
})
|
||||
|
||||
return cmds
|
||||
}
|
||||
|
||||
func tunnel(c *cli.Context) error {
|
||||
return StartServer(c, version, shutdownC, graceShutdownC)
|
||||
}
|
||||
|
||||
func Init(v string, s, g chan struct{}) {
|
||||
version, shutdownC, graceShutdownC = v, s, g
|
||||
}
|
||||
|
||||
func StartServer(c *cli.Context, version string, shutdownC, graceShutdownC chan struct{}) error {
|
||||
raven.SetDSN(sentryDSN)
|
||||
var wg sync.WaitGroup
|
||||
listeners := gracenet.Net{}
|
||||
errC := make(chan error)
|
||||
connectedSignal := make(chan struct{})
|
||||
dnsReadySignal := make(chan struct{})
|
||||
|
||||
if c.String("config") == "" {
|
||||
logger.Warnf("Cannot determine default configuration path. No file %v in %v", config.DefaultConfigFiles, config.DefaultConfigDirs)
|
||||
}
|
||||
|
||||
if err := configMainLogger(c); err != nil {
|
||||
return errors.Wrap(err, "Error configuring logger")
|
||||
}
|
||||
|
||||
protoLogger, err := configProtoLogger(c)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error configuring protocol logger")
|
||||
}
|
||||
|
||||
if c.IsSet("trace-output") {
|
||||
tmpTraceFile, err := ioutil.TempFile("", "trace")
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Failed to create new temporary file to save trace output")
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := tmpTraceFile.Close(); err != nil {
|
||||
logger.WithError(err).Errorf("Failed to close trace output file %s", tmpTraceFile.Name())
|
||||
}
|
||||
if err := os.Rename(tmpTraceFile.Name(), c.String("trace-output")); err != nil {
|
||||
logger.WithError(err).Errorf("Failed to rename temporary trace output file %s to %s", tmpTraceFile.Name(), c.String("trace-output"))
|
||||
} else {
|
||||
os.Remove(tmpTraceFile.Name())
|
||||
}
|
||||
}()
|
||||
|
||||
if err := trace.Start(tmpTraceFile); err != nil {
|
||||
logger.WithError(err).Error("Failed to start trace")
|
||||
return errors.Wrap(err, "Error starting tracing")
|
||||
}
|
||||
defer trace.Stop()
|
||||
}
|
||||
|
||||
if c.String("logfile") != "" {
|
||||
if err := initLogFile(c, logger, protoLogger); err != nil {
|
||||
logger.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := handleDeprecatedOptions(c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buildInfo := origin.GetBuildInfo()
|
||||
logger.Infof("Build info: %+v", *buildInfo)
|
||||
logger.Infof("Version %s", version)
|
||||
logClientOptions(c)
|
||||
|
||||
if c.IsSet("proxy-dns") {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
errC <- runDNSProxyServer(c, dnsReadySignal, shutdownC)
|
||||
}()
|
||||
} else {
|
||||
close(dnsReadySignal)
|
||||
}
|
||||
|
||||
// Wait for proxy-dns to come up (if used)
|
||||
<-dnsReadySignal
|
||||
|
||||
// update needs to be after DNS proxy is up to resolve equinox server address
|
||||
if updater.IsAutoupdateEnabled(c) {
|
||||
logger.Infof("Autoupdate frequency is set to %v", c.Duration("autoupdate-freq"))
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
errC <- updater.Autoupdate(c.Duration("autoupdate-freq"), &listeners, shutdownC)
|
||||
}()
|
||||
}
|
||||
|
||||
metricsListener, err := listeners.Listen("tcp", c.String("metrics"))
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Error opening metrics server listener")
|
||||
return errors.Wrap(err, "Error opening metrics server listener")
|
||||
}
|
||||
defer metricsListener.Close()
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
errC <- metrics.ServeMetrics(metricsListener, shutdownC, logger)
|
||||
}()
|
||||
|
||||
go notifySystemd(connectedSignal)
|
||||
if c.IsSet("pidfile") {
|
||||
go writePidFile(connectedSignal, c.String("pidfile"))
|
||||
}
|
||||
|
||||
// Serve DNS proxy stand-alone if no hostname or tag or app is going to run
|
||||
if dnsProxyStandAlone(c) {
|
||||
close(connectedSignal)
|
||||
// no grace period, handle SIGINT/SIGTERM immediately
|
||||
return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, 0)
|
||||
}
|
||||
|
||||
if c.IsSet("hello-world") {
|
||||
helloListener, err := hello.CreateTLSListener("127.0.0.1:")
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Cannot start Hello World Server")
|
||||
return errors.Wrap(err, "Cannot start Hello World Server")
|
||||
}
|
||||
defer helloListener.Close()
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
hello.StartHelloWorldServer(logger, helloListener, shutdownC)
|
||||
}()
|
||||
c.Set("url", "https://"+helloListener.Addr().String())
|
||||
}
|
||||
|
||||
tunnelConfig, err := prepareTunnelConfig(c, buildInfo, version, logger, protoLogger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
errC <- origin.StartTunnelDaemon(tunnelConfig, graceShutdownC, connectedSignal)
|
||||
}()
|
||||
|
||||
return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, c.Duration("grace-period"))
|
||||
}
|
||||
|
||||
func Before(c *cli.Context) error {
|
||||
if c.String("config") == "" {
|
||||
logger.Warnf("Cannot determine default configuration path. No file %v in %v", config.DefaultConfigFiles, config.DefaultConfigDirs)
|
||||
}
|
||||
inputSource, err := config.FindInputSourceContext(c)
|
||||
if err != nil {
|
||||
logger.WithError(err).Infof("Cannot load configuration from %s", c.String("config"))
|
||||
return err
|
||||
} else if inputSource != nil {
|
||||
err := altsrc.ApplyInputSourceValues(c, inputSource, c.App.Flags)
|
||||
if err != nil {
|
||||
logger.WithError(err).Infof("Cannot apply configuration from %s", c.String("config"))
|
||||
return err
|
||||
}
|
||||
logger.Infof("Applied configuration from %s", c.String("config"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func waitToShutdown(wg *sync.WaitGroup,
|
||||
errC chan error,
|
||||
shutdownC, graceShutdownC chan struct{},
|
||||
gracePeriod time.Duration,
|
||||
) error {
|
||||
var err error
|
||||
if gracePeriod > 0 {
|
||||
err = waitForSignalWithGraceShutdown(errC, shutdownC, graceShutdownC, gracePeriod)
|
||||
} else {
|
||||
err = waitForSignal(errC, shutdownC)
|
||||
close(graceShutdownC)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Quitting due to error")
|
||||
} else {
|
||||
logger.Info("Quitting...")
|
||||
}
|
||||
// Wait for clean exit, discarding all errors
|
||||
go func() {
|
||||
for range errC {
|
||||
}
|
||||
}()
|
||||
wg.Wait()
|
||||
return err
|
||||
}
|
||||
|
||||
func notifySystemd(waitForSignal chan struct{}) {
|
||||
<-waitForSignal
|
||||
daemon.SdNotify(false, "READY=1")
|
||||
}
|
||||
|
||||
func writePidFile(waitForSignal chan struct{}, pidFile string) {
|
||||
<-waitForSignal
|
||||
file, err := os.Create(pidFile)
|
||||
if err != nil {
|
||||
logger.WithError(err).Errorf("Unable to write pid to %s", pidFile)
|
||||
}
|
||||
defer file.Close()
|
||||
fmt.Fprintf(file, "%d", os.Getpid())
|
||||
}
|
||||
|
||||
func tunnelFlags(shouldHide bool) []cli.Flag {
|
||||
return []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "config",
|
||||
Usage: "Specifies a config file in YAML format.",
|
||||
Value: config.FindDefaultConfigPath(),
|
||||
Hidden: shouldHide,
|
||||
},
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "autoupdate-freq",
|
||||
Usage: "Autoupdate frequency. Default is 24h.",
|
||||
Value: time.Hour * 24,
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "no-autoupdate",
|
||||
Usage: "Disable periodic check for updates, restarting the server with the new version.",
|
||||
Value: false,
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "is-autoupdated",
|
||||
Usage: "Signal the new process that Argo Tunnel client has been autoupdated",
|
||||
Value: false,
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
|
||||
Name: "edge",
|
||||
Usage: "Address of the Cloudflare tunnel server.",
|
||||
EnvVars: []string{"TUNNEL_EDGE"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "cacert",
|
||||
Usage: "Certificate Authority authenticating the Cloudflare tunnel connection.",
|
||||
EnvVars: []string{"TUNNEL_CACERT"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "no-tls-verify",
|
||||
Usage: "Disables TLS verification of the certificate presented by your origin. Will allow any certificate from the origin to be accepted. Note: The connection from your machine to Cloudflare's Edge is still encrypted.",
|
||||
EnvVars: []string{"NO_TLS_VERIFY"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "origincert",
|
||||
Usage: "Path to the certificate generated for your origin when you run cloudflared login.",
|
||||
EnvVars: []string{"TUNNEL_ORIGIN_CERT"},
|
||||
Value: findDefaultOriginCertPath(),
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "origin-ca-pool",
|
||||
Usage: "Path to the CA for the certificate of your origin. This option should be used only if your certificate is not signed by Cloudflare.",
|
||||
EnvVars: []string{"TUNNEL_ORIGIN_CA_POOL"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "url",
|
||||
Value: "http://localhost:8080",
|
||||
Usage: "Connect to the local webserver at `URL`.",
|
||||
EnvVars: []string{"TUNNEL_URL"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "hostname",
|
||||
Usage: "Set a hostname on a Cloudflare zone to route traffic through this tunnel.",
|
||||
EnvVars: []string{"TUNNEL_HOSTNAME"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "origin-server-name",
|
||||
Usage: "Hostname on the origin server certificate.",
|
||||
EnvVars: []string{"TUNNEL_ORIGIN_SERVER_NAME"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "id",
|
||||
Usage: "A unique identifier used to tie connections to this tunnel instance.",
|
||||
EnvVars: []string{"TUNNEL_ID"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "lb-pool",
|
||||
Usage: "The name of a (new/existing) load balancing pool to add this origin to.",
|
||||
EnvVars: []string{"TUNNEL_LB_POOL"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "api-key",
|
||||
Usage: "This parameter has been deprecated since version 2017.10.1.",
|
||||
EnvVars: []string{"TUNNEL_API_KEY"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "api-email",
|
||||
Usage: "This parameter has been deprecated since version 2017.10.1.",
|
||||
EnvVars: []string{"TUNNEL_API_EMAIL"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "api-ca-key",
|
||||
Usage: "This parameter has been deprecated since version 2017.10.1.",
|
||||
EnvVars: []string{"TUNNEL_API_CA_KEY"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "metrics",
|
||||
Value: "localhost:",
|
||||
Usage: "Listen address for metrics reporting.",
|
||||
EnvVars: []string{"TUNNEL_METRICS"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "metrics-update-freq",
|
||||
Usage: "Frequency to update tunnel metrics",
|
||||
Value: time.Second * 5,
|
||||
EnvVars: []string{"TUNNEL_METRICS_UPDATE_FREQ"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
|
||||
Name: "tag",
|
||||
Usage: "Custom tags used to identify this tunnel, in format `KEY=VALUE`. Multiple tags may be specified",
|
||||
EnvVars: []string{"TUNNEL_TAG"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "heartbeat-interval",
|
||||
Usage: "Minimum idle time before sending a heartbeat.",
|
||||
Value: time.Second * 5,
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewUint64Flag(&cli.Uint64Flag{
|
||||
Name: "heartbeat-count",
|
||||
Usage: "Minimum number of unacked heartbeats to send before closing the connection.",
|
||||
Value: 5,
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "loglevel",
|
||||
Value: "info",
|
||||
Usage: "Application logging level {panic, fatal, error, warn, info, debug}",
|
||||
EnvVars: []string{"TUNNEL_LOGLEVEL"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "proto-loglevel",
|
||||
Value: "warn",
|
||||
Usage: "Protocol logging level {panic, fatal, error, warn, info, debug}",
|
||||
EnvVars: []string{"TUNNEL_PROTO_LOGLEVEL"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewUintFlag(&cli.UintFlag{
|
||||
Name: "retries",
|
||||
Value: 5,
|
||||
Usage: "Maximum number of retries for connection/protocol errors.",
|
||||
EnvVars: []string{"TUNNEL_RETRIES"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "hello-world",
|
||||
Value: false,
|
||||
Usage: "Run Hello World Server",
|
||||
EnvVars: []string{"TUNNEL_HELLO_WORLD"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "pidfile",
|
||||
Usage: "Write the application's PID to this file after first successful connection.",
|
||||
EnvVars: []string{"TUNNEL_PIDFILE"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "logfile",
|
||||
Usage: "Save application log to this file for reporting issues.",
|
||||
EnvVars: []string{"TUNNEL_LOGFILE"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "ha-connections",
|
||||
Value: 4,
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "proxy-connect-timeout",
|
||||
Usage: "HTTP proxy timeout for establishing a new connection",
|
||||
Value: time.Second * 30,
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "proxy-tls-timeout",
|
||||
Usage: "HTTP proxy timeout for completing a TLS handshake",
|
||||
Value: time.Second * 10,
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "proxy-tcp-keepalive",
|
||||
Usage: "HTTP proxy TCP keepalive duration",
|
||||
Value: time.Second * 30,
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "proxy-no-happy-eyeballs",
|
||||
Usage: "HTTP proxy should disable \"happy eyeballs\" for IPv4/v6 fallback",
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "proxy-keepalive-connections",
|
||||
Usage: "HTTP proxy maximum keepalive connection pool size",
|
||||
Value: 100,
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "proxy-keepalive-timeout",
|
||||
Usage: "HTTP proxy timeout for closing an idle connection",
|
||||
Value: time.Second * 90,
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "proxy-dns",
|
||||
Usage: "Run a DNS over HTTPS proxy server.",
|
||||
EnvVars: []string{"TUNNEL_DNS"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "proxy-dns-port",
|
||||
Value: 53,
|
||||
Usage: "Listen on given port for the DNS over HTTPS proxy server.",
|
||||
EnvVars: []string{"TUNNEL_DNS_PORT"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "proxy-dns-address",
|
||||
Usage: "Listen address for the DNS over HTTPS proxy server.",
|
||||
Value: "localhost",
|
||||
EnvVars: []string{"TUNNEL_DNS_ADDRESS"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
|
||||
Name: "proxy-dns-upstream",
|
||||
Usage: "Upstream endpoint URL, you can specify multiple endpoints for redundancy.",
|
||||
Value: cli.NewStringSlice("https://1.1.1.1/dns-query", "https://1.0.0.1/dns-query"),
|
||||
EnvVars: []string{"TUNNEL_DNS_UPSTREAM"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewDurationFlag(&cli.DurationFlag{
|
||||
Name: "grace-period",
|
||||
Usage: "Duration to accept new requests after cloudflared receives first SIGINT/SIGTERM. A second SIGINT/SIGTERM will force cloudflared to shutdown immediately.",
|
||||
Value: time.Second * 30,
|
||||
EnvVars: []string{"TUNNEL_GRACE_PERIOD"},
|
||||
Hidden: true,
|
||||
}),
|
||||
altsrc.NewUintFlag(&cli.UintFlag{
|
||||
Name: "compression-quality",
|
||||
Value: 0,
|
||||
Usage: "(beta) Use cross-stream compression instead HTTP compression. 0-off, 1-low, 2-medium, >=3-high.",
|
||||
EnvVars: []string{"TUNNEL_COMPRESSION_LEVEL"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "no-chunked-encoding",
|
||||
Usage: "Disables chunked transfer encoding; useful if you are running a WSGI server.",
|
||||
EnvVars: []string{"TUNNEL_NO_CHUNKED_ENCODING"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "trace-output",
|
||||
Usage: "Name of trace output file, generated when cloudflared stops.",
|
||||
EnvVars: []string{"TUNNEL_TRACE_OUTPUT"},
|
||||
Hidden: shouldHide,
|
||||
}),
|
||||
}
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
package main
|
||||
package tunnel
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
|
@ -15,80 +15,39 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
|
||||
"github.com/cloudflare/cloudflared/origin"
|
||||
"github.com/cloudflare/cloudflared/tlsconfig"
|
||||
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
|
||||
"github.com/cloudflare/cloudflared/validation"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"gopkg.in/urfave/cli.v2"
|
||||
"gopkg.in/urfave/cli.v2/altsrc"
|
||||
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultConfigFiles = []string{"config.yml", "config.yaml"}
|
||||
|
||||
// Launchd doesn't set root env variables, so there is default
|
||||
// Windows default config dir was ~/cloudflare-warp in documentation; let's keep it compatible
|
||||
defaultConfigDirs = []string{"~/.cloudflared", "~/.cloudflare-warp", "~/cloudflare-warp", "/usr/local/etc/cloudflared", "/etc/cloudflared"}
|
||||
developerPortal = "https://developers.cloudflare.com/argo-tunnel"
|
||||
quickStartUrl = developerPortal + "/quickstart/quickstart/"
|
||||
serviceUrl = developerPortal + "/reference/service/"
|
||||
argumentsUrl = developerPortal + "/reference/arguments/"
|
||||
)
|
||||
|
||||
const defaultCredentialFile = "cert.pem"
|
||||
|
||||
func fileExists(path string) (bool, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// ignore missing files
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
f.Close()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// returns the first path that contains a cert.pem file. If none of the defaultConfigDirs
|
||||
// (differs by OS for legacy reasons) contains a cert.pem file, return empty string
|
||||
// returns the first path that contains a cert.pem file. If none of the DefaultConfigDirs
|
||||
// contains a cert.pem file, return empty string
|
||||
func findDefaultOriginCertPath() string {
|
||||
for _, defaultConfigDir := range defaultConfigDirs {
|
||||
originCertPath, _ := homedir.Expand(filepath.Join(defaultConfigDir, defaultCredentialFile))
|
||||
if ok, _ := fileExists(originCertPath); ok {
|
||||
for _, defaultConfigDir := range config.DefaultConfigDirs {
|
||||
originCertPath, _ := homedir.Expand(filepath.Join(defaultConfigDir, config.DefaultCredentialFile))
|
||||
if ok, _ := config.FileExists(originCertPath); ok {
|
||||
return originCertPath
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// returns the first path that contains a config file. If none of the combination of
|
||||
// defaultConfigDirs (differs by OS for legacy reasons) and defaultConfigFiles
|
||||
// contains a config file, return empty string
|
||||
func findDefaultConfigPath() string {
|
||||
for _, configDir := range defaultConfigDirs {
|
||||
for _, configFile := range defaultConfigFiles {
|
||||
dirPath, err := homedir.Expand(configDir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
path := filepath.Join(dirPath, configFile)
|
||||
if ok, _ := fileExists(path); ok {
|
||||
return path
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func findInputSourceContext(context *cli.Context) (altsrc.InputSourceContext, error) {
|
||||
if context.String("config") != "" {
|
||||
return altsrc.NewYamlSourceFromFile(context.String("config"))
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func generateRandomClientID() string {
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
id := make([]byte, 32)
|
||||
|
@ -96,24 +55,6 @@ func generateRandomClientID() string {
|
|||
return hex.EncodeToString(id)
|
||||
}
|
||||
|
||||
func enoughOptionsSet(c *cli.Context) bool {
|
||||
// For cloudflared to work, the user needs to at least provide a hostname,
|
||||
// or runs as stand alone DNS proxy .
|
||||
// When using sudo, use -E flag to preserve env vars
|
||||
if c.NumFlags() == 0 && c.NArg() == 0 && os.Getenv("TUNNEL_HOSTNAME") == "" && os.Getenv("TUNNEL_DNS") == "" {
|
||||
if isRunningFromTerminal() {
|
||||
logger.Errorf("No arguments were provided. You need to at least specify the hostname for this tunnel. See %s", quickStartUrl)
|
||||
logger.Infof("If you want to run Argo Tunnel client as a stand alone DNS proxy, run with --proxy-dns option or set TUNNEL_DNS environment variable.")
|
||||
} else {
|
||||
logger.Errorf("You need to specify all the options in a configuration file, or use environment variables. See %s and %s", serviceUrl, argumentsUrl)
|
||||
logger.Infof("If you want to run Argo Tunnel client as a stand alone DNS proxy, specify proxy-dns option in the configuration file, or set TUNNEL_DNS environment variable.")
|
||||
}
|
||||
cli.ShowAppHelp(c)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func handleDeprecatedOptions(c *cli.Context) error {
|
||||
// Fail if the user provided an old authentication method
|
||||
if c.IsSet("api-key") || c.IsSet("api-email") || c.IsSet("api-ca-key") {
|
||||
|
@ -123,12 +64,14 @@ func handleDeprecatedOptions(c *cli.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// validate url. It can be either from --url or argument
|
||||
func validateUrl(c *cli.Context) (string, error) {
|
||||
var url = c.String("url")
|
||||
if c.NArg() > 0 {
|
||||
if !c.IsSet("url") {
|
||||
return "", errors.New("Please specify an origin URL.")
|
||||
if c.IsSet("url") {
|
||||
return "", errors.New("Specified origin urls using both --url and argument. Decide which one you want, I can only support one.")
|
||||
}
|
||||
url = c.Args().Get(0)
|
||||
}
|
||||
validUrl, err := validation.ValidateUrl(url)
|
||||
return validUrl, err
|
||||
|
@ -165,7 +108,7 @@ func dnsProxyStandAlone(c *cli.Context) bool {
|
|||
|
||||
func getOriginCert(c *cli.Context) ([]byte, error) {
|
||||
if c.String("origincert") == "" {
|
||||
logger.Warnf("Cannot determine default origin certificate path. No file %s in %v", defaultCredentialFile, defaultConfigDirs)
|
||||
logger.Warnf("Cannot determine default origin certificate path. No file %s in %v", config.DefaultCredentialFile, config.DefaultConfigDirs)
|
||||
if isRunningFromTerminal() {
|
||||
logger.Errorf("You need to specify the origin certificate path with --origincert option, or set TUNNEL_ORIGIN_CERT environment variable. See %s for more information.", argumentsUrl)
|
||||
return nil, fmt.Errorf("Client didn't specify origincert path when running from terminal")
|
||||
|
@ -180,7 +123,7 @@ func getOriginCert(c *cli.Context) ([]byte, error) {
|
|||
logger.WithError(err).Errorf("Cannot resolve path %s", c.String("origincert"))
|
||||
return nil, fmt.Errorf("Cannot resolve path %s", c.String("origincert"))
|
||||
}
|
||||
ok, err := fileExists(originCertPath)
|
||||
ok, err := config.FileExists(originCertPath)
|
||||
if err != nil {
|
||||
logger.Errorf("Cannot check if origin cert exists at path %s", c.String("origincert"))
|
||||
return nil, fmt.Errorf("Cannot check if origin cert exists at path %s", c.String("origincert"))
|
||||
|
@ -206,7 +149,7 @@ If you don't have a certificate signed by Cloudflare, run the command:
|
|||
return originCert, nil
|
||||
}
|
||||
|
||||
func prepareTunnelConfig(c *cli.Context, buildInfo *origin.BuildInfo, logger, protoLogger *logrus.Logger) (*origin.TunnelConfig, error) {
|
||||
func prepareTunnelConfig(c *cli.Context, buildInfo *origin.BuildInfo, version string, logger, protoLogger *logrus.Logger) (*origin.TunnelConfig, error) {
|
||||
hostname, err := validation.ValidateHostname(c.String("hostname"))
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Invalid hostname")
|
||||
|
@ -225,12 +168,12 @@ func prepareTunnelConfig(c *cli.Context, buildInfo *origin.BuildInfo, logger, pr
|
|||
|
||||
tags = append(tags, tunnelpogs.Tag{Name: "ID", Value: clientID})
|
||||
|
||||
url, err := validateUrl(c)
|
||||
originURL, err := validateUrl(c)
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Error validating url")
|
||||
return nil, errors.Wrap(err, "Error validating url")
|
||||
logger.WithError(err).Error("Error validating origin URL")
|
||||
return nil, errors.Wrap(err, "Error validating origin URL")
|
||||
}
|
||||
logger.Infof("Proxying tunnel requests to %s", url)
|
||||
logger.Infof("Proxying tunnel requests to %s", originURL)
|
||||
|
||||
originCert, err := getOriginCert(c)
|
||||
if err != nil {
|
||||
|
@ -262,9 +205,15 @@ func prepareTunnelConfig(c *cli.Context, buildInfo *origin.BuildInfo, logger, pr
|
|||
httpTransport.TLSClientConfig.ServerName = c.String("origin-server-name")
|
||||
}
|
||||
|
||||
err = validation.ValidateHTTPService(originURL, httpTransport)
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("unable to connect to the origin")
|
||||
return nil, errors.Wrap(err, "unable to connect to the origin")
|
||||
}
|
||||
|
||||
return &origin.TunnelConfig{
|
||||
EdgeAddrs: c.StringSlice("edge"),
|
||||
OriginUrl: url,
|
||||
OriginUrl: originURL,
|
||||
Hostname: hostname,
|
||||
OriginCert: originCert,
|
||||
TlsConfig: tlsconfig.CreateTunnelConfig(c, c.StringSlice("edge")),
|
||||
|
@ -274,7 +223,7 @@ func prepareTunnelConfig(c *cli.Context, buildInfo *origin.BuildInfo, logger, pr
|
|||
MaxHeartbeats: c.Uint64("heartbeat-count"),
|
||||
ClientID: clientID,
|
||||
BuildInfo: buildInfo,
|
||||
ReportedVersion: Version,
|
||||
ReportedVersion: version,
|
||||
LBPool: c.String("lb-pool"),
|
||||
Tags: tags,
|
||||
HAConnections: c.Int("ha-connections"),
|
||||
|
@ -316,3 +265,7 @@ func loadCertPool(c *cli.Context, logger *logrus.Logger) (*x509.CertPool, error)
|
|||
|
||||
return originCertPool, nil
|
||||
}
|
||||
|
||||
func isRunningFromTerminal() bool {
|
||||
return terminal.IsTerminal(int(os.Stdout.Fd()))
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
package main
|
||||
package tunnel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
@ -8,7 +8,6 @@ import (
|
|||
"github.com/cloudflare/cloudflared/hello"
|
||||
)
|
||||
|
||||
|
||||
func helloWorld(c *cli.Context) error {
|
||||
address := fmt.Sprintf(":%d", c.Int("port"))
|
||||
listener, err := hello.CreateTLSListener(address)
|
|
@ -1,11 +1,10 @@
|
|||
package main
|
||||
package tunnel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/cloudflare/cloudflared/log"
|
||||
|
||||
"github.com/rifflock/lfshook"
|
||||
"github.com/sirupsen/logrus"
|
||||
"gopkg.in/urfave/cli.v2"
|
|
@ -0,0 +1,69 @@
|
|||
package tunnel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
|
||||
"github.com/cloudflare/cloudflared/cmd/cloudflared/transfer"
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
cli "gopkg.in/urfave/cli.v2"
|
||||
)
|
||||
|
||||
const (
|
||||
baseLoginURL = "https://dash.cloudflare.com/argotunnel"
|
||||
callbackStoreURL = "https://login.cloudflarewarp.com/"
|
||||
)
|
||||
|
||||
func login(c *cli.Context) error {
|
||||
path, ok, err := checkForExistingCert()
|
||||
if ok {
|
||||
fmt.Fprintf(os.Stdout, "You have an existing certificate at %s which login would overwrite.\nIf this is intentional, please move or delete that file then run this command again.\n", path)
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
loginURL, err := url.Parse(baseLoginURL)
|
||||
if err != nil {
|
||||
// shouldn't happen, URL is hardcoded
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = transfer.Run(c, loginURL, "cert", "callback", callbackStoreURL, path, false)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to write the certificate due to the following error:\n%v\n\nYour browser will download the certificate instead. You will have to manually\ncopy it to the following path:\n\n%s\n", err, path)
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stdout, "You have successfully logged in.\nIf you wish to copy your credentials to a server, they have been saved to:\n%s\n", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkForExistingCert() (string, bool, error) {
|
||||
configPath, err := homedir.Expand(config.DefaultConfigDirs[0])
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
ok, err := config.FileExists(configPath)
|
||||
if !ok && err == nil {
|
||||
// create the config directory if it doesn't already exist
|
||||
err = os.Mkdir(configPath, 0700)
|
||||
}
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
path := filepath.Join(configPath, config.DefaultCredentialFile)
|
||||
fileInfo, err := os.Stat(path)
|
||||
if err == nil && fileInfo.Size() > 0 {
|
||||
return path, true, nil
|
||||
}
|
||||
if err != nil && err.(*os.PathError).Err != syscall.ENOENT {
|
||||
return path, false, err
|
||||
}
|
||||
|
||||
return path, false, nil
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
package main
|
||||
package tunnel
|
||||
|
||||
import (
|
||||
"github.com/cloudflare/cloudflared/tunneldns"
|
|
@ -1,4 +1,4 @@
|
|||
package main
|
||||
package tunnel
|
||||
|
||||
import (
|
||||
"os"
|
|
@ -1,4 +1,4 @@
|
|||
package main
|
||||
package tunnel
|
||||
|
||||
import (
|
||||
"fmt"
|
|
@ -1,4 +1,4 @@
|
|||
package main
|
||||
package tunnel
|
||||
|
||||
import (
|
||||
"fmt"
|
|
@ -1,4 +1,4 @@
|
|||
package main
|
||||
package tunnel
|
||||
|
||||
import (
|
||||
"testing"
|
|
@ -1,4 +1,4 @@
|
|||
package main
|
||||
package updater
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
@ -8,6 +8,7 @@ import (
|
|||
"golang.org/x/crypto/ssh/terminal"
|
||||
"gopkg.in/urfave/cli.v2"
|
||||
|
||||
"github.com/cloudflare/cloudflared/log"
|
||||
"github.com/equinox-io/equinox"
|
||||
"github.com/facebookgo/grace/gracenet"
|
||||
)
|
||||
|
@ -18,13 +19,16 @@ const (
|
|||
noUpdateOnWindowsMessage = "cloudflared will not automatically update on Windows systems."
|
||||
)
|
||||
|
||||
var publicKey = []byte(`
|
||||
var (
|
||||
publicKey = []byte(`
|
||||
-----BEGIN ECDSA PUBLIC KEY-----
|
||||
MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE4OWZocTVZ8Do/L6ScLdkV+9A0IYMHoOf
|
||||
dsCmJ/QZ6aw0w9qkkwEpne1Lmo6+0pGexZzFZOH6w5amShn+RXt7qkSid9iWlzGq
|
||||
EKx0BZogHSor9Wy5VztdFaAaVbsJiCbO
|
||||
-----END ECDSA PUBLIC KEY-----
|
||||
`)
|
||||
logger = log.CreateLogger()
|
||||
)
|
||||
|
||||
type ReleaseInfo struct {
|
||||
Updated bool
|
||||
|
@ -54,14 +58,14 @@ func checkForUpdates() ReleaseInfo {
|
|||
return ReleaseInfo{Updated: true, Version: resp.ReleaseVersion}
|
||||
}
|
||||
|
||||
func update(_ *cli.Context) error {
|
||||
func Update(_ *cli.Context) error {
|
||||
if updateApplied() {
|
||||
os.Exit(64)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoupdate(freq time.Duration, listeners *gracenet.Net, shutdownC chan struct{}) error {
|
||||
func Autoupdate(freq time.Duration, listeners *gracenet.Net, shutdownC chan struct{}) error {
|
||||
tickC := time.Tick(freq)
|
||||
for {
|
||||
if updateApplied() {
|
||||
|
@ -96,7 +100,7 @@ func updateApplied() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func isAutoupdateEnabled(c *cli.Context) bool {
|
||||
func IsAutoupdateEnabled(c *cli.Context) bool {
|
||||
if runtime.GOOS == "windows" {
|
||||
logger.Info(noUpdateOnWindowsMessage)
|
||||
return false
|
|
@ -22,6 +22,7 @@ import (
|
|||
const (
|
||||
windowsServiceName = "Cloudflared"
|
||||
windowsServiceDescription = "Argo Tunnel agent"
|
||||
windowsServiceUrl = "https://developers.cloudflare.com/argo-tunnel/reference/service/"
|
||||
|
||||
recoverActionDelay = time.Second * 20
|
||||
failureCountResetPeriod = time.Hour * 24
|
||||
|
@ -163,7 +164,7 @@ func installWindowsService(c *cli.Context) error {
|
|||
err = configRecoveryOption(s.Handle)
|
||||
if err != nil {
|
||||
logger.WithError(err).Errorf("Cannot set service recovery actions")
|
||||
logger.Infof("See %s to manually configure service recovery actions", serviceUrl)
|
||||
logger.Infof("See %s to manually configure service recovery actions", windowsServiceUrl)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"bufio"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"github.com/google/uuid"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
|
@ -103,7 +104,7 @@ func (e clientRegisterTunnelError) Error() string {
|
|||
return e.cause.Error()
|
||||
}
|
||||
|
||||
func (c *TunnelConfig) RegistrationOptions(connectionID uint8, OriginLocalIP string) *tunnelpogs.RegistrationOptions {
|
||||
func (c *TunnelConfig) RegistrationOptions(connectionID uint8, OriginLocalIP string, uuid uuid.UUID) *tunnelpogs.RegistrationOptions {
|
||||
policy := tunnelrpc.ExistingTunnelPolicy_balance
|
||||
if c.HAConnections <= 1 && c.LBPool == "" {
|
||||
policy = tunnelrpc.ExistingTunnelPolicy_disconnect
|
||||
|
@ -120,6 +121,7 @@ func (c *TunnelConfig) RegistrationOptions(connectionID uint8, OriginLocalIP str
|
|||
IsAutoupdated: c.IsAutoupdated,
|
||||
RunFromTerminal: c.RunFromTerminal,
|
||||
CompressionQuality: c.CompressionQuality,
|
||||
UUID: uuid.String(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -316,11 +318,15 @@ func RegisterTunnel(ctx context.Context, muxer *h2mux.Muxer, config *TunnelConfi
|
|||
serverInfoPromise := tsClient.GetServerInfo(ctx, func(tunnelrpc.TunnelServer_getServerInfo_Params) error {
|
||||
return nil
|
||||
})
|
||||
uuid, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
return clientRegisterTunnelError{cause: err}
|
||||
}
|
||||
registration, err := ts.RegisterTunnel(
|
||||
ctx,
|
||||
config.OriginCert,
|
||||
config.Hostname,
|
||||
config.RegistrationOptions(connectionID, originLocalIP),
|
||||
config.RegistrationOptions(connectionID, originLocalIP, uuid),
|
||||
)
|
||||
LogServerInfo(serverInfoPromise.Result(), connectionID, config.Metrics, config.Logger)
|
||||
if err != nil {
|
||||
|
@ -339,7 +345,17 @@ func RegisterTunnel(ctx context.Context, muxer *h2mux.Muxer, config *TunnelConfi
|
|||
}
|
||||
}
|
||||
|
||||
if registration.TunnelID != "" {
|
||||
config.Logger.Info("Tunnel ID: " + registration.TunnelID)
|
||||
}
|
||||
|
||||
// Print out the user's trial zone URL in a nice box (if they requested and got one)
|
||||
if isTrialTunnel := config.Hostname == "" && registration.Url != ""; isTrialTunnel {
|
||||
for _, line := range asciiBox(trialZoneMsg(registration.Url), 2) {
|
||||
config.Logger.Infoln(line)
|
||||
}
|
||||
}
|
||||
|
||||
config.Logger.Infof("Route propagating, it may take up to 1 minute for your new route to become functional")
|
||||
return nil
|
||||
}
|
||||
|
@ -454,7 +470,7 @@ func NewTunnelHandler(ctx context.Context,
|
|||
) (*TunnelHandler, string, error) {
|
||||
originURL, err := validation.ValidateUrl(config.OriginUrl)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("Unable to parse origin url %#v", originURL)
|
||||
return nil, "", fmt.Errorf("unable to parse origin URL %#v", originURL)
|
||||
}
|
||||
h := &TunnelHandler{
|
||||
originUrl: originURL,
|
||||
|
@ -630,3 +646,35 @@ func uint8ToString(input uint8) string {
|
|||
func isLBProbeRequest(req *http.Request) bool {
|
||||
return strings.HasPrefix(req.UserAgent(), lbProbeUserAgentPrefix)
|
||||
}
|
||||
|
||||
// Print out the given lines in a nice ASCII box.
|
||||
func asciiBox(lines []string, padding int) (box []string) {
|
||||
maxLen := maxLen(lines)
|
||||
spacer := strings.Repeat(" ", padding)
|
||||
|
||||
border := "+" + strings.Repeat("-", maxLen+(padding*2)) + "+"
|
||||
|
||||
box = append(box, border)
|
||||
for _, line := range lines {
|
||||
box = append(box, "|"+spacer+line+strings.Repeat(" ", maxLen-len(line))+spacer+"|")
|
||||
}
|
||||
box = append(box, border)
|
||||
return
|
||||
}
|
||||
|
||||
func maxLen(lines []string) int {
|
||||
max := 0
|
||||
for _, line := range lines {
|
||||
if len(line) > max {
|
||||
max = len(line)
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
func trialZoneMsg(url string) []string {
|
||||
return []string{
|
||||
"Your free tunnel has started! Visit it:",
|
||||
" " + url,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,9 +1,11 @@
|
|||
package pogs
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cloudflare/cloudflared/tunnelrpc"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
"zombiezen.com/go/capnproto2"
|
||||
"zombiezen.com/go/capnproto2/pogs"
|
||||
"zombiezen.com/go/capnproto2/rpc"
|
||||
|
@ -56,6 +58,7 @@ type RegistrationOptions struct {
|
|||
IsAutoupdated bool `capnp:"isAutoupdated"`
|
||||
RunFromTerminal bool `capnp:"runFromTerminal"`
|
||||
CompressionQuality uint64 `capnp:"compressionQuality"`
|
||||
UUID string `capnp:"uuid"`
|
||||
}
|
||||
|
||||
func MarshalRegistrationOptions(s tunnelrpc.RegistrationOptions, p *RegistrationOptions) error {
|
||||
@ -43,6 +43,7 @@ struct RegistrationOptions {
    runFromTerminal @9 :Bool;
    # cross stream compression setting, 0 - off, 3 - high
    compressionQuality @10 :UInt64;
    uuid @11 :Text;
}

struct Tag {
@ -105,6 +105,11 @@ func (s Authentication_List) At(i int) Authentication { return Authentication{s.
|
|||
|
||||
func (s Authentication_List) Set(i int, v Authentication) error { return s.List.SetStruct(i, v.Struct) }
|
||||
|
||||
func (s Authentication_List) String() string {
|
||||
str, _ := text.MarshalList(0xc082ef6e0d42ed1d, s.List)
|
||||
return str
|
||||
}
|
||||
|
||||
// Authentication_Promise is a wrapper for a Authentication promised by a client call.
|
||||
type Authentication_Promise struct{ *capnp.Pipeline }
|
||||
|
||||
|
@ -245,6 +250,11 @@ func (s TunnelRegistration_List) Set(i int, v TunnelRegistration) error {
|
|||
return s.List.SetStruct(i, v.Struct)
|
||||
}
|
||||
|
||||
func (s TunnelRegistration_List) String() string {
|
||||
str, _ := text.MarshalList(0xf41a0f001ad49e46, s.List)
|
||||
return str
|
||||
}
|
||||
|
||||
// TunnelRegistration_Promise is a wrapper for a TunnelRegistration promised by a client call.
|
||||
type TunnelRegistration_Promise struct{ *capnp.Pipeline }
|
||||
|
||||
|
@ -259,12 +269,12 @@ type RegistrationOptions struct{ capnp.Struct }
|
|||
const RegistrationOptions_TypeID = 0xc793e50592935b4a
|
||||
|
||||
func NewRegistrationOptions(s *capnp.Segment) (RegistrationOptions, error) {
|
||||
st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 6})
|
||||
st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 7})
|
||||
return RegistrationOptions{st}, err
|
||||
}
|
||||
|
||||
func NewRootRegistrationOptions(s *capnp.Segment) (RegistrationOptions, error) {
|
||||
st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 6})
|
||||
st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 7})
|
||||
return RegistrationOptions{st}, err
|
||||
}
|
||||
|
||||
|
@ -438,12 +448,31 @@ func (s RegistrationOptions) SetCompressionQuality(v uint64) {
|
|||
s.Struct.SetUint64(8, v)
|
||||
}
|
||||
|
||||
func (s RegistrationOptions) Uuid() (string, error) {
|
||||
p, err := s.Struct.Ptr(6)
|
||||
return p.Text(), err
|
||||
}
|
||||
|
||||
func (s RegistrationOptions) HasUuid() bool {
|
||||
p, err := s.Struct.Ptr(6)
|
||||
return p.IsValid() || err != nil
|
||||
}
|
||||
|
||||
func (s RegistrationOptions) UuidBytes() ([]byte, error) {
|
||||
p, err := s.Struct.Ptr(6)
|
||||
return p.TextBytes(), err
|
||||
}
|
||||
|
||||
func (s RegistrationOptions) SetUuid(v string) error {
|
||||
return s.Struct.SetText(6, v)
|
||||
}
|
||||
|
||||
// RegistrationOptions_List is a list of RegistrationOptions.
|
||||
type RegistrationOptions_List struct{ capnp.List }
|
||||
|
||||
// NewRegistrationOptions creates a new list of RegistrationOptions.
|
||||
func NewRegistrationOptions_List(s *capnp.Segment, sz int32) (RegistrationOptions_List, error) {
|
||||
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 6}, sz)
|
||||
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 7}, sz)
|
||||
return RegistrationOptions_List{l}, err
|
||||
}
|
||||
|
||||
|
@ -455,6 +484,11 @@ func (s RegistrationOptions_List) Set(i int, v RegistrationOptions) error {
|
|||
return s.List.SetStruct(i, v.Struct)
|
||||
}
|
||||
|
||||
func (s RegistrationOptions_List) String() string {
|
||||
str, _ := text.MarshalList(0xc793e50592935b4a, s.List)
|
||||
return str
|
||||
}
|
||||
|
||||
// RegistrationOptions_Promise is a wrapper for a RegistrationOptions promised by a client call.
|
||||
type RegistrationOptions_Promise struct{ *capnp.Pipeline }
|
||||
|
||||
|
@ -539,6 +573,11 @@ func (s Tag_List) At(i int) Tag { return Tag{s.List.Struct(i)} }
|
|||
|
||||
func (s Tag_List) Set(i int, v Tag) error { return s.List.SetStruct(i, v.Struct) }
|
||||
|
||||
func (s Tag_List) String() string {
|
||||
str, _ := text.MarshalList(0xcbd96442ae3bb01a, s.List)
|
||||
return str
|
||||
}
|
||||
|
||||
// Tag_Promise is a wrapper for a Tag promised by a client call.
|
||||
type Tag_Promise struct{ *capnp.Pipeline }
|
||||
|
||||
|
@ -664,6 +703,11 @@ func (s ServerInfo_List) At(i int) ServerInfo { return ServerInfo{s.List.Struct(
|
|||
|
||||
func (s ServerInfo_List) Set(i int, v ServerInfo) error { return s.List.SetStruct(i, v.Struct) }
|
||||
|
||||
func (s ServerInfo_List) String() string {
|
||||
str, _ := text.MarshalList(0xf2c68e2547ec3866, s.List)
|
||||
return str
|
||||
}
|
||||
|
||||
// ServerInfo_Promise is a wrapper for a ServerInfo promised by a client call.
|
||||
type ServerInfo_Promise struct{ *capnp.Pipeline }
|
||||
|
||||
|
@ -925,6 +969,11 @@ func (s TunnelServer_registerTunnel_Params_List) Set(i int, v TunnelServer_regis
|
|||
return s.List.SetStruct(i, v.Struct)
|
||||
}
|
||||
|
||||
func (s TunnelServer_registerTunnel_Params_List) String() string {
|
||||
str, _ := text.MarshalList(0xb70431c0dc014915, s.List)
|
||||
return str
|
||||
}
|
||||
|
||||
// TunnelServer_registerTunnel_Params_Promise is a wrapper for a TunnelServer_registerTunnel_Params promised by a client call.
|
||||
type TunnelServer_registerTunnel_Params_Promise struct{ *capnp.Pipeline }
|
||||
|
||||
|
@ -1004,6 +1053,11 @@ func (s TunnelServer_registerTunnel_Results_List) Set(i int, v TunnelServer_regi
|
|||
return s.List.SetStruct(i, v.Struct)
|
||||
}
|
||||
|
||||
func (s TunnelServer_registerTunnel_Results_List) String() string {
|
||||
str, _ := text.MarshalList(0xf2c122394f447e8e, s.List)
|
||||
return str
|
||||
}
|
||||
|
||||
// TunnelServer_registerTunnel_Results_Promise is a wrapper for a TunnelServer_registerTunnel_Results promised by a client call.
|
||||
type TunnelServer_registerTunnel_Results_Promise struct{ *capnp.Pipeline }
|
||||
|
||||
|
@ -1058,6 +1112,11 @@ func (s TunnelServer_getServerInfo_Params_List) Set(i int, v TunnelServer_getSer
|
|||
return s.List.SetStruct(i, v.Struct)
|
||||
}
|
||||
|
||||
func (s TunnelServer_getServerInfo_Params_List) String() string {
|
||||
str, _ := text.MarshalList(0xdc3ed6801961e502, s.List)
|
||||
return str
|
||||
}
|
||||
|
||||
// TunnelServer_getServerInfo_Params_Promise is a wrapper for a TunnelServer_getServerInfo_Params promised by a client call.
|
||||
type TunnelServer_getServerInfo_Params_Promise struct{ *capnp.Pipeline }
|
||||
|
||||
|
@ -1133,6 +1192,11 @@ func (s TunnelServer_getServerInfo_Results_List) Set(i int, v TunnelServer_getSe
|
|||
return s.List.SetStruct(i, v.Struct)
|
||||
}
|
||||
|
||||
func (s TunnelServer_getServerInfo_Results_List) String() string {
|
||||
str, _ := text.MarshalList(0xe3e37d096a5b564e, s.List)
|
||||
return str
|
||||
}
|
||||
|
||||
// TunnelServer_getServerInfo_Results_Promise is a wrapper for a TunnelServer_getServerInfo_Results promised by a client call.
|
||||
type TunnelServer_getServerInfo_Results_Promise struct{ *capnp.Pipeline }
|
||||
|
||||
|
@ -1195,6 +1259,11 @@ func (s TunnelServer_unregisterTunnel_Params_List) Set(i int, v TunnelServer_unr
|
|||
return s.List.SetStruct(i, v.Struct)
|
||||
}
|
||||
|
||||
func (s TunnelServer_unregisterTunnel_Params_List) String() string {
|
||||
str, _ := text.MarshalList(0x9b87b390babc2ccf, s.List)
|
||||
return str
|
||||
}
|
||||
|
||||
// TunnelServer_unregisterTunnel_Params_Promise is a wrapper for a TunnelServer_unregisterTunnel_Params promised by a client call.
|
||||
type TunnelServer_unregisterTunnel_Params_Promise struct{ *capnp.Pipeline }
|
||||
|
||||
|
@ -1245,6 +1314,11 @@ func (s TunnelServer_unregisterTunnel_Results_List) Set(i int, v TunnelServer_un
|
|||
return s.List.SetStruct(i, v.Struct)
|
||||
}
|
||||
|
||||
func (s TunnelServer_unregisterTunnel_Results_List) String() string {
|
||||
str, _ := text.MarshalList(0xa29a916d4ebdd894, s.List)
|
||||
return str
|
||||
}
|
||||
|
||||
// TunnelServer_unregisterTunnel_Results_Promise is a wrapper for a TunnelServer_unregisterTunnel_Results promised by a client call.
|
||||
type TunnelServer_unregisterTunnel_Results_Promise struct{ *capnp.Pipeline }
|
||||
|
||||
|
@ -1253,92 +1327,94 @@ func (p TunnelServer_unregisterTunnel_Results_Promise) Struct() (TunnelServer_un
|
|||
return TunnelServer_unregisterTunnel_Results{s}, err
|
||||
}
|
||||
|
||||
const schema_db8274f9144abc7e = "x\xda\x9cV]\x8c\x14\xd5\x12\xae\xef\x9c\x99\xedY\xb2" +
|
||||
"\xcbl\xa7\x87\\\x98\\2\x09Y\xc2OX.\\\xe0" +
|
||||
"f\xd9{\xaf\xfb\xe3\x82\x99uY\xa6\x190\x08\x98\xd0" +
|
||||
"\xcc\x1c\x86^{\xba'\xdd=\x08D~$\x18\x95(" +
|
||||
"Q\x91\x071\x18$1Qc\xfc\x89&\x06\x83\x09\xbe" +
|
||||
"H\x0c1\xbc\xa8\x91Hb\x94\xec\x03DC\xdc\xe8\x83" +
|
||||
"&\xa6\xcd\xe9\xd9\x9ei\x16\x05\xf4\xed\xccw\xeaT}" +
|
||||
"U]\xf5\xd5,\xfb\x07\x1f`\xcb\x93\xdf$\x89\xf4\xe1" +
|
||||
"d[\xf0\xbf\xea\xa53\xff9q\xf1\x08\xa9Y\x16\x1c" +
|
||||
"87\x92\xf9\xc5?\xfc5\x11V|\xc1\xf6A\xbb\xc6" +
|
||||
"\x14\"m\x82\xad'\x04\x97\x96\x9c\xfb\xf0\xd9\xf7\x9ex" +
|
||||
"\x89\xf4\x05\x00QB!Z\xf1\x1b\xfb\x15\x04M\xe5\xfd" +
|
||||
"\x84\xe0\x85\xaf>\x1a\xab>w\xf2\x0c\xa9\x0b\xa2\xfb\xd5" +
|
||||
"\x9c1J\x04\xb3\xf2\xb8r~y\xe2\x83\xc6M\x92\xcb" +
|
||||
"\xab\x1e~]>\x1d\xe4o\x13\x82\xb9?\x0cu\xda7" +
|
||||
"\x0e\x9f'5\x8b\x16\x8b\x86\xe1\xb7|\x04\xda\xcf\xf2\xa8" +
|
||||
"\xfd\x18\x1a\x8fl=\xfe|r\xe2\xf8\x05\xd2\xb3\x88q" +
|
||||
"N\xb6I\xebg\x12.\xb4W\xc2\xe0\xa7\x12\x0e#\x04" +
|
||||
"\xd9w\xfe\xfb\xd6P\xf9\xf2\xc5i\xbe\xc3\xcc\xd6)\x93" +
|
||||
"\xda\x83\x8a<mR\x1e!\x04l\xc2\x98s\xe8\xcb{" +
|
||||
"\xae\xc4RxW\xf9\x0e\x94\x08\xc6\x1e\xd8:\xde\xbe\xff" +
|
||||
"\xea\xd5\xa9\x14 \xaf^U\xc2\x14\xce*2\xfbU\xdb" +
|
||||
"\x07\xc5\xb6\xde\xcd\xd7I\xcd\xf2\x9b\x0ayY\xe9\x83v" +
|
||||
"-\x0c2\xa1\\\xd0\xaa)\x85(8v`x\xfd\xea" +
|
||||
"y\x1fO\xc6\xddmJMJwfJ\xba\xdb\xd9\xfb" +
|
||||
"\xfd}\xf3\x8f}29\x8duh\xf8Tj1\xb4\x17" +
|
||||
"\xa5\x1f\xed\x844\xbe\xb1\xf6\xe5\xcf\xb3\xe9\xecOz\x16" +
|
||||
"q\xdb\x90\xfe\xd9\xd48\xb4\xcf\xa4\xed\x8aOS9P" +
|
||||
"O\xe0\xd7m[Xn-Q\xfaWt,--\x19" +
|
||||
"5\xbb\xd6\xb7f\x8f\xe9\xf9\xa6]\xd9\x18\xe2\xfd\x05\xc7" +
|
||||
"2K{\x0b\x80\xde\x01F\xa4\xce\xed#\x02\xd4Y[" +
|
||||
"\x88\xc0Tu\x88\xa8\xdf\xac\xd8\x8e+\x82\xb2\xe9\x95\x1c" +
|
||||
"\xdb\x16\xc4K\xfe\xc1\x1d\x86e\xd8%\xd1\x0c\xd4vk" +
|
||||
"\xa0F\x80\xa2pw\x0bwi\xddvE\xc5\xf4|\xe1" +
|
||||
"6\xe0\xee\xfe\x82\xe1\x1aUOO\xf0\x04Q\x02Dj" +
|
||||
"\xe7I\"\xbd\x8bC\xff'CPq\x8d\x92(\x08\x17" +
|
||||
"\xa6S\x1e3l\xa7\xc8E\x09IbH\x12\x9aAg" +
|
||||
"\xfe\xd5\xa0\x1b\x84W\xb7|\x8f\x9a\xafn\xff~\xda\xeb" +
|
||||
"\x82\x91\x0e)w4)\xaf\xd9\"'\x8cC/0\xa8" +
|
||||
"@F\xce\x8c\xban\x84H\x1f\xe5\xd073\xa8\x8ce" +
|
||||
"\xc2\xb2n\x1a\"\xd2\x0b\x1c\xfa6\x86\xc0q\xcd\x8ai" +
|
||||
"\xdf+\x88\xbb>:\x89\xa1\x93\x10\xecr<\xdf6\xaa" +
|
||||
"\x82\x88\xd0A\x0c\x1d\x84\x83N\xcd7\x1d\xdbCWk" +
|
||||
"\x1e\x08\xe8\x8a\x95\xe0\x0f>\xf0`\xdd\xdf%l\xdf," +
|
||||
"\x19\xf21Q\xf8m[\x94\xe7\x11\xe9\x03\x1c\xfah\x8c" +
|
||||
"r\xfe\xdf\xb1<\"\xca\xebv\xb4\xf2P\x1e\x16{#" +
|
||||
"V9Q5L+\xfa\x15%3H\xca\xfd-\x9b\xdb" +
|
||||
"\xf1\xdb\x10V\xd5\x0d\xd9\xad\xaf\xe5\xc2\x0c%\xc7%\x11" +
|
||||
"Gm>F\x88\x8a\xdd\xe0(.C\x8b\xa6\xd6\x83!" +
|
||||
"\xa2\xe2B\x89\xafD\x8b\xa9\xb6\x1cY\xa2\xe2\x12\x89\xf7" +
|
||||
"\x82\x01<\x03N\xa4\xad\xc2\x1bD\xc5^\x09\x0fK\xf3" +
|
||||
"\x04\xcf A\xa4\x0d\x86\xee\x07$>*\xf1d\"\x83" +
|
||||
"$\x91\x96\xc7b\xa2\xe2\xb0\xc4\xb7K\xbc\x8de\xd0F" +
|
||||
"\xa4=\x84q\xa2\xe26\x89\xef\x92\xb8\x92\xcc\xc8\x11\xd5" +
|
||||
"\x04\\\xa2bY\xe25\x89\xa7fg\x90\"\xd2\xaa!" +
|
||||
"nI|\x8f\xc4\xdb\xe7d\xd0N\xa4\xd5q\x98\xa8\xe8" +
|
||||
"K\xfc\x90\xc4g \x83\x19D\xda~\x9c$*\x1e\x92" +
|
||||
"\xf8\xd3`\x08J\x96)l?_\x8ew\xc2n\xe1z" +
|
||||
"\xa6cG\xbf\xb9\xe35K-\xa6\x06\x1a\x8d6-8" +
|
||||
"i9\xd1H\xb7d\x9f\x804!\xa89\x8e5vs" +
|
||||
"\x87\xa5}\xa3\xe2a&\xa1\xc0\x81\xae\x96\x8c\x12$\x18" +
|
||||
"\x84\xf3^\xf2MJ;v\xbe\x8c6bhk~\xf2" +
|
||||
"Q\x87r%\xc3\xca\xd7\x9aLLo\xb0\xee;\xf5\x1a" +
|
||||
"\xe5\xca\x86/\xca\x001\x80\x10\xb8u{\xad\xebT7" +
|
||||
"B\xb8U\xd36,j\xde\x94\x9cj\xcd\x15\x9e\x07\xd3" +
|
||||
"\xb1\xf5\xbaa\x99\xdc\xdf\x8bvbh\x8f\xf5\x10\x9b\xde" +
|
||||
"C\xb9Z\xdfF\xa3\"{&\xd5\xec\xebE\x8b\x89\xf4" +
|
||||
"n\x0e}Y\xac\xaf{d_/\xe4\xd0W2\xa4\xe5" +
|
||||
"p5{x\xb7a\xd5\xc5-\xddz'\x15\xab\x08\xbf" +
|
||||
"q\xca\xdb;\x9d\xee\x82\xe1*F\xd5\xfb\x9b\xaf7\x08" +
|
||||
"/-\xc5(\xae\x80}Dz\x8aC\xcf0\xf4\xbb\xa1" +
|
||||
"V\xa1\xab\xb5%\xa6\x8d>\xff\xb3p\xfd\x8d(\x8d\xb9" +
|
||||
"O\x125W3\xa2\x8d\xa4\xea\xfb\x88\xa9y\x05\xadm" +
|
||||
"\x88h\xf9\xa9\xffw\x89\xa9\xab\x14\xb0\xe6\xbf\x01D[" +
|
||||
"_]t\x94\x98:_\x09\"e\xa4\xfeF\xc8\x01\x04" +
|
||||
"Qv\x94\x0b\xf3\x1b@\x10\xc9/\"\x05%\x1a@\x01" +
|
||||
"w_\xee[\xd4;\xe7\xddM\xc5\xa2Uy\xe7z5" +
|
||||
"\xe2\xa4%_Y\xad\x98\xdfq\"\xbd\x83C\x9f\xcd\x10" +
|
||||
"X\xce\x94\x94\xa6\xc7b-t;\x89k\x10\x8e\x84." +
|
||||
"-\x1fK\xff\x99\xa6\xff\xfdR\x85\xf7p\xe8Gb\xdd" +
|
||||
"\xfa\x98\x04\x1f\xe5\xd0\x9f\x8c\xa9\xf0\xe3r\x9b\x1c\xe1\xd0" +
|
||||
"O7\x85M=u\x94H?\xcd\xa1\xbf\xd9R5\xf5" +
|
||||
"ui\xf8\x1a\x87\xfe>\x83\"\\7\xe2\xa9\xd4\xdd\x96" +
|
||||
"X[Ne\xd4\xb4\x85'%`j\xea\xe5\x95\x9c\xf5" +
|
||||
"\x9ap\xab\x86-l\xf8k\x0d\xd3\xaa\xbb\x82Zc\xda" +
|
||||
"\xc8/?\x1cS\x8e\xdf\x03\x00\x00\xff\xff#\xa3\xb8\x8f"
|
||||
const schema_db8274f9144abc7e = "x\xda\x9cV_\x8c\x13U\x17?\xbf{\xdb\x9d\x16v" +
|
||||
"\xe9N\xa6$\xd0|\xa4\xf9\x08||\x10A\x101\xb0" +
|
||||
"\xfe\xd9?.h\xd7e\xe9\xd0\xd5\x10\xc0\x84\xa1\xbd\x94" +
|
||||
"Y\xdb\x99ff\x8a,\xe1\x7f\xd6\x88D\x09\x08<\x88" +
|
||||
"`\x90\x84\x08\x86\xa8\x89&&\x06\x13|P\x1ex " +
|
||||
"&b4\x92\x18%<H$\xc4\x8d<hb\xc6\xdc" +
|
||||
"\xe9N[v\x15\xd0\xb7\xdb\xdf=\xf7\x9c\xdf9s\xce" +
|
||||
"\xeft\xe1\xffx\x17[\x14\xfd>J\xa4\xf7F[\xfc" +
|
||||
"\xc7\xca\x97O=r\xf4\xd2\x08\xa9)\xe6\xef<\xdf\x97" +
|
||||
"\xfc\xcd\xdb\xfb\x1d\x11\x16_a\xdb\xa0\xfd\xc4\x14\"\xed" +
|
||||
":[E\xf0/?p\xfe\x93\x83\x1f\xbe\xfc&\xe9s" +
|
||||
"\x00\xa2\x88B\xb4\xf8\x0f\xf6;\x08\x9a\xca;\x09\xfe\x91" +
|
||||
"o>\x1d(\x1f:v\x8a\xd49\xe1\xfd2\xce\x18E" +
|
||||
"\xfc\xa9\x19\\\xbd\xb0(\xf2q\xed&\xca\xe5\xd5|~" +
|
||||
"C>\xed\xe6\xef\x13\xfc\x197{\xda\xac[{/\x90" +
|
||||
"\x9aB\x83E\xcd\xf0\x07\xde\x07\xed\xb6<j\xbf\x04\xc6" +
|
||||
"}\xeb\x0e\xbf\x1e\xbd~\xf8\"\xe9)4q\x8e*\xd2" +
|
||||
"\xfa\xb5\x88\x03\xed\xed \xf8\x89\xc8;\x8c\xe0\xa7>x" +
|
||||
"\xf4\xbd\x9e\xc2\xb7\x97\xc6\xf9\x0e2{E\x19\xd5\x8e\xca" +
|
||||
"w\xda!\xe5E\x82\xcf\xae\x1b\xd3w\x7f\xfd\xc4\xd5\xa6" +
|
||||
"\x14n*?\x82\"\xfe\xc0s\xeb\x86\xe2;\xae]\x1b" +
|
||||
"K\x01\x013%H\xe1\xb6\"\xb3_\xb2\xa1[\xac_" +
|
||||
"\xba\xe6\x06\xa9)~G!\xa7\xc6:\xa0\xcd\x8e\xc9 " +
|
||||
"\xff\x8d]\xd4\xce\xca\x93\x7f`g\xef\xaae3?\x1b" +
|
||||
"mvw(6*\xdd\x9d\x8eIw\x9b\x96\xfe\xfc\xd4" +
|
||||
"\xec\x03_\x8c\x8ec\x1d\x18~\x1e\x9b\x07\xedJ\xe0\xf1" +
|
||||
"Ki|k\xc5[_\xa5\x12\xa9_\xf5\x14\x9am\x03" +
|
||||
"\xfa\xb7cC\xd0\xe2qy\x8c\xc6\xd3\xa0\xf9\xbeW\xb5" +
|
||||
",Qr*\x91\xfc\x83\xe11\xbf oT\xacJ\xc7" +
|
||||
"\xf2\xad\xa6\xeb\x99Vq0\xc0;\xb3v\xc9\xcc\x0fg" +
|
||||
"\x01\xbd\x15\x8cH\x9d\xd1A\x04\xa8S\xd7\x12\x81\xa9j" +
|
||||
"\x0fQ\xa7Y\xb4lG\xf8\x05\xd3\xcd\xdb\x96%\x88\xe7" +
|
||||
"\xbd]\x1b\x8d\x92a\xe5E=P\xcb\xc4@\xb5\x009" +
|
||||
"\xe1l\x11\xce\x82\xaa\xe5\x88\xa2\xe9z\xc2\xa9\xc1\xb3:" +
|
||||
"\xb3\x86c\x94]=\xc2#D\x11\x10\xa9m\xc7\x88\xf4" +
|
||||
"v\x0e\xfd?\x0c~\xd11\xf2\"+\x1c\x98va\xc0" +
|
||||
"\xb0\xec\x1c\x17yD\x89!J\xa8\x07\x9d\xf2O\x83\xae" +
|
||||
"\x16n\xb5\xe4\xb9T\x7fu\xf7\xf7\xe3^g\x8dD@" +
|
||||
"\xb9\xb5Ny\xf9Z9a\x1cz\x96A\x05\x92rf" +
|
||||
"\xd4\x95}Dz?\x87\xbe\x86Ae,\x19\x94\xf5\xd9" +
|
||||
"\x1e\"=\xcb\xa1\xafg\xf0m\xc7,\x9a\xd6\x93\x82\xb8" +
|
||||
"\xe3\xa1\x8d\x18\xda\x08\xfef\xdb\xf5,\xa3,\x88\x08\xad" +
|
||||
"\xc4\xd0J\xd8eW<\xd3\xb6\\\xb47\xe6\x81\x80\xf6" +
|
||||
"\xa6\x12\xfc\xc5\x07\xee\xaez\x9b\x85\xe5\x99yC>&" +
|
||||
"\x0a\xbem\x83\xf2L\"\xbd\x8bC\xefo\xa2\x9cy\xa8" +
|
||||
")\x8f\x90\xf2\xca\x8d\x8d<\x94\x17\xc4p\xc8*-\xca" +
|
||||
"\x86Y\x0a\x7f\x85\xc9t\x93\xf2L\xc3\xe6n\xfcV\x07" +
|
||||
"Uu\x02v\xab*\xe9 C\xc9qa\xc8Q\xebF" +
|
||||
"\x1fQ\xae\x0b\x1c\xb9~4hj\x19\xf4\x10\xe5z%" +
|
||||
"\x9eE\x83\xa9\xb6\x12)\xa2\xdc\xd3\x12\x1f\x04\x03x\x12" +
|
||||
"\x9cH\xd3\xf1.QnP\xc2\x1b\xa4y\x84'\x11!" +
|
||||
"\xd2\x9e\x0f\xdc\xaf\x97\xf8f\x89G#ID\x894\x81" +
|
||||
"yD\xb9\x0d\x12\xdf.\xf1\x16\x96D\x0b\x916\x8c!" +
|
||||
"\xa2\xdcV\x89\x8fH\\\x89&\xe5\x88j{\xe0\x10\xe5" +
|
||||
"vK\xfcU\x89\xc7\xa6%\x11\x93\x82\x13\xe0\xfb$~" +
|
||||
"D\xe2\xf1\xe9I\xc4\xa5\xfc`/Q\xee\xa0\xc4\x8fK" +
|
||||
"|\x12\x92\x98D\xa4\xbd\x81cD\xb9\xe3\x12?#\xf1" +
|
||||
"\xc9-IL&\xd2N\x07|NJ\xfc\x1c\x18\xfc|" +
|
||||
"\xc9\x14\x96\x97)4w\xc8\x16\xe1\xb8\xa6m\x85\xbf\xb9" +
|
||||
"\xed\xd6?\x81\x18\x1bt\xd4\xda7k'\xe4\xa4#\xd1" +
|
||||
"X\x07\x04$\x08~\xc5\xb6K\x03wv^\xc23\x8a" +
|
||||
".\xa6\x10\xb2\x1cho\xc8+A\x82~\xa0\x03y\xcf" +
|
||||
"\xa4\x84me\x0ah!\x86\x96z+\xf4\xdb\x94\xce\x1b" +
|
||||
"\xa5L\xa5\xce\xc4t\xbb\xab\x9e]\xadP\xba`x\xa2" +
|
||||
"\x00\x10\x03\x08\xbeS\xb5V8vy\x10\xc2)\x9b\x96" +
|
||||
"Q\xa2\xfaM\xde.W\x1c\xe1\xba0mK\xaf\x1a%" +
|
||||
"\x93{\xc3\x88\x13C\x9c\x90\xa8V\xcd\xc2\x84Fc\xe3" +
|
||||
"\x1b-]\xe9\x184\x8a\xb2\xb1b\xf5\xe6\x9f;\x8fH" +
|
||||
"\x9f\xc5\xa1/lj\xfe\xf9\xb2\xf9\xff\xcf\xa1?\xcc\x90" +
|
||||
"\x90\x13Xo\xf4-F\xa9*&D\xba\x97\xd4\x15\x85" +
|
||||
"W;e\xacM\xf6\xac\xac\xe1(F\xd9\xfd\x97\xafW" +
|
||||
"\x0b7!\x15\xabY&;\x88\xf4\x18\x87\x9ed\xe8t" +
|
||||
"\x02AC{c\x95\x8c\xd3\x07\xfew\xe1:kQj" +
|
||||
"\xe2\x10%\xaa\xefo\x84kK\xd5\xb7\x11S3\x0a\x1a" +
|
||||
"+\x13\xe1\x86T\x1fw\x88\xa9K\x14\xb0\xfa_\x06\x84" +
|
||||
"\x7f\x0d\xd4\xb9\xfb\x89\xa9\xb3\x15?\x94O\xea\xac\x85\xec" +
|
||||
"\x82\x1ffG\xe9 \xbf.\xf8\xa1F#\x94Y\xa2." +
|
||||
"dq\xff\xe5\x9e \xf1i\xf7~*\x16\xee\xd3{\xd7" +
|
||||
"\xab\x16'!\xf9\xcaj5\xf9\x1d\"\xd2[9\xf4i" +
|
||||
"\x0c~\xc9\x1e\xd3\xdb\xc4@S\x0b\xddM\x07k\x84C" +
|
||||
"5L\xc8\xc7\xd2\x7f\xb2\xee\x7f\x87\x94\xea\xad\x1c\xfaH" +
|
||||
"S\xb7\xee\x91\xe0v\x0e}_\x93T\xbf$W\xce\x08" +
|
||||
"\x87~\xb2\xae~\xea\x89\xfdD\xfaI\x0e\xfd\\C\xfa" +
|
||||
"\xd4\xb3\xd2\xf0\x0c\x87\xfe\x11\x83\"\x1c'\xe4\xa9T\x9d" +
|
||||
"\x86\xa2\x97\xecb\xbfi\x09W\xea\xc1\x98\x04\xc8+9" +
|
||||
"\xf8\x15\xe1\x94\x0dKX\xf0V\x18f\xa9\xea\x08j\xcc" +
|
||||
"l-\xbfLo\x93\x8c\xfc\x19\x00\x00\xff\xff@V\xc0" +
|
||||
"\xb7"
|
||||
|
||||
func init() {
|
||||
schemas.Register(schema_db8274f9144abc7e,
|
||||
|
|
|
@ -6,7 +6,9 @@ import (
	"net/url"
	"strings"

	"github.com/pkg/errors"
	"golang.org/x/net/idna"
	"net/http"
)

const defaultScheme = "http"

@ -15,7 +17,7 @@ var supportedProtocol = [2]string{"http", "https"}
func ValidateHostname(hostname string) (string, error) {
	if hostname == "" {
		return "", fmt.Errorf("Hostname should not be empty")
		return "", nil
	}
	// users give a URL (which contains a scheme), not just a hostname
	if strings.Contains(hostname, ":") || strings.Contains(hostname, "%3A") {
@ -48,7 +50,7 @@ func ValidateHostname(hostname string) (string, error) {
func ValidateUrl(originUrl string) (string, error) {
	if originUrl == "" {
		return "", fmt.Errorf("Url should not be empty")
		return "", fmt.Errorf("URL should not be empty")
	}

	if net.ParseIP(originUrl) != nil {
@ -134,3 +136,44 @@ func validateIP(scheme, host, port string) (string, error) {
	}
	return fmt.Sprintf("%s://%s", scheme, host), nil
}

func ValidateHTTPService(originURL string, transport http.RoundTripper) error {
	parsedURL, err := url.Parse(originURL)
	if err != nil {
		return err
	}

	client := &http.Client{Transport: transport}

	initialResponse, initialErr := client.Get(parsedURL.String())
	if initialErr != nil || initialResponse.StatusCode != http.StatusOK {
		// Attempt the same endpoint via the other protocol (http/https); maybe we have better luck?
		oldScheme := parsedURL.Scheme
		parsedURL.Scheme = toggleProtocol(parsedURL.Scheme)

		secondResponse, _ := client.Get(parsedURL.String())

		if secondResponse != nil && secondResponse.StatusCode == http.StatusOK { // Worked this time--advise the user to switch protocols
			return errors.Errorf(
				"%s doesn't seem to work over %s, but does seem to work over %s. Consider changing the origin URL to %s",
				parsedURL.Hostname(),
				oldScheme,
				parsedURL.Scheme,
				parsedURL,
			)
		}
	}

	return initialErr
}

func toggleProtocol(httpProtocol string) string {
	switch httpProtocol {
	case "http":
		return "https"
	case "https":
		return "http"
	default:
		return httpProtocol
	}
}
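
// Usage sketch (not part of this diff): how a caller might pre-flight an
// origin with ValidateHTTPService before starting a tunnel. The "validation"
// package alias, the origin URL and the transport settings below are
// assumptions for illustration only.
//
//	transport := &http.Transport{TLSClientConfig: &tls.Config{}}
//	if err := validation.ValidateHTTPService("https://localhost:8080", transport); err != nil {
//		// The error may advise switching between http and https, as
//		// toggleProtocol above suggests.
//		log.Printf("origin check failed: %v", err)
//	}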
@ -4,13 +4,21 @@ import (
	"fmt"
	"testing"

	"context"
	"crypto/tls"
	"crypto/x509"
	"github.com/stretchr/testify/assert"
	"net"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strings"
)

func TestValidateHostname(t *testing.T) {
	var inputHostname string
	hostname, err := ValidateHostname(inputHostname)
	assert.Equal(t, err, fmt.Errorf("Hostname should not be empty"))
	assert.Equal(t, err, nil)
	assert.Empty(t, hostname)

	inputHostname = "hello.example.com"
@ -42,7 +50,7 @@ func TestValidateHostname(t *testing.T) {
func TestValidateUrl(t *testing.T) {
	validUrl, err := ValidateUrl("")
	assert.Equal(t, fmt.Errorf("Url should not be empty"), err)
	assert.Equal(t, fmt.Errorf("URL should not be empty"), err)
	assert.Empty(t, validUrl)

	validUrl, err = ValidateUrl("https://localhost:8080")
@ -134,3 +142,102 @@ func TestValidateUrl(t *testing.T) {
	assert.Equal(t, "https://hello.example.com:8080", validUrl)

}

func TestToggleProtocol(t *testing.T) {
	assert.Equal(t, "https", toggleProtocol("http"))
	assert.Equal(t, "http", toggleProtocol("https"))
	assert.Equal(t, "random", toggleProtocol("random"))
	assert.Equal(t, "", toggleProtocol(""))
}

func TestValidateHTTPService_HTTP2HTTP(t *testing.T) {
	server, client, err := createMockServerAndClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
	}))
	assert.NoError(t, err)
	defer server.Close()

	assert.Equal(t, nil, ValidateHTTPService("http://example.com/", client.Transport))
}

func TestValidateHTTPService_ServerNonOKResponse(t *testing.T) {
	server, client, err := createMockServerAndClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(400)
	}))
	assert.NoError(t, err)
	defer server.Close()

	assert.Equal(t, nil, ValidateHTTPService("http://example.com/", client.Transport))
}

func TestValidateHTTPService_HTTPS2HTTP(t *testing.T) {
	server, client, err := createMockServerAndClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
	}))
	assert.NoError(t, err)
	defer server.Close()

	assert.Equal(t,
		"example.com doesn't seem to work over https, but does seem to work over http. Consider changing the origin URL to http://example.com:1234/",
		ValidateHTTPService("https://example.com:1234/", client.Transport).Error())
}

func TestValidateHTTPService_HTTPS2HTTPS(t *testing.T) {
	server, client, err := createSecureMockServerAndClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
	}))
	assert.NoError(t, err)
	defer server.Close()

	assert.Equal(t, nil, ValidateHTTPService("https://example.com/", client.Transport))
}

func TestValidateHTTPService_HTTP2HTTPS(t *testing.T) {
	server, client, err := createSecureMockServerAndClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
	}))
	assert.NoError(t, err)
	defer server.Close()

	assert.Equal(t,
		"example.com doesn't seem to work over http, but does seem to work over https. Consider changing the origin URL to https://example.com:1234/",
		ValidateHTTPService("http://example.com:1234/", client.Transport).Error())
}

func createMockServerAndClient(handler http.Handler) (*httptest.Server, *http.Client, error) {
	client := http.DefaultClient
	server := httptest.NewServer(handler)

	client.Transport = &http.Transport{
		Proxy: func(req *http.Request) (*url.URL, error) {
			return url.Parse(server.URL)
		},
	}

	return server, client, nil
}

func createSecureMockServerAndClient(handler http.Handler) (*httptest.Server, *http.Client, error) {
	client := http.DefaultClient
	server := httptest.NewTLSServer(handler)

	cert, err := x509.ParseCertificate(server.TLS.Certificates[0].Certificate[0])
	if err != nil {
		server.Close()
		return nil, nil, err
	}

	certpool := x509.NewCertPool()
	certpool.AddCert(cert)

	client.Transport = &http.Transport{
		DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
			return net.Dial("tcp", server.URL[strings.LastIndex(server.URL, "/")+1:])
		},
		TLSClientConfig: &tls.Config{
			RootCAs: certpool,
		},
	}

	return server, client, nil
}
|
@ -0,0 +1,202 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
CoreOS Project
|
||||
Copyright 2014 CoreOS, Inc
|
||||
|
||||
This product includes software developed at CoreOS, Inc.
|
||||
(http://www.coreos.com/).
|
|
@ -0,0 +1,7 @@
|
|||
package http
|
||||
|
||||
import "net/http"
|
||||
|
||||
type Client interface {
|
||||
Do(*http.Request) (*http.Response, error)
|
||||
}
|
|
@ -0,0 +1,2 @@
|
|||
// Package http is DEPRECATED. Use net/http instead.
|
||||
package http
|
|
@ -0,0 +1,161 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func WriteError(w http.ResponseWriter, code int, msg string) {
|
||||
e := struct {
|
||||
Error string `json:"error"`
|
||||
}{
|
||||
Error: msg,
|
||||
}
|
||||
b, err := json.Marshal(e)
|
||||
if err != nil {
|
||||
log.Printf("go-oidc: failed to marshal %#v: %v", e, err)
|
||||
code = http.StatusInternalServerError
|
||||
b = []byte(`{"error":"server_error"}`)
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(code)
|
||||
w.Write(b)
|
||||
}
|
||||
|
||||
// BasicAuth parses a username and password from the request's
|
||||
// Authorization header. This was pulled from golang master:
|
||||
// https://codereview.appspot.com/76540043
|
||||
func BasicAuth(r *http.Request) (username, password string, ok bool) {
|
||||
auth := r.Header.Get("Authorization")
|
||||
if auth == "" {
|
||||
return
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(auth, "Basic ") {
|
||||
return
|
||||
}
|
||||
c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
cs := string(c)
|
||||
s := strings.IndexByte(cs, ':')
|
||||
if s < 0 {
|
||||
return
|
||||
}
|
||||
return cs[:s], cs[s+1:], true
|
||||
}
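
// Usage sketch (not part of this vendored file): extracting credentials from
// an incoming request. The username and password are illustrative only.
//
//	r, _ := http.NewRequest("GET", "https://example.com/", nil)
//	r.SetBasicAuth("alice", "s3cret")
//	if user, pass, ok := BasicAuth(r); ok {
//		_ = user // "alice"
//		_ = pass // "s3cret"
//	}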
|
||||
|
||||
func cacheControlMaxAge(hdr string) (time.Duration, bool, error) {
|
||||
for _, field := range strings.Split(hdr, ",") {
|
||||
parts := strings.SplitN(strings.TrimSpace(field), "=", 2)
|
||||
k := strings.ToLower(strings.TrimSpace(parts[0]))
|
||||
if k != "max-age" {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(parts) == 1 {
|
||||
return 0, false, errors.New("max-age has no value")
|
||||
}
|
||||
|
||||
v := strings.TrimSpace(parts[1])
|
||||
if v == "" {
|
||||
return 0, false, errors.New("max-age has empty value")
|
||||
}
|
||||
|
||||
age, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
return 0, false, err
|
||||
}
|
||||
|
||||
if age <= 0 {
|
||||
return 0, false, nil
|
||||
}
|
||||
|
||||
return time.Duration(age) * time.Second, true, nil
|
||||
}
|
||||
|
||||
return 0, false, nil
|
||||
}
|
||||
|
||||
func expires(date, expires string) (time.Duration, bool, error) {
|
||||
if date == "" || expires == "" {
|
||||
return 0, false, nil
|
||||
}
|
||||
|
||||
var te time.Time
|
||||
var err error
|
||||
if expires == "0" {
|
||||
return 0, false, nil
|
||||
}
|
||||
te, err = time.Parse(time.RFC1123, expires)
|
||||
if err != nil {
|
||||
return 0, false, err
|
||||
}
|
||||
|
||||
td, err := time.Parse(time.RFC1123, date)
|
||||
if err != nil {
|
||||
return 0, false, err
|
||||
}
|
||||
|
||||
ttl := te.Sub(td)
|
||||
|
||||
// headers indicate data already expired, caller should not
|
||||
// have to care about this case
|
||||
if ttl <= 0 {
|
||||
return 0, false, nil
|
||||
}
|
||||
|
||||
return ttl, true, nil
|
||||
}
|
||||
|
||||
func Cacheable(hdr http.Header) (time.Duration, bool, error) {
|
||||
ttl, ok, err := cacheControlMaxAge(hdr.Get("Cache-Control"))
|
||||
if err != nil || ok {
|
||||
return ttl, ok, err
|
||||
}
|
||||
|
||||
return expires(hdr.Get("Date"), hdr.Get("Expires"))
|
||||
}
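
// Usage sketch (not part of this vendored file): Cacheable reads standard
// response headers and reports how long a fetched document may be reused.
// The header value is illustrative only.
//
//	hdr := http.Header{}
//	hdr.Set("Cache-Control", "public, max-age=300")
//	if ttl, ok, err := Cacheable(hdr); err == nil && ok {
//		_ = ttl // 5 minutes here; refresh the cached copy after that
//	}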
|
||||
|
||||
// MergeQuery appends additional query values to an existing URL.
|
||||
func MergeQuery(u url.URL, q url.Values) url.URL {
|
||||
uv := u.Query()
|
||||
for k, vs := range q {
|
||||
for _, v := range vs {
|
||||
uv.Add(k, v)
|
||||
}
|
||||
}
|
||||
u.RawQuery = uv.Encode()
|
||||
return u
|
||||
}
|
||||
|
||||
// NewResourceLocation appends a resource id to the end of the requested URL path.
|
||||
func NewResourceLocation(reqURL *url.URL, id string) string {
|
||||
var u url.URL
|
||||
u = *reqURL
|
||||
u.Path = path.Join(u.Path, id)
|
||||
u.RawQuery = ""
|
||||
u.Fragment = ""
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// CopyRequest returns a clone of the provided *http.Request.
|
||||
// The returned object is a shallow copy of the struct and a
|
||||
// deep copy of its Header field.
|
||||
func CopyRequest(r *http.Request) *http.Request {
|
||||
r2 := *r
|
||||
r2.Header = make(http.Header)
|
||||
for k, s := range r.Header {
|
||||
r2.Header[k] = s
|
||||
}
|
||||
return &r2
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// ParseNonEmptyURL checks that a string is a parsable URL which is also not empty
|
||||
// since `url.Parse("")` does not return an error. Must contain a scheme and a host.
|
||||
func ParseNonEmptyURL(u string) (*url.URL, error) {
|
||||
if u == "" {
|
||||
return nil, errors.New("url is empty")
|
||||
}
|
||||
|
||||
ur, err := url.Parse(u)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ur.Scheme == "" {
|
||||
return nil, errors.New("url scheme is empty")
|
||||
}
|
||||
|
||||
if ur.Host == "" {
|
||||
return nil, errors.New("url host is empty")
|
||||
}
|
||||
|
||||
return ur, nil
|
||||
}
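
// Usage sketch (not part of this vendored file): ParseNonEmptyURL rejects
// inputs that url.Parse would silently accept. The URLs are illustrative only.
//
//	u, err := ParseNonEmptyURL("https://auth.example.com") // ok
//	_, err = ParseNonEmptyURL("auth.example.com")          // error: url scheme is empty
//	_, err = ParseNonEmptyURL("")                          // error: url is empty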
|
|
@ -0,0 +1,126 @@
|
|||
package jose
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Claims map[string]interface{}
|
||||
|
||||
func (c Claims) Add(name string, value interface{}) {
|
||||
c[name] = value
|
||||
}
|
||||
|
||||
func (c Claims) StringClaim(name string) (string, bool, error) {
|
||||
cl, ok := c[name]
|
||||
if !ok {
|
||||
return "", false, nil
|
||||
}
|
||||
|
||||
v, ok := cl.(string)
|
||||
if !ok {
|
||||
return "", false, fmt.Errorf("unable to parse claim as string: %v", name)
|
||||
}
|
||||
|
||||
return v, true, nil
|
||||
}
|
||||
|
||||
func (c Claims) StringsClaim(name string) ([]string, bool, error) {
|
||||
cl, ok := c[name]
|
||||
if !ok {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
if v, ok := cl.([]string); ok {
|
||||
return v, true, nil
|
||||
}
|
||||
|
||||
// When unmarshaled, []string will become []interface{}.
|
||||
if v, ok := cl.([]interface{}); ok {
|
||||
var ret []string
|
||||
for _, vv := range v {
|
||||
str, ok := vv.(string)
|
||||
if !ok {
|
||||
return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name)
|
||||
}
|
||||
ret = append(ret, str)
|
||||
}
|
||||
return ret, true, nil
|
||||
}
|
||||
|
||||
return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name)
|
||||
}
|
||||
|
||||
func (c Claims) Int64Claim(name string) (int64, bool, error) {
|
||||
cl, ok := c[name]
|
||||
if !ok {
|
||||
return 0, false, nil
|
||||
}
|
||||
|
||||
v, ok := cl.(int64)
|
||||
if !ok {
|
||||
vf, ok := cl.(float64)
|
||||
if !ok {
|
||||
return 0, false, fmt.Errorf("unable to parse claim as int64: %v", name)
|
||||
}
|
||||
v = int64(vf)
|
||||
}
|
||||
|
||||
return v, true, nil
|
||||
}
|
||||
|
||||
func (c Claims) Float64Claim(name string) (float64, bool, error) {
|
||||
cl, ok := c[name]
|
||||
if !ok {
|
||||
return 0, false, nil
|
||||
}
|
||||
|
||||
v, ok := cl.(float64)
|
||||
if !ok {
|
||||
vi, ok := cl.(int64)
|
||||
if !ok {
|
||||
return 0, false, fmt.Errorf("unable to parse claim as float64: %v", name)
|
||||
}
|
||||
v = float64(vi)
|
||||
}
|
||||
|
||||
return v, true, nil
|
||||
}
|
||||
|
||||
func (c Claims) TimeClaim(name string) (time.Time, bool, error) {
|
||||
v, ok, err := c.Float64Claim(name)
|
||||
if !ok || err != nil {
|
||||
return time.Time{}, ok, err
|
||||
}
|
||||
|
||||
s := math.Trunc(v)
|
||||
ns := (v - s) * math.Pow(10, 9)
|
||||
return time.Unix(int64(s), int64(ns)).UTC(), true, nil
|
||||
}
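
// Usage sketch (not part of this vendored file): typed access to claims.
// The claim names and values are illustrative only.
//
//	claims := Claims{}
//	claims.Add("sub", "user-123")
//	claims.Add("exp", float64(1735689600)) // JSON numbers decode as float64
//	if sub, ok, err := claims.StringClaim("sub"); err == nil && ok {
//		_ = sub
//	}
//	if exp, ok, err := claims.TimeClaim("exp"); err == nil && ok {
//		_ = exp // token expiry as a time.Time
//	}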
|
||||
|
||||
func decodeClaims(payload []byte) (Claims, error) {
|
||||
var c Claims
|
||||
if err := json.Unmarshal(payload, &c); err != nil {
|
||||
return nil, fmt.Errorf("malformed JWT claims, unable to decode: %v", err)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func marshalClaims(c Claims) ([]byte, error) {
|
||||
b, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func encodeClaims(c Claims) (string, error) {
|
||||
b, err := marshalClaims(c)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return encodeSegment(b), nil
|
||||
}
|
|
@ -0,0 +1,2 @@
|
|||
// Package jose is DEPRECATED. Use gopkg.in/square/go-jose.v2 instead.
|
||||
package jose
|
|
@ -0,0 +1,112 @@
|
|||
package jose
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
HeaderMediaType = "typ"
|
||||
HeaderKeyAlgorithm = "alg"
|
||||
HeaderKeyID = "kid"
|
||||
)
|
||||
|
||||
const (
|
||||
// Signature and MAC Algorithm Header Parameter Values for JWS
|
||||
// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-6
|
||||
AlgHS256 = "HS256"
|
||||
AlgHS384 = "HS384"
|
||||
AlgHS512 = "HS512"
|
||||
AlgRS256 = "RS256"
|
||||
AlgRS384 = "RS384"
|
||||
AlgRS512 = "RS512"
|
||||
AlgES256 = "ES256"
|
||||
AlgES384 = "ES384"
|
||||
AlgES512 = "ES512"
|
||||
AlgPS256 = "PS256"
|
||||
AlgPS384 = "PS384"
|
||||
AlgPS512 = "PS512"
|
||||
AlgNone = "none"
|
||||
)
|
||||
|
||||
const (
|
||||
// Algorithm Header Parameter Values for JWE
|
||||
// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#section-4.1
|
||||
AlgRSA15 = "RSA1_5"
|
||||
AlgRSAOAEP = "RSA-OAEP"
|
||||
AlgRSAOAEP256 = "RSA-OAEP-256"
|
||||
AlgA128KW = "A128KW"
|
||||
AlgA192KW = "A192KW"
|
||||
AlgA256KW = "A256KW"
|
||||
AlgDir = "dir"
|
||||
AlgECDHES = "ECDH-ES"
|
||||
AlgECDHESA128KW = "ECDH-ES+A128KW"
|
||||
AlgECDHESA192KW = "ECDH-ES+A192KW"
|
||||
AlgECDHESA256KW = "ECDH-ES+A256KW"
|
||||
AlgA128GCMKW = "A128GCMKW"
|
||||
AlgA192GCMKW = "A192GCMKW"
|
||||
AlgA256GCMKW = "A256GCMKW"
|
||||
AlgPBES2HS256A128KW = "PBES2-HS256+A128KW"
|
||||
AlgPBES2HS384A192KW = "PBES2-HS384+A192KW"
|
||||
AlgPBES2HS512A256KW = "PBES2-HS512+A256KW"
|
||||
)
|
||||
|
||||
const (
|
||||
// Encryption Algorithm Header Parameter Values for JWE
|
||||
// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-22
|
||||
EncA128CBCHS256 = "A128CBC-HS256"
|
||||
EncA128CBCHS384 = "A128CBC-HS384"
|
||||
EncA256CBCHS512 = "A256CBC-HS512"
|
||||
EncA128GCM = "A128GCM"
|
||||
EncA192GCM = "A192GCM"
|
||||
EncA256GCM = "A256GCM"
|
||||
)
|
||||
|
||||
type JOSEHeader map[string]string
|
||||
|
||||
func (j JOSEHeader) Validate() error {
|
||||
if _, exists := j[HeaderKeyAlgorithm]; !exists {
|
||||
return fmt.Errorf("header missing %q parameter", HeaderKeyAlgorithm)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeHeader(seg string) (JOSEHeader, error) {
|
||||
b, err := decodeSegment(seg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var h JOSEHeader
|
||||
err = json.Unmarshal(b, &h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func encodeHeader(h JOSEHeader) (string, error) {
|
||||
b, err := json.Marshal(h)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return encodeSegment(b), nil
|
||||
}
|
||||
|
||||
// Decode JWT specific base64url encoding with padding stripped
|
||||
func decodeSegment(seg string) ([]byte, error) {
|
||||
if l := len(seg) % 4; l != 0 {
|
||||
seg += strings.Repeat("=", 4-l)
|
||||
}
|
||||
return base64.URLEncoding.DecodeString(seg)
|
||||
}
|
||||
|
||||
// Encode JWT specific base64url encoding with padding stripped
|
||||
func encodeSegment(seg []byte) string {
|
||||
return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
|
||||
}
|
|
@ -0,0 +1,135 @@
|
|||
package jose
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// JSON Web Key
|
||||
// https://tools.ietf.org/html/draft-ietf-jose-json-web-key-36#page-5
|
||||
type JWK struct {
|
||||
ID string
|
||||
Type string
|
||||
Alg string
|
||||
Use string
|
||||
Exponent int
|
||||
Modulus *big.Int
|
||||
Secret []byte
|
||||
}
|
||||
|
||||
type jwkJSON struct {
|
||||
ID string `json:"kid"`
|
||||
Type string `json:"kty"`
|
||||
Alg string `json:"alg"`
|
||||
Use string `json:"use"`
|
||||
Exponent string `json:"e"`
|
||||
Modulus string `json:"n"`
|
||||
}
|
||||
|
||||
func (j *JWK) MarshalJSON() ([]byte, error) {
|
||||
t := jwkJSON{
|
||||
ID: j.ID,
|
||||
Type: j.Type,
|
||||
Alg: j.Alg,
|
||||
Use: j.Use,
|
||||
Exponent: encodeExponent(j.Exponent),
|
||||
Modulus: encodeModulus(j.Modulus),
|
||||
}
|
||||
|
||||
return json.Marshal(&t)
|
||||
}
|
||||
|
||||
func (j *JWK) UnmarshalJSON(data []byte) error {
|
||||
var t jwkJSON
|
||||
err := json.Unmarshal(data, &t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
e, err := decodeExponent(t.Exponent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n, err := decodeModulus(t.Modulus)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
j.ID = t.ID
|
||||
j.Type = t.Type
|
||||
j.Alg = t.Alg
|
||||
j.Use = t.Use
|
||||
j.Exponent = e
|
||||
j.Modulus = n
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type JWKSet struct {
|
||||
Keys []JWK `json:"keys"`
|
||||
}
|
||||
|
||||
func decodeExponent(e string) (int, error) {
|
||||
decE, err := decodeBase64URLPaddingOptional(e)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
var eBytes []byte
|
||||
if len(decE) < 8 {
|
||||
eBytes = make([]byte, 8-len(decE), 8)
|
||||
eBytes = append(eBytes, decE...)
|
||||
} else {
|
||||
eBytes = decE
|
||||
}
|
||||
eReader := bytes.NewReader(eBytes)
|
||||
var E uint64
|
||||
err = binary.Read(eReader, binary.BigEndian, &E)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int(E), nil
|
||||
}
|
||||
|
||||
func encodeExponent(e int) string {
|
||||
b := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b, uint64(e))
|
||||
var idx int
|
||||
for ; idx < 8; idx++ {
|
||||
if b[idx] != 0x0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return base64.RawURLEncoding.EncodeToString(b[idx:])
|
||||
}
|
||||
|
||||
// Turns a URL encoded modulus of a key into a big int.
|
||||
func decodeModulus(n string) (*big.Int, error) {
|
||||
decN, err := decodeBase64URLPaddingOptional(n)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
N := big.NewInt(0)
|
||||
N.SetBytes(decN)
|
||||
return N, nil
|
||||
}
|
||||
|
||||
func encodeModulus(n *big.Int) string {
|
||||
return base64.RawURLEncoding.EncodeToString(n.Bytes())
|
||||
}
|
||||
|
||||
// decodeBase64URLPaddingOptional decodes Base64 whether there is padding or not.
|
||||
// The stdlib version currently doesn't handle this.
|
||||
// We can get rid of this if this bug:
|
||||
// https://github.com/golang/go/issues/4237
|
||||
// ever closes.
|
||||
func decodeBase64URLPaddingOptional(e string) ([]byte, error) {
|
||||
if m := len(e) % 4; m != 0 {
|
||||
e += strings.Repeat("=", 4-m)
|
||||
}
|
||||
return base64.URLEncoding.DecodeString(e)
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
package jose
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type JWS struct {
|
||||
RawHeader string
|
||||
Header JOSEHeader
|
||||
RawPayload string
|
||||
Payload []byte
|
||||
Signature []byte
|
||||
}
|
||||
|
||||
// ParseJWS parses a raw encoded JWS token and verifies its structure.
|
||||
func ParseJWS(raw string) (JWS, error) {
|
||||
parts := strings.Split(raw, ".")
|
||||
if len(parts) != 3 {
|
||||
return JWS{}, fmt.Errorf("malformed JWS, only %d segments", len(parts))
|
||||
}
|
||||
|
||||
rawSig := parts[2]
|
||||
jws := JWS{
|
||||
RawHeader: parts[0],
|
||||
RawPayload: parts[1],
|
||||
}
|
||||
|
||||
header, err := decodeHeader(jws.RawHeader)
|
||||
if err != nil {
|
||||
return JWS{}, fmt.Errorf("malformed JWS, unable to decode header, %s", err)
|
||||
}
|
||||
if err = header.Validate(); err != nil {
|
||||
return JWS{}, fmt.Errorf("malformed JWS, %s", err)
|
||||
}
|
||||
jws.Header = header
|
||||
|
||||
payload, err := decodeSegment(jws.RawPayload)
|
||||
if err != nil {
|
||||
return JWS{}, fmt.Errorf("malformed JWS, unable to decode payload: %s", err)
|
||||
}
|
||||
jws.Payload = payload
|
||||
|
||||
sig, err := decodeSegment(rawSig)
|
||||
if err != nil {
|
||||
return JWS{}, fmt.Errorf("malformed JWS, unable to decode signature: %s", err)
|
||||
}
|
||||
jws.Signature = sig
|
||||
|
||||
return jws, nil
|
||||
}
|
|
@ -0,0 +1,82 @@
|
|||
package jose
|
||||
|
||||
import "strings"
|
||||
|
||||
type JWT JWS
|
||||
|
||||
func ParseJWT(token string) (jwt JWT, err error) {
|
||||
jws, err := ParseJWS(token)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return JWT(jws), nil
|
||||
}
|
||||
|
||||
func NewJWT(header JOSEHeader, claims Claims) (jwt JWT, err error) {
|
||||
jwt = JWT{}
|
||||
|
||||
jwt.Header = header
|
||||
jwt.Header[HeaderMediaType] = "JWT"
|
||||
|
||||
claimBytes, err := marshalClaims(claims)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
jwt.Payload = claimBytes
|
||||
|
||||
eh, err := encodeHeader(header)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
jwt.RawHeader = eh
|
||||
|
||||
ec, err := encodeClaims(claims)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
jwt.RawPayload = ec
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (j *JWT) KeyID() (string, bool) {
|
||||
kID, ok := j.Header[HeaderKeyID]
|
||||
return kID, ok
|
||||
}
|
||||
|
||||
func (j *JWT) Claims() (Claims, error) {
|
||||
return decodeClaims(j.Payload)
|
||||
}
|
||||
|
||||
// Encoded data part of the token which may be signed.
|
||||
func (j *JWT) Data() string {
|
||||
return strings.Join([]string{j.RawHeader, j.RawPayload}, ".")
|
||||
}
|
||||
|
||||
// Full encoded JWT token string in format: header.claims.signature
|
||||
func (j *JWT) Encode() string {
|
||||
d := j.Data()
|
||||
s := encodeSegment(j.Signature)
|
||||
return strings.Join([]string{d, s}, ".")
|
||||
}
|
||||
|
||||
func NewSignedJWT(claims Claims, s Signer) (*JWT, error) {
|
||||
header := JOSEHeader{
|
||||
HeaderKeyAlgorithm: s.Alg(),
|
||||
HeaderKeyID: s.ID(),
|
||||
}
|
||||
|
||||
jwt, err := NewJWT(header, claims)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sig, err := s.Sign([]byte(jwt.Data()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jwt.Signature = sig
|
||||
|
||||
return &jwt, nil
|
||||
}
|
|
@ -0,0 +1,24 @@
|
|||
package jose
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type Verifier interface {
|
||||
ID() string
|
||||
Alg() string
|
||||
Verify(sig []byte, data []byte) error
|
||||
}
|
||||
|
||||
type Signer interface {
|
||||
Verifier
|
||||
Sign(data []byte) (sig []byte, err error)
|
||||
}
|
||||
|
||||
func NewVerifier(jwk JWK) (Verifier, error) {
|
||||
if jwk.Type != "RSA" {
|
||||
return nil, fmt.Errorf("unsupported key type %q", jwk.Type)
|
||||
}
|
||||
|
||||
return NewVerifierRSA(jwk)
|
||||
}
|
|
@ -0,0 +1,67 @@
|
|||
package jose
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type VerifierRSA struct {
|
||||
KeyID string
|
||||
Hash crypto.Hash
|
||||
PublicKey rsa.PublicKey
|
||||
}
|
||||
|
||||
type SignerRSA struct {
|
||||
PrivateKey rsa.PrivateKey
|
||||
VerifierRSA
|
||||
}
|
||||
|
||||
func NewVerifierRSA(jwk JWK) (*VerifierRSA, error) {
|
||||
if jwk.Alg != "" && jwk.Alg != "RS256" {
|
||||
return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
|
||||
}
|
||||
|
||||
v := VerifierRSA{
|
||||
KeyID: jwk.ID,
|
||||
PublicKey: rsa.PublicKey{
|
||||
N: jwk.Modulus,
|
||||
E: jwk.Exponent,
|
||||
},
|
||||
Hash: crypto.SHA256,
|
||||
}
|
||||
|
||||
return &v, nil
|
||||
}
|
||||
|
||||
func NewSignerRSA(kid string, key rsa.PrivateKey) *SignerRSA {
|
||||
return &SignerRSA{
|
||||
PrivateKey: key,
|
||||
VerifierRSA: VerifierRSA{
|
||||
KeyID: kid,
|
||||
PublicKey: key.PublicKey,
|
||||
Hash: crypto.SHA256,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (v *VerifierRSA) ID() string {
|
||||
return v.KeyID
|
||||
}
|
||||
|
||||
func (v *VerifierRSA) Alg() string {
|
||||
return "RS256"
|
||||
}
|
||||
|
||||
func (v *VerifierRSA) Verify(sig []byte, data []byte) error {
|
||||
h := v.Hash.New()
|
||||
h.Write(data)
|
||||
return rsa.VerifyPKCS1v15(&v.PublicKey, v.Hash, h.Sum(nil), sig)
|
||||
}
|
||||
|
||||
func (s *SignerRSA) Sign(data []byte) ([]byte, error) {
|
||||
h := s.Hash.New()
|
||||
h.Write(data)
|
||||
return rsa.SignPKCS1v15(rand.Reader, &s.PrivateKey, s.Hash, h.Sum(nil))
|
||||
}
|
|
@ -0,0 +1,2 @@
|
|||
// Package key is DEPRECATED. Use github.com/coreos/go-oidc instead.
|
||||
package key
|
|
@ -0,0 +1,153 @@
|
|||
package key
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
)
|
||||
|
||||
func NewPublicKey(jwk jose.JWK) *PublicKey {
|
||||
return &PublicKey{jwk: jwk}
|
||||
}
|
||||
|
||||
type PublicKey struct {
|
||||
jwk jose.JWK
|
||||
}
|
||||
|
||||
func (k *PublicKey) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(&k.jwk)
|
||||
}
|
||||
|
||||
func (k *PublicKey) UnmarshalJSON(data []byte) error {
|
||||
var jwk jose.JWK
|
||||
if err := json.Unmarshal(data, &jwk); err != nil {
|
||||
return err
|
||||
}
|
||||
k.jwk = jwk
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *PublicKey) ID() string {
|
||||
return k.jwk.ID
|
||||
}
|
||||
|
||||
func (k *PublicKey) Verifier() (jose.Verifier, error) {
|
||||
return jose.NewVerifierRSA(k.jwk)
|
||||
}
|
||||
|
||||
type PrivateKey struct {
|
||||
KeyID string
|
||||
PrivateKey *rsa.PrivateKey
|
||||
}
|
||||
|
||||
func (k *PrivateKey) ID() string {
|
||||
return k.KeyID
|
||||
}
|
||||
|
||||
func (k *PrivateKey) Signer() jose.Signer {
|
||||
return jose.NewSignerRSA(k.ID(), *k.PrivateKey)
|
||||
}
|
||||
|
||||
func (k *PrivateKey) JWK() jose.JWK {
|
||||
return jose.JWK{
|
||||
ID: k.KeyID,
|
||||
Type: "RSA",
|
||||
Alg: "RS256",
|
||||
Use: "sig",
|
||||
Exponent: k.PrivateKey.PublicKey.E,
|
||||
Modulus: k.PrivateKey.PublicKey.N,
|
||||
}
|
||||
}
|
||||
|
||||
type KeySet interface {
|
||||
ExpiresAt() time.Time
|
||||
}
|
||||
|
||||
type PublicKeySet struct {
|
||||
keys []PublicKey
|
||||
index map[string]*PublicKey
|
||||
expiresAt time.Time
|
||||
}
|
||||
|
||||
func NewPublicKeySet(jwks []jose.JWK, exp time.Time) *PublicKeySet {
|
||||
keys := make([]PublicKey, len(jwks))
|
||||
index := make(map[string]*PublicKey)
|
||||
for i, jwk := range jwks {
|
||||
keys[i] = *NewPublicKey(jwk)
|
||||
index[keys[i].ID()] = &keys[i]
|
||||
}
|
||||
return &PublicKeySet{
|
||||
keys: keys,
|
||||
index: index,
|
||||
expiresAt: exp,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *PublicKeySet) ExpiresAt() time.Time {
|
||||
return s.expiresAt
|
||||
}
|
||||
|
||||
func (s *PublicKeySet) Keys() []PublicKey {
|
||||
return s.keys
|
||||
}
|
||||
|
||||
func (s *PublicKeySet) Key(id string) *PublicKey {
|
||||
return s.index[id]
|
||||
}
|
||||
|
||||
type PrivateKeySet struct {
|
||||
keys []*PrivateKey
|
||||
ActiveKeyID string
|
||||
expiresAt time.Time
|
||||
}
|
||||
|
||||
func NewPrivateKeySet(keys []*PrivateKey, exp time.Time) *PrivateKeySet {
|
||||
return &PrivateKeySet{
|
||||
keys: keys,
|
||||
ActiveKeyID: keys[0].ID(),
|
||||
expiresAt: exp.UTC(),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *PrivateKeySet) Keys() []*PrivateKey {
|
||||
return s.keys
|
||||
}
|
||||
|
||||
func (s *PrivateKeySet) ExpiresAt() time.Time {
|
||||
return s.expiresAt
|
||||
}
|
||||
|
||||
func (s *PrivateKeySet) Active() *PrivateKey {
|
||||
for i, k := range s.keys {
|
||||
if k.ID() == s.ActiveKeyID {
|
||||
return s.keys[i]
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type GeneratePrivateKeyFunc func() (*PrivateKey, error)
|
||||
|
||||
func GeneratePrivateKey() (*PrivateKey, error) {
|
||||
pk, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keyID := make([]byte, 20)
|
||||
if _, err := io.ReadFull(rand.Reader, keyID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k := PrivateKey{
|
||||
KeyID: hex.EncodeToString(keyID),
|
||||
PrivateKey: pk,
|
||||
}
|
||||
|
||||
return &k, nil
|
||||
}
@@ -0,0 +1,99 @@
package key
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/jonboulle/clockwork"
|
||||
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
"github.com/coreos/pkg/health"
|
||||
)
|
||||
|
||||
type PrivateKeyManager interface {
|
||||
ExpiresAt() time.Time
|
||||
Signer() (jose.Signer, error)
|
||||
JWKs() ([]jose.JWK, error)
|
||||
PublicKeys() ([]PublicKey, error)
|
||||
|
||||
WritableKeySetRepo
|
||||
health.Checkable
|
||||
}
|
||||
|
||||
func NewPrivateKeyManager() PrivateKeyManager {
|
||||
return &privateKeyManager{
|
||||
clock: clockwork.NewRealClock(),
|
||||
}
|
||||
}
|
||||
|
||||
type privateKeyManager struct {
|
||||
keySet *PrivateKeySet
|
||||
clock clockwork.Clock
|
||||
}
|
||||
|
||||
func (m *privateKeyManager) ExpiresAt() time.Time {
|
||||
if m.keySet == nil {
|
||||
return m.clock.Now().UTC()
|
||||
}
|
||||
|
||||
return m.keySet.ExpiresAt()
|
||||
}
|
||||
|
||||
func (m *privateKeyManager) Signer() (jose.Signer, error) {
|
||||
if err := m.Healthy(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m.keySet.Active().Signer(), nil
|
||||
}
|
||||
|
||||
func (m *privateKeyManager) JWKs() ([]jose.JWK, error) {
|
||||
if err := m.Healthy(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
keys := m.keySet.Keys()
|
||||
jwks := make([]jose.JWK, len(keys))
|
||||
for i, k := range keys {
|
||||
jwks[i] = k.JWK()
|
||||
}
|
||||
return jwks, nil
|
||||
}
|
||||
|
||||
func (m *privateKeyManager) PublicKeys() ([]PublicKey, error) {
|
||||
jwks, err := m.JWKs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keys := make([]PublicKey, len(jwks))
|
||||
for i, jwk := range jwks {
|
||||
keys[i] = *NewPublicKey(jwk)
|
||||
}
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func (m *privateKeyManager) Healthy() error {
|
||||
if m.keySet == nil {
|
||||
return errors.New("private key manager uninitialized")
|
||||
}
|
||||
|
||||
if len(m.keySet.Keys()) == 0 {
|
||||
return errors.New("private key manager zero keys")
|
||||
}
|
||||
|
||||
if m.keySet.ExpiresAt().Before(m.clock.Now().UTC()) {
|
||||
return errors.New("private key manager keys expired")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *privateKeyManager) Set(keySet KeySet) error {
|
||||
privKeySet, ok := keySet.(*PrivateKeySet)
|
||||
if !ok {
|
||||
return errors.New("unable to cast to PrivateKeySet")
|
||||
}
|
||||
|
||||
m.keySet = privKeySet
|
||||
return nil
|
||||
}
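A short sketch of driving the private key manager above with a key set produced by the helpers in this package; the one-hour TTL is illustrative only:

	package main

	import (
		"log"
		"time"

		"github.com/coreos/go-oidc/key"
	)

	func main() {
		// Generate a single signing key and wrap it in a key set valid for an hour.
		k, err := key.GeneratePrivateKey()
		if err != nil {
			log.Fatal(err)
		}
		ks := key.NewPrivateKeySet([]*key.PrivateKey{k}, time.Now().Add(time.Hour))

		mgr := key.NewPrivateKeyManager()
		if err := mgr.Set(ks); err != nil {
			log.Fatal(err)
		}

		// The manager now exposes a JOSE signer and the matching public JWKs.
		if _, err := mgr.Signer(); err != nil {
			log.Fatal(err)
		}
		jwks, err := mgr.JWKs()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("active key %s; publishing %d JWK(s)", k.ID(), len(jwks))
	}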
@@ -0,0 +1,55 @@
package key
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var ErrorNoKeys = errors.New("no keys found")
|
||||
|
||||
type WritableKeySetRepo interface {
|
||||
Set(KeySet) error
|
||||
}
|
||||
|
||||
type ReadableKeySetRepo interface {
|
||||
Get() (KeySet, error)
|
||||
}
|
||||
|
||||
type PrivateKeySetRepo interface {
|
||||
WritableKeySetRepo
|
||||
ReadableKeySetRepo
|
||||
}
|
||||
|
||||
func NewPrivateKeySetRepo() PrivateKeySetRepo {
|
||||
return &memPrivateKeySetRepo{}
|
||||
}
|
||||
|
||||
type memPrivateKeySetRepo struct {
|
||||
mu sync.RWMutex
|
||||
pks PrivateKeySet
|
||||
}
|
||||
|
||||
func (r *memPrivateKeySetRepo) Set(ks KeySet) error {
|
||||
pks, ok := ks.(*PrivateKeySet)
|
||||
if !ok {
|
||||
return errors.New("unable to cast to PrivateKeySet")
|
||||
} else if pks == nil {
|
||||
return errors.New("nil KeySet")
|
||||
}
|
||||
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
r.pks = *pks
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *memPrivateKeySetRepo) Get() (KeySet, error) {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
|
||||
if r.pks.keys == nil {
|
||||
return nil, ErrorNoKeys
|
||||
}
|
||||
return KeySet(&r.pks), nil
|
||||
}
@@ -0,0 +1,159 @@
package key
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
ptime "github.com/coreos/pkg/timeutil"
|
||||
"github.com/jonboulle/clockwork"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrorPrivateKeysExpired = errors.New("private keys have expired")
|
||||
)
|
||||
|
||||
func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator {
|
||||
return &PrivateKeyRotator{
|
||||
repo: repo,
|
||||
ttl: ttl,
|
||||
|
||||
keep: 2,
|
||||
generateKey: GeneratePrivateKey,
|
||||
clock: clockwork.NewRealClock(),
|
||||
}
|
||||
}
|
||||
|
||||
type PrivateKeyRotator struct {
|
||||
repo PrivateKeySetRepo
|
||||
generateKey GeneratePrivateKeyFunc
|
||||
clock clockwork.Clock
|
||||
keep int
|
||||
ttl time.Duration
|
||||
}
|
||||
|
||||
func (r *PrivateKeyRotator) expiresAt() time.Time {
|
||||
return r.clock.Now().UTC().Add(r.ttl)
|
||||
}
|
||||
|
||||
func (r *PrivateKeyRotator) Healthy() error {
|
||||
pks, err := r.privateKeySet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if r.clock.Now().After(pks.ExpiresAt()) {
|
||||
return ErrorPrivateKeysExpired
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *PrivateKeyRotator) privateKeySet() (*PrivateKeySet, error) {
|
||||
ks, err := r.repo.Get()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pks, ok := ks.(*PrivateKeySet)
|
||||
if !ok {
|
||||
return nil, errors.New("unable to cast to PrivateKeySet")
|
||||
}
|
||||
return pks, nil
|
||||
}
|
||||
|
||||
func (r *PrivateKeyRotator) nextRotation() (time.Duration, error) {
|
||||
pks, err := r.privateKeySet()
|
||||
if err == ErrorNoKeys {
|
||||
return 0, nil
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
now := r.clock.Now()
|
||||
|
||||
// Ideally, we want to rotate after half the TTL has elapsed.
|
||||
idealRotationTime := pks.ExpiresAt().Add(-r.ttl / 2)
|
||||
|
||||
// If we are past the ideal rotation time, rotate immediately.
|
||||
return max(0, idealRotationTime.Sub(now)), nil
|
||||
}
|
||||
|
||||
func max(a, b time.Duration) time.Duration {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (r *PrivateKeyRotator) Run() chan struct{} {
|
||||
attempt := func() {
|
||||
k, err := r.generateKey()
|
||||
if err != nil {
|
||||
log.Printf("go-oidc: failed generating signing key: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
exp := r.expiresAt()
|
||||
if err := rotatePrivateKeys(r.repo, k, r.keep, exp); err != nil {
|
||||
log.Printf("go-oidc: key rotation failed: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
stop := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
var nextRotation time.Duration
|
||||
var sleep time.Duration
|
||||
var err error
|
||||
for {
|
||||
if nextRotation, err = r.nextRotation(); err == nil {
|
||||
break
|
||||
}
|
||||
sleep = ptime.ExpBackoff(sleep, time.Minute)
|
||||
log.Printf("go-oidc: error getting nextRotation, retrying in %v: %v", sleep, err)
|
||||
time.Sleep(sleep)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-r.clock.After(nextRotation):
|
||||
attempt()
|
||||
case <-stop:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return stop
|
||||
}
|
||||
|
||||
func rotatePrivateKeys(repo PrivateKeySetRepo, k *PrivateKey, keep int, exp time.Time) error {
|
||||
ks, err := repo.Get()
|
||||
if err != nil && err != ErrorNoKeys {
|
||||
return err
|
||||
}
|
||||
|
||||
var keys []*PrivateKey
|
||||
if ks != nil {
|
||||
pks, ok := ks.(*PrivateKeySet)
|
||||
if !ok {
|
||||
return errors.New("unable to cast to PrivateKeySet")
|
||||
}
|
||||
keys = pks.Keys()
|
||||
}
|
||||
|
||||
keys = append([]*PrivateKey{k}, keys...)
|
||||
if l := len(keys); l > keep {
|
||||
keys = keys[0:keep]
|
||||
}
|
||||
|
||||
nks := PrivateKeySet{
|
||||
keys: keys,
|
||||
ActiveKeyID: k.ID(),
|
||||
expiresAt: exp,
|
||||
}
|
||||
|
||||
return repo.Set(KeySet(&nks))
|
||||
}
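The rotator above is typically wired to the in-memory repo from repo.go; a minimal sketch (the 24-hour TTL and the crude wait are illustrative, not values taken from cloudflared):

	package main

	import (
		"log"
		"time"

		"github.com/coreos/go-oidc/key"
	)

	func main() {
		// In-memory repo that will hold the rotating private key set.
		repo := key.NewPrivateKeySetRepo()

		// Rotate every 24 hours; the rotator keeps the newest two keys by default.
		rotator := key.NewPrivateKeyRotator(repo, 24*time.Hour)
		stop := rotator.Run()
		defer close(stop)

		// Crude wait for the background goroutine to generate the first key;
		// a real caller would rely on the repo rather than polling like this.
		time.Sleep(time.Second)
		ks, err := repo.Get()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("current key set expires at %v", ks.ExpiresAt())
	}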
@@ -0,0 +1,91 @@
package key
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/jonboulle/clockwork"
|
||||
|
||||
"github.com/coreos/pkg/timeutil"
|
||||
)
|
||||
|
||||
func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer {
|
||||
return &KeySetSyncer{
|
||||
readable: r,
|
||||
writable: w,
|
||||
clock: clockwork.NewRealClock(),
|
||||
}
|
||||
}
|
||||
|
||||
type KeySetSyncer struct {
|
||||
readable ReadableKeySetRepo
|
||||
writable WritableKeySetRepo
|
||||
clock clockwork.Clock
|
||||
}
|
||||
|
||||
func (s *KeySetSyncer) Run() chan struct{} {
|
||||
stop := make(chan struct{})
|
||||
go func() {
|
||||
var failing bool
|
||||
var next time.Duration
|
||||
for {
|
||||
exp, err := syncKeySet(s.readable, s.writable, s.clock)
|
||||
if err != nil || exp == 0 {
|
||||
if !failing {
|
||||
failing = true
|
||||
next = time.Second
|
||||
} else {
|
||||
next = timeutil.ExpBackoff(next, time.Minute)
|
||||
}
|
||||
if exp == 0 {
|
||||
log.Printf("Synced to already expired key set, retrying in %v: %v", next, err)
|
||||
|
||||
} else {
|
||||
log.Printf("Failed syncing key set, retrying in %v: %v", next, err)
|
||||
}
|
||||
} else {
|
||||
failing = false
|
||||
next = exp / 2
|
||||
}
|
||||
|
||||
select {
|
||||
case <-s.clock.After(next):
|
||||
continue
|
||||
case <-stop:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return stop
|
||||
}
|
||||
|
||||
func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) {
|
||||
return syncKeySet(r, w, clockwork.NewRealClock())
|
||||
}
|
||||
|
||||
// syncKeySet copies the keyset from r to the KeySet at w and returns the duration in which the KeySet will expire.
|
||||
// If keyset has already expired, returns a zero duration.
|
||||
func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) {
|
||||
var ks KeySet
|
||||
ks, err = r.Get()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if ks == nil {
|
||||
err = errors.New("no source KeySet")
|
||||
return
|
||||
}
|
||||
|
||||
if err = w.Set(ks); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := clock.Now()
|
||||
if ks.ExpiresAt().After(now) {
|
||||
exp = ks.ExpiresAt().Sub(now)
|
||||
}
|
||||
return
|
||||
}
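A one-shot use of Sync between two in-memory repos, as a sketch of what KeySetSyncer.Run does periodically; the one-hour TTL is illustrative:

	package main

	import (
		"log"
		"time"

		"github.com/coreos/go-oidc/key"
	)

	func main() {
		src := key.NewPrivateKeySetRepo()
		dst := key.NewPrivateKeySetRepo()

		// Seed the source repo with a freshly generated key set.
		k, err := key.GeneratePrivateKey()
		if err != nil {
			log.Fatal(err)
		}
		ks := key.NewPrivateKeySet([]*key.PrivateKey{k}, time.Now().Add(time.Hour))
		if err := src.Set(ks); err != nil {
			log.Fatal(err)
		}

		// Copy the key set across and learn how long it remains valid.
		ttl, err := key.Sync(src, dst)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("synced key set; it expires in %v", ttl)
	}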
@@ -0,0 +1,2 @@
// Package oauth2 is DEPRECATED. Use golang.org/x/oauth2 instead.
package oauth2
@@ -0,0 +1,29 @@
package oauth2

const (
	ErrorAccessDenied            = "access_denied"
	ErrorInvalidClient           = "invalid_client"
	ErrorInvalidGrant            = "invalid_grant"
	ErrorInvalidRequest          = "invalid_request"
	ErrorServerError             = "server_error"
	ErrorUnauthorizedClient      = "unauthorized_client"
	ErrorUnsupportedGrantType    = "unsupported_grant_type"
	ErrorUnsupportedResponseType = "unsupported_response_type"
)

type Error struct {
	Type        string `json:"error"`
	Description string `json:"error_description,omitempty"`
	State       string `json:"state,omitempty"`
}

func (e *Error) Error() string {
	if e.Description != "" {
		return e.Type + ": " + e.Description
	}
	return e.Type
}

func NewError(typ string) *Error {
	return &Error{Type: typ}
}
@@ -0,0 +1,416 @@
package oauth2
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
phttp "github.com/coreos/go-oidc/http"
|
||||
)
|
||||
|
||||
// ResponseTypesEqual compares two response_type values. If either
|
||||
// contains a space, it is treated as an unordered list. For example,
|
||||
// comparing "code id_token" and "id_token code" would evaluate to true.
|
||||
func ResponseTypesEqual(r1, r2 string) bool {
|
||||
if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") {
|
||||
// fast route, no split needed
|
||||
return r1 == r2
|
||||
}
|
||||
|
||||
// split, sort, and compare
|
||||
r1Fields := strings.Fields(r1)
|
||||
r2Fields := strings.Fields(r2)
|
||||
if len(r1Fields) != len(r2Fields) {
|
||||
return false
|
||||
}
|
||||
sort.Strings(r1Fields)
|
||||
sort.Strings(r2Fields)
|
||||
for i, r1Field := range r1Fields {
|
||||
if r1Field != r2Fields[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
const (
|
||||
// OAuth2.0 response types registered by OIDC.
|
||||
//
|
||||
// See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents
|
||||
ResponseTypeCode = "code"
|
||||
ResponseTypeCodeIDToken = "code id_token"
|
||||
ResponseTypeCodeIDTokenToken = "code id_token token"
|
||||
ResponseTypeIDToken = "id_token"
|
||||
ResponseTypeIDTokenToken = "id_token token"
|
||||
ResponseTypeToken = "token"
|
||||
ResponseTypeNone = "none"
|
||||
)
|
||||
|
||||
const (
|
||||
GrantTypeAuthCode = "authorization_code"
|
||||
GrantTypeClientCreds = "client_credentials"
|
||||
GrantTypeUserCreds = "password"
|
||||
GrantTypeImplicit = "implicit"
|
||||
GrantTypeRefreshToken = "refresh_token"
|
||||
|
||||
AuthMethodClientSecretPost = "client_secret_post"
|
||||
AuthMethodClientSecretBasic = "client_secret_basic"
|
||||
AuthMethodClientSecretJWT = "client_secret_jwt"
|
||||
AuthMethodPrivateKeyJWT = "private_key_jwt"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Credentials ClientCredentials
|
||||
Scope []string
|
||||
RedirectURL string
|
||||
AuthURL string
|
||||
TokenURL string
|
||||
|
||||
// Must be one of the AuthMethodXXX methods above. Right now, only
|
||||
// AuthMethodClientSecretPost and AuthMethodClientSecretBasic are supported.
|
||||
AuthMethod string
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
hc phttp.Client
|
||||
creds ClientCredentials
|
||||
scope []string
|
||||
authURL *url.URL
|
||||
redirectURL *url.URL
|
||||
tokenURL *url.URL
|
||||
authMethod string
|
||||
}
|
||||
|
||||
type ClientCredentials struct {
|
||||
ID string
|
||||
Secret string
|
||||
}
|
||||
|
||||
func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) {
|
||||
if len(cfg.Credentials.ID) == 0 {
|
||||
err = errors.New("missing client id")
|
||||
return
|
||||
}
|
||||
|
||||
if len(cfg.Credentials.Secret) == 0 {
|
||||
err = errors.New("missing client secret")
|
||||
return
|
||||
}
|
||||
|
||||
if cfg.AuthMethod == "" {
|
||||
cfg.AuthMethod = AuthMethodClientSecretBasic
|
||||
} else if cfg.AuthMethod != AuthMethodClientSecretPost && cfg.AuthMethod != AuthMethodClientSecretBasic {
|
||||
err = fmt.Errorf("auth method %q is not supported", cfg.AuthMethod)
|
||||
return
|
||||
}
|
||||
|
||||
au, err := phttp.ParseNonEmptyURL(cfg.AuthURL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
tu, err := phttp.ParseNonEmptyURL(cfg.TokenURL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Allow empty redirect URL in the case where the client
|
||||
// only needs to verify a given token.
|
||||
ru, err := url.Parse(cfg.RedirectURL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
c = &Client{
|
||||
creds: cfg.Credentials,
|
||||
scope: cfg.Scope,
|
||||
redirectURL: ru,
|
||||
authURL: au,
|
||||
tokenURL: tu,
|
||||
hc: hc,
|
||||
authMethod: cfg.AuthMethod,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Return the embedded HTTP client
|
||||
func (c *Client) HttpClient() phttp.Client {
|
||||
return c.hc
|
||||
}
|
||||
|
||||
// Generate the url for initial redirect to oauth provider.
|
||||
func (c *Client) AuthCodeURL(state, accessType, prompt string) string {
|
||||
v := c.commonURLValues()
|
||||
v.Set("state", state)
|
||||
if strings.ToLower(accessType) == "offline" {
|
||||
v.Set("access_type", "offline")
|
||||
}
|
||||
|
||||
if prompt != "" {
|
||||
v.Set("prompt", prompt)
|
||||
}
|
||||
v.Set("response_type", "code")
|
||||
|
||||
q := v.Encode()
|
||||
u := *c.authURL
|
||||
if u.RawQuery == "" {
|
||||
u.RawQuery = q
|
||||
} else {
|
||||
u.RawQuery += "&" + q
|
||||
}
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (c *Client) commonURLValues() url.Values {
|
||||
return url.Values{
|
||||
"redirect_uri": {c.redirectURL.String()},
|
||||
"scope": {strings.Join(c.scope, " ")},
|
||||
"client_id": {c.creds.ID},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) {
|
||||
var req *http.Request
|
||||
var err error
|
||||
switch c.authMethod {
|
||||
case AuthMethodClientSecretPost:
|
||||
values.Set("client_secret", c.creds.Secret)
|
||||
req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case AuthMethodClientSecretBasic:
|
||||
req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
encodedID := url.QueryEscape(c.creds.ID)
|
||||
encodedSecret := url.QueryEscape(c.creds.Secret)
|
||||
req.SetBasicAuth(encodedID, encodedSecret)
|
||||
default:
|
||||
panic("misconfigured client: auth method not supported")
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
return req, nil
|
||||
|
||||
}
|
||||
|
||||
// ClientCredsToken posts the client id and secret to obtain a token scoped to the OAuth2 client via the "client_credentials" grant type.
|
||||
// May not be supported by all OAuth2 servers.
|
||||
func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err error) {
|
||||
v := url.Values{
|
||||
"scope": {strings.Join(scope, " ")},
|
||||
"grant_type": {GrantTypeClientCreds},
|
||||
}
|
||||
|
||||
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return parseTokenResponse(resp)
|
||||
}
|
||||
|
||||
// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant_type
|
||||
// May not be supported by all OAuth2 servers.
|
||||
func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) {
|
||||
v := url.Values{
|
||||
"scope": {strings.Join(c.scope, " ")},
|
||||
"grant_type": {GrantTypeUserCreds},
|
||||
"username": {username},
|
||||
"password": {password},
|
||||
}
|
||||
|
||||
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return parseTokenResponse(resp)
|
||||
}
|
||||
|
||||
// RequestToken requests a token from the Token Endpoint with the specified grantType.
|
||||
// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code.
|
||||
// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token.
|
||||
func (c *Client) RequestToken(grantType, value string) (result TokenResponse, err error) {
|
||||
v := c.commonURLValues()
|
||||
|
||||
v.Set("grant_type", grantType)
|
||||
v.Set("client_secret", c.creds.Secret)
|
||||
switch grantType {
|
||||
case GrantTypeAuthCode:
|
||||
v.Set("code", value)
|
||||
case GrantTypeRefreshToken:
|
||||
v.Set("refresh_token", value)
|
||||
default:
|
||||
err = fmt.Errorf("unsupported grant_type: %v", grantType)
|
||||
return
|
||||
}
|
||||
|
||||
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return parseTokenResponse(resp)
|
||||
}
|
||||
|
||||
func parseTokenResponse(resp *http.Response) (result TokenResponse, err error) {
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
badStatusCode := resp.StatusCode < 200 || resp.StatusCode > 299
|
||||
|
||||
contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
result = TokenResponse{
|
||||
RawBody: body,
|
||||
}
|
||||
|
||||
newError := func(typ, desc, state string) error {
|
||||
if typ == "" {
|
||||
return fmt.Errorf("unrecognized error %s", body)
|
||||
}
|
||||
return &Error{typ, desc, state}
|
||||
}
|
||||
|
||||
if contentType == "application/x-www-form-urlencoded" || contentType == "text/plain" {
|
||||
var vals url.Values
|
||||
vals, err = url.ParseQuery(string(body))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if error := vals.Get("error"); error != "" || badStatusCode {
|
||||
err = newError(error, vals.Get("error_description"), vals.Get("state"))
|
||||
return
|
||||
}
|
||||
e := vals.Get("expires_in")
|
||||
if e == "" {
|
||||
e = vals.Get("expires")
|
||||
}
|
||||
if e != "" {
|
||||
result.Expires, err = strconv.Atoi(e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
result.AccessToken = vals.Get("access_token")
|
||||
result.TokenType = vals.Get("token_type")
|
||||
result.IDToken = vals.Get("id_token")
|
||||
result.RefreshToken = vals.Get("refresh_token")
|
||||
result.Scope = vals.Get("scope")
|
||||
} else {
|
||||
var r struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
TokenType string `json:"token_type"`
|
||||
IDToken string `json:"id_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
Scope string `json:"scope"`
|
||||
State string `json:"state"`
|
||||
ExpiresIn json.Number `json:"expires_in"` // Azure AD returns string
|
||||
Expires int `json:"expires"`
|
||||
Error string `json:"error"`
|
||||
Desc string `json:"error_description"`
|
||||
}
|
||||
if err = json.Unmarshal(body, &r); err != nil {
|
||||
return
|
||||
}
|
||||
if r.Error != "" || badStatusCode {
|
||||
err = newError(r.Error, r.Desc, r.State)
|
||||
return
|
||||
}
|
||||
result.AccessToken = r.AccessToken
|
||||
result.TokenType = r.TokenType
|
||||
result.IDToken = r.IDToken
|
||||
result.RefreshToken = r.RefreshToken
|
||||
result.Scope = r.Scope
|
||||
if expiresIn, err := r.ExpiresIn.Int64(); err != nil {
|
||||
result.Expires = r.Expires
|
||||
} else {
|
||||
result.Expires = int(expiresIn)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type TokenResponse struct {
|
||||
AccessToken string
|
||||
TokenType string
|
||||
Expires int
|
||||
IDToken string
|
||||
RefreshToken string // OPTIONAL.
|
||||
Scope string // OPTIONAL, if identical to the scope requested by the client, otherwise, REQUIRED.
|
||||
RawBody []byte // In case callers need some other non-standard info from the token response
|
||||
}
|
||||
|
||||
type AuthCodeRequest struct {
|
||||
ResponseType string
|
||||
ClientID string
|
||||
RedirectURL *url.URL
|
||||
Scope []string
|
||||
State string
|
||||
}
|
||||
|
||||
func ParseAuthCodeRequest(q url.Values) (AuthCodeRequest, error) {
|
||||
acr := AuthCodeRequest{
|
||||
ResponseType: q.Get("response_type"),
|
||||
ClientID: q.Get("client_id"),
|
||||
State: q.Get("state"),
|
||||
Scope: make([]string, 0),
|
||||
}
|
||||
|
||||
qs := strings.TrimSpace(q.Get("scope"))
|
||||
if qs != "" {
|
||||
acr.Scope = strings.Split(qs, " ")
|
||||
}
|
||||
|
||||
err := func() error {
|
||||
if acr.ClientID == "" {
|
||||
return NewError(ErrorInvalidRequest)
|
||||
}
|
||||
|
||||
redirectURL := q.Get("redirect_uri")
|
||||
if redirectURL != "" {
|
||||
ru, err := url.Parse(redirectURL)
|
||||
if err != nil {
|
||||
return NewError(ErrorInvalidRequest)
|
||||
}
|
||||
acr.RedirectURL = ru
|
||||
}
|
||||
|
||||
return nil
|
||||
}()
|
||||
|
||||
return acr, err
|
||||
}
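A rough sketch of the flow this client supports; the endpoint URLs, credentials, and redirect URL are placeholders rather than values used anywhere in this change:

	package main

	import (
		"fmt"
		"log"
		"net/http"

		"github.com/coreos/go-oidc/oauth2"
	)

	func main() {
		cfg := oauth2.Config{
			Credentials: oauth2.ClientCredentials{ID: "client-id", Secret: "client-secret"},
			Scope:       []string{"openid", "email"},
			RedirectURL: "https://app.example.com/callback",
			AuthURL:     "https://issuer.example.com/authorize",
			TokenURL:    "https://issuer.example.com/token",
			// AuthMethod defaults to client_secret_basic when left empty.
		}

		client, err := oauth2.NewClient(http.DefaultClient, cfg)
		if err != nil {
			log.Fatal(err)
		}

		// 1. Send the user to the provider.
		fmt.Println("visit:", client.AuthCodeURL("opaque-state", "", ""))

		// 2. Later, exchange the returned code for tokens.
		tok, err := client.RequestToken(oauth2.GrantTypeAuthCode, "code-from-callback")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("access token:", tok.AccessToken)
	}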
@@ -0,0 +1,846 @@
package oidc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/mail"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
phttp "github.com/coreos/go-oidc/http"
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
"github.com/coreos/go-oidc/key"
|
||||
"github.com/coreos/go-oidc/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
// amount of time that must pass after the last key sync
|
||||
// completes before another attempt may begin
|
||||
keySyncWindow = 5 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultScope = []string{"openid", "email", "profile"}
|
||||
|
||||
supportedAuthMethods = map[string]struct{}{
|
||||
oauth2.AuthMethodClientSecretBasic: struct{}{},
|
||||
oauth2.AuthMethodClientSecretPost: struct{}{},
|
||||
}
|
||||
)
|
||||
|
||||
type ClientCredentials oauth2.ClientCredentials
|
||||
|
||||
type ClientIdentity struct {
|
||||
Credentials ClientCredentials
|
||||
Metadata ClientMetadata
|
||||
}
|
||||
|
||||
type JWAOptions struct {
|
||||
// SigningAlg specifies a JWA alg for signing JWTs.
|
||||
//
|
||||
// Specifying this field implies different actions depending on the context. It may
|
||||
// require objects be serialized and signed as a JWT instead of plain JSON, or
|
||||
// require an existing JWT object use the specified alg.
|
||||
//
|
||||
// See: http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
|
||||
SigningAlg string
|
||||
// EncryptionAlg, if provided, specifies that the returned or sent object be stored
|
||||
// (or nested) within a JWT object and encrypted with the provided JWA alg.
|
||||
EncryptionAlg string
|
||||
// EncryptionEnc specifies the JWA enc algorithm to use with EncryptionAlg. If
|
||||
// EncryptionAlg is provided and EncryptionEnc is omitted, this field defaults
|
||||
// to A128CBC-HS256.
|
||||
//
|
||||
// If EncryptionEnc is provided EncryptionAlg must also be specified.
|
||||
EncryptionEnc string
|
||||
}
|
||||
|
||||
func (opt JWAOptions) valid() error {
|
||||
if opt.EncryptionEnc != "" && opt.EncryptionAlg == "" {
|
||||
return errors.New("encryption encoding provided with no encryption algorithm")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (opt JWAOptions) defaults() JWAOptions {
|
||||
if opt.EncryptionAlg != "" && opt.EncryptionEnc == "" {
|
||||
opt.EncryptionEnc = jose.EncA128CBCHS256
|
||||
}
|
||||
return opt
|
||||
}
|
||||
|
||||
var (
|
||||
// Ensure ClientMetadata satisfies these interfaces.
|
||||
_ json.Marshaler = &ClientMetadata{}
|
||||
_ json.Unmarshaler = &ClientMetadata{}
|
||||
)
|
||||
|
||||
// ClientMetadata holds metadata that the authorization server associates
|
||||
// with a client identifier. The fields range from human-facing display
|
||||
// strings such as client name, to items that impact the security of the
|
||||
// protocol, such as the list of valid redirect URIs.
|
||||
//
|
||||
// See http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
|
||||
//
|
||||
// TODO: support language specific claim representations
|
||||
// http://openid.net/specs/openid-connect-registration-1_0.html#LanguagesAndScripts
|
||||
type ClientMetadata struct {
|
||||
RedirectURIs []url.URL // Required
|
||||
|
||||
// A list of OAuth 2.0 "response_type" values that the client wishes to restrict
|
||||
// itself to. Either "code", "token", or another registered extension.
|
||||
//
|
||||
// If omitted, only "code" will be used.
|
||||
ResponseTypes []string
|
||||
// A list of OAuth 2.0 grant types the client wishes to restrict itself to.
|
||||
// The grant type values used by OIDC are "authorization_code", "implicit",
|
||||
// and "refresh_token".
|
||||
//
|
||||
// If omitted, only "authorization_code" will be used.
|
||||
GrantTypes []string
|
||||
// "native" or "web". If omitted, "web".
|
||||
ApplicationType string
|
||||
|
||||
// List of email addresses.
|
||||
Contacts []mail.Address
|
||||
// Name of client to be presented to the end-user.
|
||||
ClientName string
|
||||
// URL that references a logo for the Client application.
|
||||
LogoURI *url.URL
|
||||
// URL of the home page of the Client.
|
||||
ClientURI *url.URL
|
||||
// Profile data policies and terms of use to be provided to the end user.
|
||||
PolicyURI *url.URL
|
||||
TermsOfServiceURI *url.URL
|
||||
|
||||
// URL to or the value of the client's JSON Web Key Set document.
|
||||
JWKSURI *url.URL
|
||||
JWKS *jose.JWKSet
|
||||
|
||||
// URL referencing a file with a single JSON array of redirect URIs.
|
||||
SectorIdentifierURI *url.URL
|
||||
|
||||
SubjectType string
|
||||
|
||||
// Options to restrict the JWS alg and enc values used for server responses and requests.
|
||||
IDTokenResponseOptions JWAOptions
|
||||
UserInfoResponseOptions JWAOptions
|
||||
RequestObjectOptions JWAOptions
|
||||
|
||||
// Client requested authorization method and signing options for the token endpoint.
|
||||
//
|
||||
// Defaults to "client_secret_basic"
|
||||
TokenEndpointAuthMethod string
|
||||
TokenEndpointAuthSigningAlg string
|
||||
|
||||
// DefaultMaxAge specifies the maximum amount of time in seconds before an authorized
|
||||
// user must reauthorize.
|
||||
//
|
||||
// If 0, no limitation is placed on the maximum.
|
||||
DefaultMaxAge int64
|
||||
// RequireAuthTime specifies if the auth_time claim in the ID token is required.
|
||||
RequireAuthTime bool
|
||||
|
||||
// Default Authentication Context Class Reference values for authentication requests.
|
||||
DefaultACRValues []string
|
||||
|
||||
// URI that a third party can use to initiate a login by the relying party.
|
||||
//
|
||||
// See: http://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin
|
||||
InitiateLoginURI *url.URL
|
||||
// Pre-registered request_uri values that may be cached by the server.
|
||||
RequestURIs []url.URL
|
||||
}
|
||||
|
||||
// Defaults returns a shallow copy of ClientMetadata with default
|
||||
// values replacing omitted fields.
|
||||
func (m ClientMetadata) Defaults() ClientMetadata {
|
||||
if len(m.ResponseTypes) == 0 {
|
||||
m.ResponseTypes = []string{oauth2.ResponseTypeCode}
|
||||
}
|
||||
if len(m.GrantTypes) == 0 {
|
||||
m.GrantTypes = []string{oauth2.GrantTypeAuthCode}
|
||||
}
|
||||
if m.ApplicationType == "" {
|
||||
m.ApplicationType = "web"
|
||||
}
|
||||
if m.TokenEndpointAuthMethod == "" {
|
||||
m.TokenEndpointAuthMethod = oauth2.AuthMethodClientSecretBasic
|
||||
}
|
||||
m.IDTokenResponseOptions = m.IDTokenResponseOptions.defaults()
|
||||
m.UserInfoResponseOptions = m.UserInfoResponseOptions.defaults()
|
||||
m.RequestObjectOptions = m.RequestObjectOptions.defaults()
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *ClientMetadata) MarshalJSON() ([]byte, error) {
|
||||
e := m.toEncodableStruct()
|
||||
return json.Marshal(&e)
|
||||
}
|
||||
|
||||
func (m *ClientMetadata) UnmarshalJSON(data []byte) error {
|
||||
var e encodableClientMetadata
|
||||
if err := json.Unmarshal(data, &e); err != nil {
|
||||
return err
|
||||
}
|
||||
meta, err := e.toStruct()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := meta.Valid(); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = meta
|
||||
return nil
|
||||
}
|
||||
|
||||
type encodableClientMetadata struct {
|
||||
RedirectURIs []string `json:"redirect_uris"` // Required
|
||||
ResponseTypes []string `json:"response_types,omitempty"`
|
||||
GrantTypes []string `json:"grant_types,omitempty"`
|
||||
ApplicationType string `json:"application_type,omitempty"`
|
||||
Contacts []string `json:"contacts,omitempty"`
|
||||
ClientName string `json:"client_name,omitempty"`
|
||||
LogoURI string `json:"logo_uri,omitempty"`
|
||||
ClientURI string `json:"client_uri,omitempty"`
|
||||
PolicyURI string `json:"policy_uri,omitempty"`
|
||||
TermsOfServiceURI string `json:"tos_uri,omitempty"`
|
||||
JWKSURI string `json:"jwks_uri,omitempty"`
|
||||
JWKS *jose.JWKSet `json:"jwks,omitempty"`
|
||||
SectorIdentifierURI string `json:"sector_identifier_uri,omitempty"`
|
||||
SubjectType string `json:"subject_type,omitempty"`
|
||||
IDTokenSignedResponseAlg string `json:"id_token_signed_response_alg,omitempty"`
|
||||
IDTokenEncryptedResponseAlg string `json:"id_token_encrypted_response_alg,omitempty"`
|
||||
IDTokenEncryptedResponseEnc string `json:"id_token_encrypted_response_enc,omitempty"`
|
||||
UserInfoSignedResponseAlg string `json:"userinfo_signed_response_alg,omitempty"`
|
||||
UserInfoEncryptedResponseAlg string `json:"userinfo_encrypted_response_alg,omitempty"`
|
||||
UserInfoEncryptedResponseEnc string `json:"userinfo_encrypted_response_enc,omitempty"`
|
||||
RequestObjectSigningAlg string `json:"request_object_signing_alg,omitempty"`
|
||||
RequestObjectEncryptionAlg string `json:"request_object_encryption_alg,omitempty"`
|
||||
RequestObjectEncryptionEnc string `json:"request_object_encryption_enc,omitempty"`
|
||||
TokenEndpointAuthMethod string `json:"token_endpoint_auth_method,omitempty"`
|
||||
TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg,omitempty"`
|
||||
DefaultMaxAge int64 `json:"default_max_age,omitempty"`
|
||||
RequireAuthTime bool `json:"require_auth_time,omitempty"`
|
||||
DefaultACRValues []string `json:"default_acr_values,omitempty"`
|
||||
InitiateLoginURI string `json:"initiate_login_uri,omitempty"`
|
||||
RequestURIs []string `json:"request_uris,omitempty"`
|
||||
}
|
||||
|
||||
func (c *encodableClientMetadata) toStruct() (ClientMetadata, error) {
|
||||
p := stickyErrParser{}
|
||||
m := ClientMetadata{
|
||||
RedirectURIs: p.parseURIs(c.RedirectURIs, "redirect_uris"),
|
||||
ResponseTypes: c.ResponseTypes,
|
||||
GrantTypes: c.GrantTypes,
|
||||
ApplicationType: c.ApplicationType,
|
||||
Contacts: p.parseEmails(c.Contacts, "contacts"),
|
||||
ClientName: c.ClientName,
|
||||
LogoURI: p.parseURI(c.LogoURI, "logo_uri"),
|
||||
ClientURI: p.parseURI(c.ClientURI, "client_uri"),
|
||||
PolicyURI: p.parseURI(c.PolicyURI, "policy_uri"),
|
||||
TermsOfServiceURI: p.parseURI(c.TermsOfServiceURI, "tos_uri"),
|
||||
JWKSURI: p.parseURI(c.JWKSURI, "jwks_uri"),
|
||||
JWKS: c.JWKS,
|
||||
SectorIdentifierURI: p.parseURI(c.SectorIdentifierURI, "sector_identifier_uri"),
|
||||
SubjectType: c.SubjectType,
|
||||
TokenEndpointAuthMethod: c.TokenEndpointAuthMethod,
|
||||
TokenEndpointAuthSigningAlg: c.TokenEndpointAuthSigningAlg,
|
||||
DefaultMaxAge: c.DefaultMaxAge,
|
||||
RequireAuthTime: c.RequireAuthTime,
|
||||
DefaultACRValues: c.DefaultACRValues,
|
||||
InitiateLoginURI: p.parseURI(c.InitiateLoginURI, "initiate_login_uri"),
|
||||
RequestURIs: p.parseURIs(c.RequestURIs, "request_uris"),
|
||||
IDTokenResponseOptions: JWAOptions{
|
||||
c.IDTokenSignedResponseAlg,
|
||||
c.IDTokenEncryptedResponseAlg,
|
||||
c.IDTokenEncryptedResponseEnc,
|
||||
},
|
||||
UserInfoResponseOptions: JWAOptions{
|
||||
c.UserInfoSignedResponseAlg,
|
||||
c.UserInfoEncryptedResponseAlg,
|
||||
c.UserInfoEncryptedResponseEnc,
|
||||
},
|
||||
RequestObjectOptions: JWAOptions{
|
||||
c.RequestObjectSigningAlg,
|
||||
c.RequestObjectEncryptionAlg,
|
||||
c.RequestObjectEncryptionEnc,
|
||||
},
|
||||
}
|
||||
if p.firstErr != nil {
|
||||
return ClientMetadata{}, p.firstErr
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// stickyErrParser parses URIs and email addresses. Once it encounters
|
||||
// a parse error, subsequent calls become no-op.
|
||||
type stickyErrParser struct {
|
||||
firstErr error
|
||||
}
|
||||
|
||||
func (p *stickyErrParser) parseURI(s, field string) *url.URL {
|
||||
if p.firstErr != nil || s == "" {
|
||||
return nil
|
||||
}
|
||||
u, err := url.Parse(s)
|
||||
if err == nil {
|
||||
if u.Host == "" {
|
||||
err = errors.New("no host in URI")
|
||||
} else if u.Scheme != "http" && u.Scheme != "https" {
|
||||
err = errors.New("invalid URI scheme")
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
p.firstErr = fmt.Errorf("failed to parse %s: %v", field, err)
|
||||
return nil
|
||||
}
|
||||
return u
|
||||
}
|
||||
|
||||
func (p *stickyErrParser) parseURIs(s []string, field string) []url.URL {
|
||||
if p.firstErr != nil || len(s) == 0 {
|
||||
return nil
|
||||
}
|
||||
uris := make([]url.URL, len(s))
|
||||
for i, val := range s {
|
||||
if val == "" {
|
||||
p.firstErr = fmt.Errorf("invalid URI in field %s", field)
|
||||
return nil
|
||||
}
|
||||
if u := p.parseURI(val, field); u != nil {
|
||||
uris[i] = *u
|
||||
}
|
||||
}
|
||||
return uris
|
||||
}
|
||||
|
||||
func (p *stickyErrParser) parseEmails(s []string, field string) []mail.Address {
|
||||
if p.firstErr != nil || len(s) == 0 {
|
||||
return nil
|
||||
}
|
||||
addrs := make([]mail.Address, len(s))
|
||||
for i, addr := range s {
|
||||
if addr == "" {
|
||||
p.firstErr = fmt.Errorf("invalid email in field %s", field)
|
||||
return nil
|
||||
}
|
||||
a, err := mail.ParseAddress(addr)
|
||||
if err != nil {
|
||||
p.firstErr = fmt.Errorf("invalid email in field %s: %v", field, err)
|
||||
return nil
|
||||
}
|
||||
addrs[i] = *a
|
||||
}
|
||||
return addrs
|
||||
}
|
||||
|
||||
func (m *ClientMetadata) toEncodableStruct() encodableClientMetadata {
|
||||
return encodableClientMetadata{
|
||||
RedirectURIs: urisToStrings(m.RedirectURIs),
|
||||
ResponseTypes: m.ResponseTypes,
|
||||
GrantTypes: m.GrantTypes,
|
||||
ApplicationType: m.ApplicationType,
|
||||
Contacts: emailsToStrings(m.Contacts),
|
||||
ClientName: m.ClientName,
|
||||
LogoURI: uriToString(m.LogoURI),
|
||||
ClientURI: uriToString(m.ClientURI),
|
||||
PolicyURI: uriToString(m.PolicyURI),
|
||||
TermsOfServiceURI: uriToString(m.TermsOfServiceURI),
|
||||
JWKSURI: uriToString(m.JWKSURI),
|
||||
JWKS: m.JWKS,
|
||||
SectorIdentifierURI: uriToString(m.SectorIdentifierURI),
|
||||
SubjectType: m.SubjectType,
|
||||
IDTokenSignedResponseAlg: m.IDTokenResponseOptions.SigningAlg,
|
||||
IDTokenEncryptedResponseAlg: m.IDTokenResponseOptions.EncryptionAlg,
|
||||
IDTokenEncryptedResponseEnc: m.IDTokenResponseOptions.EncryptionEnc,
|
||||
UserInfoSignedResponseAlg: m.UserInfoResponseOptions.SigningAlg,
|
||||
UserInfoEncryptedResponseAlg: m.UserInfoResponseOptions.EncryptionAlg,
|
||||
UserInfoEncryptedResponseEnc: m.UserInfoResponseOptions.EncryptionEnc,
|
||||
RequestObjectSigningAlg: m.RequestObjectOptions.SigningAlg,
|
||||
RequestObjectEncryptionAlg: m.RequestObjectOptions.EncryptionAlg,
|
||||
RequestObjectEncryptionEnc: m.RequestObjectOptions.EncryptionEnc,
|
||||
TokenEndpointAuthMethod: m.TokenEndpointAuthMethod,
|
||||
TokenEndpointAuthSigningAlg: m.TokenEndpointAuthSigningAlg,
|
||||
DefaultMaxAge: m.DefaultMaxAge,
|
||||
RequireAuthTime: m.RequireAuthTime,
|
||||
DefaultACRValues: m.DefaultACRValues,
|
||||
InitiateLoginURI: uriToString(m.InitiateLoginURI),
|
||||
RequestURIs: urisToStrings(m.RequestURIs),
|
||||
}
|
||||
}
|
||||
|
||||
func uriToString(u *url.URL) string {
|
||||
if u == nil {
|
||||
return ""
|
||||
}
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func urisToStrings(urls []url.URL) []string {
|
||||
if len(urls) == 0 {
|
||||
return nil
|
||||
}
|
||||
sli := make([]string, len(urls))
|
||||
for i, u := range urls {
|
||||
sli[i] = u.String()
|
||||
}
|
||||
return sli
|
||||
}
|
||||
|
||||
func emailsToStrings(addrs []mail.Address) []string {
|
||||
if len(addrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
sli := make([]string, len(addrs))
|
||||
for i, addr := range addrs {
|
||||
sli[i] = addr.String()
|
||||
}
|
||||
return sli
|
||||
}
|
||||
|
||||
// Valid determines if a ClientMetadata conforms with the OIDC specification.
|
||||
//
|
||||
// Valid is called by UnmarshalJSON.
|
||||
//
|
||||
// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
|
||||
// URLs fields where the OIDC spec requires it. This may change in future releases
|
||||
// of this package. See: https://github.com/coreos/go-oidc/issues/34
|
||||
func (m *ClientMetadata) Valid() error {
|
||||
if len(m.RedirectURIs) == 0 {
|
||||
return errors.New("zero redirect URLs")
|
||||
}
|
||||
|
||||
validURI := func(u *url.URL, fieldName string) error {
|
||||
if u.Host == "" {
|
||||
return fmt.Errorf("no host for uri field %s", fieldName)
|
||||
}
|
||||
if u.Scheme != "http" && u.Scheme != "https" {
|
||||
return fmt.Errorf("uri field %s scheme is not http or https", fieldName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
uris := []struct {
|
||||
val *url.URL
|
||||
name string
|
||||
}{
|
||||
{m.LogoURI, "logo_uri"},
|
||||
{m.ClientURI, "client_uri"},
|
||||
{m.PolicyURI, "policy_uri"},
|
||||
{m.TermsOfServiceURI, "tos_uri"},
|
||||
{m.JWKSURI, "jwks_uri"},
|
||||
{m.SectorIdentifierURI, "sector_identifier_uri"},
|
||||
{m.InitiateLoginURI, "initiate_login_uri"},
|
||||
}
|
||||
|
||||
for _, uri := range uris {
|
||||
if uri.val == nil {
|
||||
continue
|
||||
}
|
||||
if err := validURI(uri.val, uri.name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
uriLists := []struct {
|
||||
vals []url.URL
|
||||
name string
|
||||
}{
|
||||
{m.RedirectURIs, "redirect_uris"},
|
||||
{m.RequestURIs, "request_uris"},
|
||||
}
|
||||
for _, list := range uriLists {
|
||||
for _, uri := range list.vals {
|
||||
if err := validURI(&uri, list.name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
options := []struct {
|
||||
option JWAOptions
|
||||
name string
|
||||
}{
|
||||
{m.IDTokenResponseOptions, "id_token response"},
|
||||
{m.UserInfoResponseOptions, "userinfo response"},
|
||||
{m.RequestObjectOptions, "request_object"},
|
||||
}
|
||||
for _, option := range options {
|
||||
if err := option.option.valid(); err != nil {
|
||||
return fmt.Errorf("invalid JWA values for %s: %v", option.name, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ClientRegistrationResponse struct {
|
||||
ClientID string // Required
|
||||
ClientSecret string
|
||||
RegistrationAccessToken string
|
||||
RegistrationClientURI string
|
||||
// If IsZero is true, unspecified.
|
||||
ClientIDIssuedAt time.Time
|
||||
// Time at which the client_secret will expire.
|
||||
// If IsZero is true, it will not expire.
|
||||
ClientSecretExpiresAt time.Time
|
||||
|
||||
ClientMetadata
|
||||
}
|
||||
|
||||
type encodableClientRegistrationResponse struct {
|
||||
ClientID string `json:"client_id"` // Required
|
||||
ClientSecret string `json:"client_secret,omitempty"`
|
||||
RegistrationAccessToken string `json:"registration_access_token,omitempty"`
|
||||
RegistrationClientURI string `json:"registration_client_uri,omitempty"`
|
||||
ClientIDIssuedAt int64 `json:"client_id_issued_at,omitempty"`
|
||||
// Time at which the client_secret will expire, in seconds since the epoch.
|
||||
// If 0 it will not expire.
|
||||
ClientSecretExpiresAt int64 `json:"client_secret_expires_at"` // Required
|
||||
|
||||
encodableClientMetadata
|
||||
}
|
||||
|
||||
func unixToSec(t time.Time) int64 {
|
||||
if t.IsZero() {
|
||||
return 0
|
||||
}
|
||||
return t.Unix()
|
||||
}
|
||||
|
||||
func (c *ClientRegistrationResponse) MarshalJSON() ([]byte, error) {
|
||||
e := encodableClientRegistrationResponse{
|
||||
ClientID: c.ClientID,
|
||||
ClientSecret: c.ClientSecret,
|
||||
RegistrationAccessToken: c.RegistrationAccessToken,
|
||||
RegistrationClientURI: c.RegistrationClientURI,
|
||||
ClientIDIssuedAt: unixToSec(c.ClientIDIssuedAt),
|
||||
ClientSecretExpiresAt: unixToSec(c.ClientSecretExpiresAt),
|
||||
encodableClientMetadata: c.ClientMetadata.toEncodableStruct(),
|
||||
}
|
||||
return json.Marshal(&e)
|
||||
}
|
||||
|
||||
func secToUnix(sec int64) time.Time {
|
||||
if sec == 0 {
|
||||
return time.Time{}
|
||||
}
|
||||
return time.Unix(sec, 0)
|
||||
}
|
||||
|
||||
func (c *ClientRegistrationResponse) UnmarshalJSON(data []byte) error {
|
||||
var e encodableClientRegistrationResponse
|
||||
if err := json.Unmarshal(data, &e); err != nil {
|
||||
return err
|
||||
}
|
||||
if e.ClientID == "" {
|
||||
return errors.New("no client_id in client registration response")
|
||||
}
|
||||
metadata, err := e.encodableClientMetadata.toStruct()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*c = ClientRegistrationResponse{
|
||||
ClientID: e.ClientID,
|
||||
ClientSecret: e.ClientSecret,
|
||||
RegistrationAccessToken: e.RegistrationAccessToken,
|
||||
RegistrationClientURI: e.RegistrationClientURI,
|
||||
ClientIDIssuedAt: secToUnix(e.ClientIDIssuedAt),
|
||||
ClientSecretExpiresAt: secToUnix(e.ClientSecretExpiresAt),
|
||||
ClientMetadata: metadata,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ClientConfig struct {
|
||||
HTTPClient phttp.Client
|
||||
Credentials ClientCredentials
|
||||
Scope []string
|
||||
RedirectURL string
|
||||
ProviderConfig ProviderConfig
|
||||
KeySet key.PublicKeySet
|
||||
}
|
||||
|
||||
func NewClient(cfg ClientConfig) (*Client, error) {
|
||||
// Allow empty redirect URL in the case where the client
|
||||
// only needs to verify a given token.
|
||||
ru, err := url.Parse(cfg.RedirectURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid redirect URL: %v", err)
|
||||
}
|
||||
|
||||
c := Client{
|
||||
credentials: cfg.Credentials,
|
||||
httpClient: cfg.HTTPClient,
|
||||
scope: cfg.Scope,
|
||||
redirectURL: ru.String(),
|
||||
providerConfig: newProviderConfigRepo(cfg.ProviderConfig),
|
||||
keySet: cfg.KeySet,
|
||||
}
|
||||
|
||||
if c.httpClient == nil {
|
||||
c.httpClient = http.DefaultClient
|
||||
}
|
||||
|
||||
if c.scope == nil {
|
||||
c.scope = make([]string, len(DefaultScope))
|
||||
copy(c.scope, DefaultScope)
|
||||
}
|
||||
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
httpClient phttp.Client
|
||||
providerConfig *providerConfigRepo
|
||||
credentials ClientCredentials
|
||||
redirectURL string
|
||||
scope []string
|
||||
keySet key.PublicKeySet
|
||||
providerSyncer *ProviderConfigSyncer
|
||||
|
||||
keySetSyncMutex sync.RWMutex
|
||||
lastKeySetSync time.Time
|
||||
}
|
||||
|
||||
func (c *Client) Healthy() error {
|
||||
now := time.Now().UTC()
|
||||
|
||||
cfg := c.providerConfig.Get()
|
||||
|
||||
if cfg.Empty() {
|
||||
return errors.New("oidc client provider config empty")
|
||||
}
|
||||
|
||||
if !cfg.ExpiresAt.IsZero() && cfg.ExpiresAt.Before(now) {
|
||||
return errors.New("oidc client provider config expired")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) OAuthClient() (*oauth2.Client, error) {
|
||||
cfg := c.providerConfig.Get()
|
||||
authMethod, err := chooseAuthMethod(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ocfg := oauth2.Config{
|
||||
Credentials: oauth2.ClientCredentials(c.credentials),
|
||||
RedirectURL: c.redirectURL,
|
||||
AuthURL: cfg.AuthEndpoint.String(),
|
||||
TokenURL: cfg.TokenEndpoint.String(),
|
||||
Scope: c.scope,
|
||||
AuthMethod: authMethod,
|
||||
}
|
||||
|
||||
return oauth2.NewClient(c.httpClient, ocfg)
|
||||
}
|
||||
|
||||
func chooseAuthMethod(cfg ProviderConfig) (string, error) {
|
||||
if len(cfg.TokenEndpointAuthMethodsSupported) == 0 {
|
||||
return oauth2.AuthMethodClientSecretBasic, nil
|
||||
}
|
||||
|
||||
for _, authMethod := range cfg.TokenEndpointAuthMethodsSupported {
|
||||
if _, ok := supportedAuthMethods[authMethod]; ok {
|
||||
return authMethod, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", errors.New("no supported auth methods")
|
||||
}
|
||||
|
||||
// SyncProviderConfig starts the provider config syncer
|
||||
func (c *Client) SyncProviderConfig(discoveryURL string) chan struct{} {
|
||||
r := NewHTTPProviderConfigGetter(c.httpClient, discoveryURL)
|
||||
s := NewProviderConfigSyncer(r, c.providerConfig)
|
||||
stop := s.Run()
|
||||
s.WaitUntilInitialSync()
|
||||
return stop
|
||||
}
|
||||
|
||||
func (c *Client) maybeSyncKeys() error {
|
||||
tooSoon := func() bool {
|
||||
return time.Now().UTC().Before(c.lastKeySetSync.Add(keySyncWindow))
|
||||
}
|
||||
|
||||
// ignore request to sync keys if a sync operation has been
|
||||
// attempted too recently
|
||||
if tooSoon() {
|
||||
return nil
|
||||
}
|
||||
|
||||
c.keySetSyncMutex.Lock()
|
||||
defer c.keySetSyncMutex.Unlock()
|
||||
|
||||
// check again, as another goroutine may have been holding
|
||||
// the lock while updating the keys
|
||||
if tooSoon() {
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg := c.providerConfig.Get()
|
||||
r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint.String())
|
||||
w := &clientKeyRepo{client: c}
|
||||
_, err := key.Sync(r, w)
|
||||
c.lastKeySetSync = time.Now().UTC()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type clientKeyRepo struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
func (r *clientKeyRepo) Set(ks key.KeySet) error {
|
||||
pks, ok := ks.(*key.PublicKeySet)
|
||||
if !ok {
|
||||
return errors.New("unable to cast to PublicKey")
|
||||
}
|
||||
r.client.keySet = *pks
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) ClientCredsToken(scope []string) (jose.JWT, error) {
|
||||
cfg := c.providerConfig.Get()
|
||||
|
||||
if !cfg.SupportsGrantType(oauth2.GrantTypeClientCreds) {
|
||||
return jose.JWT{}, fmt.Errorf("%v grant type is not supported", oauth2.GrantTypeClientCreds)
|
||||
}
|
||||
|
||||
oac, err := c.OAuthClient()
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
t, err := oac.ClientCredsToken(scope)
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
jwt, err := jose.ParseJWT(t.IDToken)
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
return jwt, c.VerifyJWT(jwt)
|
||||
}
|
||||
|
||||
// ExchangeAuthCode exchanges an OAuth2 auth code for an OIDC JWT ID token.
|
||||
func (c *Client) ExchangeAuthCode(code string) (jose.JWT, error) {
|
||||
oac, err := c.OAuthClient()
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code)
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
jwt, err := jose.ParseJWT(t.IDToken)
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
return jwt, c.VerifyJWT(jwt)
|
||||
}
|
||||
|
||||
// RefreshToken uses a refresh token to exchange for a new OIDC JWT ID Token.
|
||||
func (c *Client) RefreshToken(refreshToken string) (jose.JWT, error) {
|
||||
oac, err := c.OAuthClient()
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
t, err := oac.RequestToken(oauth2.GrantTypeRefreshToken, refreshToken)
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
jwt, err := jose.ParseJWT(t.IDToken)
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
return jwt, c.VerifyJWT(jwt)
|
||||
}
|
||||
|
||||
func (c *Client) VerifyJWT(jwt jose.JWT) error {
|
||||
var keysFunc func() []key.PublicKey
|
||||
if kID, ok := jwt.KeyID(); ok {
|
||||
keysFunc = c.keysFuncWithID(kID)
|
||||
} else {
|
||||
keysFunc = c.keysFuncAll()
|
||||
}
|
||||
|
||||
v := NewJWTVerifier(
|
||||
c.providerConfig.Get().Issuer.String(),
|
||||
c.credentials.ID,
|
||||
c.maybeSyncKeys, keysFunc)
|
||||
|
||||
return v.Verify(jwt)
|
||||
}
|
||||
|
||||
// keysFuncWithID returns a function that retrieves at most one unexpired
|
||||
// public key from the Client that matches the provided ID
|
||||
func (c *Client) keysFuncWithID(kID string) func() []key.PublicKey {
|
||||
return func() []key.PublicKey {
|
||||
c.keySetSyncMutex.RLock()
|
||||
defer c.keySetSyncMutex.RUnlock()
|
||||
|
||||
if c.keySet.ExpiresAt().Before(time.Now()) {
|
||||
return []key.PublicKey{}
|
||||
}
|
||||
|
||||
k := c.keySet.Key(kID)
|
||||
if k == nil {
|
||||
return []key.PublicKey{}
|
||||
}
|
||||
|
||||
return []key.PublicKey{*k}
|
||||
}
|
||||
}
|
||||
|
||||
// keysFuncAll returns a function that retrieves all unexpired public
|
||||
// keys from the Client
|
||||
func (c *Client) keysFuncAll() func() []key.PublicKey {
|
||||
return func() []key.PublicKey {
|
||||
c.keySetSyncMutex.RLock()
|
||||
defer c.keySetSyncMutex.RUnlock()
|
||||
|
||||
if c.keySet.ExpiresAt().Before(time.Now()) {
|
||||
return []key.PublicKey{}
|
||||
}
|
||||
|
||||
return c.keySet.Keys()
|
||||
}
|
||||
}
|
||||
|
||||
type providerConfigRepo struct {
|
||||
mu sync.RWMutex
|
||||
config ProviderConfig // do not access directly, use Get()
|
||||
}
|
||||
|
||||
func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo {
|
||||
return &providerConfigRepo{sync.RWMutex{}, pc}
|
||||
}
|
||||
|
||||
// returns an error to implement ProviderConfigSetter
|
||||
func (r *providerConfigRepo) Set(cfg ProviderConfig) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
r.config = cfg
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *providerConfigRepo) Get() ProviderConfig {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
return r.config
|
||||
}
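For context, a compressed sketch of how the pieces of this client fit together; the issuer URL and credentials are placeholders, and jwt.Claims comes from the vendored jose package rather than from this hunk:

	package main

	import (
		"log"

		"github.com/coreos/go-oidc/oidc"
	)

	func main() {
		client, err := oidc.NewClient(oidc.ClientConfig{
			Credentials: oidc.ClientCredentials{ID: "client-id", Secret: "client-secret"},
			RedirectURL: "https://app.example.com/callback",
		})
		if err != nil {
			log.Fatal(err)
		}

		// Fetch provider metadata in the background; the call blocks until the
		// first sync has completed.
		stop := client.SyncProviderConfig("https://issuer.example.com")
		defer close(stop)

		// Exchange an authorization code for a verified ID token.
		jwt, err := client.ExchangeAuthCode("code-from-callback")
		if err != nil {
			log.Fatal(err)
		}

		claims, err := jwt.Claims()
		if err != nil {
			log.Fatal(err)
		}
		ident, err := oidc.IdentityFromClaims(claims)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("logged in as %s (%s)", ident.ID, ident.Email)
	}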
@@ -0,0 +1,2 @@
// Package oidc is DEPRECATED. Use github.com/coreos/go-oidc instead.
package oidc
@@ -0,0 +1,44 @@
package oidc

import (
	"errors"
	"time"

	"github.com/coreos/go-oidc/jose"
)

type Identity struct {
	ID        string
	Name      string
	Email     string
	ExpiresAt time.Time
}

func IdentityFromClaims(claims jose.Claims) (*Identity, error) {
	if claims == nil {
		return nil, errors.New("nil claim set")
	}

	var ident Identity
	var err error
	var ok bool

	if ident.ID, ok, err = claims.StringClaim("sub"); err != nil {
		return nil, err
	} else if !ok {
		return nil, errors.New("missing required claim: sub")
	}

	if ident.Email, _, err = claims.StringClaim("email"); err != nil {
		return nil, err
	}

	exp, ok, err := claims.TimeClaim("exp")
	if err != nil {
		return nil, err
	} else if ok {
		ident.ExpiresAt = exp
	}

	return &ident, nil
}
@@ -0,0 +1,3 @@
package oidc

type LoginFunc func(ident Identity, sessionKey string) (redirectURL string, err error)
@@ -0,0 +1,67 @@
package oidc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
phttp "github.com/coreos/go-oidc/http"
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
"github.com/coreos/go-oidc/key"
|
||||
)
|
||||
|
||||
// DefaultPublicKeySetTTL is the default TTL set on the PublicKeySet if no
|
||||
// Cache-Control header is provided by the JWK Set document endpoint.
|
||||
const DefaultPublicKeySetTTL = 24 * time.Hour
|
||||
|
||||
// NewRemotePublicKeyRepo is responsible for fetching the JWK Set document.
|
||||
func NewRemotePublicKeyRepo(hc phttp.Client, ep string) *remotePublicKeyRepo {
|
||||
return &remotePublicKeyRepo{hc: hc, ep: ep}
|
||||
}
|
||||
|
||||
type remotePublicKeyRepo struct {
|
||||
hc phttp.Client
|
||||
ep string
|
||||
}
|
||||
|
||||
// Get returns a PublicKeySet fetched from the JWK Set document endpoint. A TTL
|
||||
// is set on the Key Set to avoid it having to be re-retrieved for every
|
||||
// encryption event. This TTL is typically controlled by the endpoint returning
|
||||
// a Cache-Control header, but defaults to 24 hours if no Cache-Control header
|
||||
// is found.
|
||||
func (r *remotePublicKeyRepo) Get() (key.KeySet, error) {
|
||||
req, err := http.NewRequest("GET", r.ep, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := r.hc.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var d struct {
|
||||
Keys []jose.JWK `json:"keys"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&d); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(d.Keys) == 0 {
|
||||
return nil, errors.New("zero keys in response")
|
||||
}
|
||||
|
||||
ttl, ok, err := phttp.Cacheable(resp.Header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !ok {
|
||||
ttl = DefaultPublicKeySetTTL
|
||||
}
|
||||
|
||||
exp := time.Now().UTC().Add(ttl)
|
||||
ks := key.NewPublicKeySet(d.Keys, exp)
|
||||
return ks, nil
|
||||
}
|
|
@@ -0,0 +1,687 @@
|
|||
package oidc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/pkg/timeutil"
|
||||
"github.com/jonboulle/clockwork"
|
||||
|
||||
phttp "github.com/coreos/go-oidc/http"
|
||||
"github.com/coreos/go-oidc/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
// Subject Identifier types defined by the OIDC spec. Specifies if the provider
|
||||
// should provide the same sub claim value to all clients (public) or a unique
|
||||
// value for each client (pairwise).
|
||||
//
|
||||
// See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
|
||||
SubjectTypePublic = "public"
|
||||
SubjectTypePairwise = "pairwise"
|
||||
)
|
||||
|
||||
var (
|
||||
// Default values for omitted provider config fields.
|
||||
//
|
||||
// Use ProviderConfig's Defaults method to fill a provider config with these values.
|
||||
DefaultGrantTypesSupported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit}
|
||||
DefaultResponseModesSupported = []string{"query", "fragment"}
|
||||
DefaultTokenEndpointAuthMethodsSupported = []string{oauth2.AuthMethodClientSecretBasic}
|
||||
DefaultClaimTypesSupported = []string{"normal"}
|
||||
)
|
||||
|
||||
const (
|
||||
MaximumProviderConfigSyncInterval = 24 * time.Hour
|
||||
MinimumProviderConfigSyncInterval = time.Minute
|
||||
|
||||
discoveryConfigPath = "/.well-known/openid-configuration"
|
||||
)
|
||||
|
||||
// internally configurable for tests
|
||||
var minimumProviderConfigSyncInterval = MinimumProviderConfigSyncInterval
|
||||
|
||||
var (
|
||||
// Ensure ProviderConfig satisfies these interfaces.
|
||||
_ json.Marshaler = &ProviderConfig{}
|
||||
_ json.Unmarshaler = &ProviderConfig{}
|
||||
)
|
||||
|
||||
// ProviderConfig represents the OpenID Provider Metadata specifying what
|
||||
// configurations a provider supports.
|
||||
//
|
||||
// See: http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
|
||||
type ProviderConfig struct {
|
||||
Issuer *url.URL // Required
|
||||
AuthEndpoint *url.URL // Required
|
||||
TokenEndpoint *url.URL // Required if grant types other than "implicit" are supported
|
||||
UserInfoEndpoint *url.URL
|
||||
KeysEndpoint *url.URL // Required
|
||||
RegistrationEndpoint *url.URL
|
||||
EndSessionEndpoint *url.URL
|
||||
CheckSessionIFrame *url.URL
|
||||
|
||||
// Servers MAY choose not to advertise some supported scope values even when this
|
||||
// parameter is used, although those defined in OpenID Core SHOULD be listed, if supported.
|
||||
ScopesSupported []string
|
||||
// OAuth2.0 response types supported.
|
||||
ResponseTypesSupported []string // Required
|
||||
// OAuth2.0 response modes supported.
|
||||
//
|
||||
// If omitted, defaults to DefaultResponseModesSupported.
|
||||
ResponseModesSupported []string
|
||||
// OAuth2.0 grant types supported.
|
||||
//
|
||||
// If omitted, defaults to DefaultGrantTypesSupported.
|
||||
GrantTypesSupported []string
|
||||
ACRValuesSupported []string
|
||||
// SubjectTypesSupported specifies strategies for providing values for the sub claim.
|
||||
SubjectTypesSupported []string // Required
|
||||
|
||||
// JWA signing and encryption algorithm values supported for ID tokens.
|
||||
IDTokenSigningAlgValues []string // Required
|
||||
IDTokenEncryptionAlgValues []string
|
||||
IDTokenEncryptionEncValues []string
|
||||
|
||||
// JWA signing and encryption algorithm values supported for user info responses.
|
||||
UserInfoSigningAlgValues []string
|
||||
UserInfoEncryptionAlgValues []string
|
||||
UserInfoEncryptionEncValues []string
|
||||
|
||||
// JWA signing and encryption algorithm values supported for request objects.
|
||||
ReqObjSigningAlgValues []string
|
||||
ReqObjEncryptionAlgValues []string
|
||||
ReqObjEncryptionEncValues []string
|
||||
|
||||
TokenEndpointAuthMethodsSupported []string
|
||||
TokenEndpointAuthSigningAlgValuesSupported []string
|
||||
DisplayValuesSupported []string
|
||||
ClaimTypesSupported []string
|
||||
ClaimsSupported []string
|
||||
ServiceDocs *url.URL
|
||||
ClaimsLocalsSupported []string
|
||||
UILocalsSupported []string
|
||||
ClaimsParameterSupported bool
|
||||
RequestParameterSupported bool
|
||||
RequestURIParamaterSupported bool
|
||||
RequireRequestURIRegistration bool
|
||||
|
||||
Policy *url.URL
|
||||
TermsOfService *url.URL
|
||||
|
||||
// Not part of the OpenID Provider Metadata
|
||||
ExpiresAt time.Time
|
||||
}
|
||||
|
||||
// Defaults returns a shallow copy of ProviderConfig with default
|
||||
// values replacing omitted fields.
|
||||
//
|
||||
// var cfg oidc.ProviderConfig
|
||||
// // Fill provider config with default values for omitted fields.
|
||||
// cfg = cfg.Defaults()
|
||||
//
|
||||
func (p ProviderConfig) Defaults() ProviderConfig {
|
||||
setDefault := func(val *[]string, defaultVal []string) {
|
||||
if len(*val) == 0 {
|
||||
*val = defaultVal
|
||||
}
|
||||
}
|
||||
setDefault(&p.GrantTypesSupported, DefaultGrantTypesSupported)
|
||||
setDefault(&p.ResponseModesSupported, DefaultResponseModesSupported)
|
||||
setDefault(&p.TokenEndpointAuthMethodsSupported, DefaultTokenEndpointAuthMethodsSupported)
|
||||
setDefault(&p.ClaimTypesSupported, DefaultClaimTypesSupported)
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *ProviderConfig) MarshalJSON() ([]byte, error) {
|
||||
e := p.toEncodableStruct()
|
||||
return json.Marshal(&e)
|
||||
}
|
||||
|
||||
func (p *ProviderConfig) UnmarshalJSON(data []byte) error {
|
||||
var e encodableProviderConfig
|
||||
if err := json.Unmarshal(data, &e); err != nil {
|
||||
return err
|
||||
}
|
||||
conf, err := e.toStruct()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := conf.Valid(); err != nil {
|
||||
return err
|
||||
}
|
||||
*p = conf
|
||||
return nil
|
||||
}
|
||||
|
||||
type encodableProviderConfig struct {
|
||||
Issuer string `json:"issuer"`
|
||||
AuthEndpoint string `json:"authorization_endpoint"`
|
||||
TokenEndpoint string `json:"token_endpoint"`
|
||||
UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"`
|
||||
KeysEndpoint string `json:"jwks_uri"`
|
||||
RegistrationEndpoint string `json:"registration_endpoint,omitempty"`
|
||||
EndSessionEndpoint string `json:"end_session_endpoint,omitempty"`
|
||||
CheckSessionIFrame string `json:"check_session_iframe,omitempty"`
|
||||
|
||||
// Use 'omitempty' for all slices as per OIDC spec:
|
||||
// "Claims that return multiple values are represented as JSON arrays.
|
||||
// Claims with zero elements MUST be omitted from the response."
|
||||
// http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse
|
||||
|
||||
ScopesSupported []string `json:"scopes_supported,omitempty"`
|
||||
ResponseTypesSupported []string `json:"response_types_supported,omitempty"`
|
||||
ResponseModesSupported []string `json:"response_modes_supported,omitempty"`
|
||||
GrantTypesSupported []string `json:"grant_types_supported,omitempty"`
|
||||
ACRValuesSupported []string `json:"acr_values_supported,omitempty"`
|
||||
SubjectTypesSupported []string `json:"subject_types_supported,omitempty"`
|
||||
|
||||
IDTokenSigningAlgValues []string `json:"id_token_signing_alg_values_supported,omitempty"`
|
||||
IDTokenEncryptionAlgValues []string `json:"id_token_encryption_alg_values_supported,omitempty"`
|
||||
IDTokenEncryptionEncValues []string `json:"id_token_encryption_enc_values_supported,omitempty"`
|
||||
UserInfoSigningAlgValues []string `json:"userinfo_signing_alg_values_supported,omitempty"`
|
||||
UserInfoEncryptionAlgValues []string `json:"userinfo_encryption_alg_values_supported,omitempty"`
|
||||
UserInfoEncryptionEncValues []string `json:"userinfo_encryption_enc_values_supported,omitempty"`
|
||||
ReqObjSigningAlgValues []string `json:"request_object_signing_alg_values_supported,omitempty"`
|
||||
ReqObjEncryptionAlgValues []string `json:"request_object_encryption_alg_values_supported,omitempty"`
|
||||
ReqObjEncryptionEncValues []string `json:"request_object_encryption_enc_values_supported,omitempty"`
|
||||
|
||||
TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"`
|
||||
TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"`
|
||||
|
||||
DisplayValuesSupported []string `json:"display_values_supported,omitempty"`
|
||||
ClaimTypesSupported []string `json:"claim_types_supported,omitempty"`
|
||||
ClaimsSupported []string `json:"claims_supported,omitempty"`
|
||||
ServiceDocs string `json:"service_documentation,omitempty"`
|
||||
ClaimsLocalsSupported []string `json:"claims_locales_supported,omitempty"`
|
||||
UILocalsSupported []string `json:"ui_locales_supported,omitempty"`
|
||||
ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"`
|
||||
RequestParameterSupported bool `json:"request_parameter_supported,omitempty"`
|
||||
RequestURIParamaterSupported bool `json:"request_uri_parameter_supported,omitempty"`
|
||||
RequireRequestURIRegistration bool `json:"require_request_uri_registration,omitempty"`
|
||||
|
||||
Policy string `json:"op_policy_uri,omitempty"`
|
||||
TermsOfService string `json:"op_tos_uri,omitempty"`
|
||||
}
|
||||
|
||||
func (cfg ProviderConfig) toEncodableStruct() encodableProviderConfig {
|
||||
return encodableProviderConfig{
|
||||
Issuer: uriToString(cfg.Issuer),
|
||||
AuthEndpoint: uriToString(cfg.AuthEndpoint),
|
||||
TokenEndpoint: uriToString(cfg.TokenEndpoint),
|
||||
UserInfoEndpoint: uriToString(cfg.UserInfoEndpoint),
|
||||
KeysEndpoint: uriToString(cfg.KeysEndpoint),
|
||||
RegistrationEndpoint: uriToString(cfg.RegistrationEndpoint),
|
||||
EndSessionEndpoint: uriToString(cfg.EndSessionEndpoint),
|
||||
CheckSessionIFrame: uriToString(cfg.CheckSessionIFrame),
|
||||
ScopesSupported: cfg.ScopesSupported,
|
||||
ResponseTypesSupported: cfg.ResponseTypesSupported,
|
||||
ResponseModesSupported: cfg.ResponseModesSupported,
|
||||
GrantTypesSupported: cfg.GrantTypesSupported,
|
||||
ACRValuesSupported: cfg.ACRValuesSupported,
|
||||
SubjectTypesSupported: cfg.SubjectTypesSupported,
|
||||
IDTokenSigningAlgValues: cfg.IDTokenSigningAlgValues,
|
||||
IDTokenEncryptionAlgValues: cfg.IDTokenEncryptionAlgValues,
|
||||
IDTokenEncryptionEncValues: cfg.IDTokenEncryptionEncValues,
|
||||
UserInfoSigningAlgValues: cfg.UserInfoSigningAlgValues,
|
||||
UserInfoEncryptionAlgValues: cfg.UserInfoEncryptionAlgValues,
|
||||
UserInfoEncryptionEncValues: cfg.UserInfoEncryptionEncValues,
|
||||
ReqObjSigningAlgValues: cfg.ReqObjSigningAlgValues,
|
||||
ReqObjEncryptionAlgValues: cfg.ReqObjEncryptionAlgValues,
|
||||
ReqObjEncryptionEncValues: cfg.ReqObjEncryptionEncValues,
|
||||
TokenEndpointAuthMethodsSupported: cfg.TokenEndpointAuthMethodsSupported,
|
||||
TokenEndpointAuthSigningAlgValuesSupported: cfg.TokenEndpointAuthSigningAlgValuesSupported,
|
||||
DisplayValuesSupported: cfg.DisplayValuesSupported,
|
||||
ClaimTypesSupported: cfg.ClaimTypesSupported,
|
||||
ClaimsSupported: cfg.ClaimsSupported,
|
||||
ServiceDocs: uriToString(cfg.ServiceDocs),
|
||||
ClaimsLocalsSupported: cfg.ClaimsLocalsSupported,
|
||||
UILocalsSupported: cfg.UILocalsSupported,
|
||||
ClaimsParameterSupported: cfg.ClaimsParameterSupported,
|
||||
RequestParameterSupported: cfg.RequestParameterSupported,
|
||||
RequestURIParamaterSupported: cfg.RequestURIParamaterSupported,
|
||||
RequireRequestURIRegistration: cfg.RequireRequestURIRegistration,
|
||||
Policy: uriToString(cfg.Policy),
|
||||
TermsOfService: uriToString(cfg.TermsOfService),
|
||||
}
|
||||
}
|
||||
|
||||
func (e encodableProviderConfig) toStruct() (ProviderConfig, error) {
|
||||
p := stickyErrParser{}
|
||||
conf := ProviderConfig{
|
||||
Issuer: p.parseURI(e.Issuer, "issuer"),
|
||||
AuthEndpoint: p.parseURI(e.AuthEndpoint, "authorization_endpoint"),
|
||||
TokenEndpoint: p.parseURI(e.TokenEndpoint, "token_endpoint"),
|
||||
UserInfoEndpoint: p.parseURI(e.UserInfoEndpoint, "userinfo_endpoint"),
|
||||
KeysEndpoint: p.parseURI(e.KeysEndpoint, "jwks_uri"),
|
||||
RegistrationEndpoint: p.parseURI(e.RegistrationEndpoint, "registration_endpoint"),
|
||||
EndSessionEndpoint: p.parseURI(e.EndSessionEndpoint, "end_session_endpoint"),
|
||||
CheckSessionIFrame: p.parseURI(e.CheckSessionIFrame, "check_session_iframe"),
|
||||
ScopesSupported: e.ScopesSupported,
|
||||
ResponseTypesSupported: e.ResponseTypesSupported,
|
||||
ResponseModesSupported: e.ResponseModesSupported,
|
||||
GrantTypesSupported: e.GrantTypesSupported,
|
||||
ACRValuesSupported: e.ACRValuesSupported,
|
||||
SubjectTypesSupported: e.SubjectTypesSupported,
|
||||
IDTokenSigningAlgValues: e.IDTokenSigningAlgValues,
|
||||
IDTokenEncryptionAlgValues: e.IDTokenEncryptionAlgValues,
|
||||
IDTokenEncryptionEncValues: e.IDTokenEncryptionEncValues,
|
||||
UserInfoSigningAlgValues: e.UserInfoSigningAlgValues,
|
||||
UserInfoEncryptionAlgValues: e.UserInfoEncryptionAlgValues,
|
||||
UserInfoEncryptionEncValues: e.UserInfoEncryptionEncValues,
|
||||
ReqObjSigningAlgValues: e.ReqObjSigningAlgValues,
|
||||
ReqObjEncryptionAlgValues: e.ReqObjEncryptionAlgValues,
|
||||
ReqObjEncryptionEncValues: e.ReqObjEncryptionEncValues,
|
||||
TokenEndpointAuthMethodsSupported: e.TokenEndpointAuthMethodsSupported,
|
||||
TokenEndpointAuthSigningAlgValuesSupported: e.TokenEndpointAuthSigningAlgValuesSupported,
|
||||
DisplayValuesSupported: e.DisplayValuesSupported,
|
||||
ClaimTypesSupported: e.ClaimTypesSupported,
|
||||
ClaimsSupported: e.ClaimsSupported,
|
||||
ServiceDocs: p.parseURI(e.ServiceDocs, "service_documentation"),
|
||||
ClaimsLocalsSupported: e.ClaimsLocalsSupported,
|
||||
UILocalsSupported: e.UILocalsSupported,
|
||||
ClaimsParameterSupported: e.ClaimsParameterSupported,
|
||||
RequestParameterSupported: e.RequestParameterSupported,
|
||||
RequestURIParamaterSupported: e.RequestURIParamaterSupported,
|
||||
RequireRequestURIRegistration: e.RequireRequestURIRegistration,
|
||||
Policy: p.parseURI(e.Policy, "op_policy_uri"),
|
||||
TermsOfService: p.parseURI(e.TermsOfService, "op_tos_uri"),
|
||||
}
|
||||
if p.firstErr != nil {
|
||||
return ProviderConfig{}, p.firstErr
|
||||
}
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
// Empty reports whether a ProviderConfig holds no information.
|
||||
//
|
||||
// This case generally indicates a ProviderConfigGetter has experienced an error
|
||||
// and has nothing to report.
|
||||
func (p ProviderConfig) Empty() bool {
|
||||
return p.Issuer == nil
|
||||
}
|
||||
|
||||
func contains(sli []string, ele string) bool {
|
||||
for _, s := range sli {
|
||||
if s == ele {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Valid determines if a ProviderConfig conforms with the OIDC specification.
|
||||
// If Valid returns successfully, it guarantees required fields are non-nil and
|
||||
// URLs are well formed.
|
||||
//
|
||||
// Valid is called by UnmarshalJSON.
|
||||
//
|
||||
// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
|
||||
// URL fields where the OIDC spec requires it. This may change in future releases
|
||||
// of this package. See: https://github.com/coreos/go-oidc/issues/34
|
||||
func (p ProviderConfig) Valid() error {
|
||||
grantTypes := p.GrantTypesSupported
|
||||
if len(grantTypes) == 0 {
|
||||
grantTypes = DefaultGrantTypesSupported
|
||||
}
|
||||
implicitOnly := true
|
||||
for _, grantType := range grantTypes {
|
||||
if grantType != oauth2.GrantTypeImplicit {
|
||||
implicitOnly = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(p.SubjectTypesSupported) == 0 {
|
||||
return errors.New("missing required field subject_types_supported")
|
||||
}
|
||||
if len(p.IDTokenSigningAlgValues) == 0 {
|
||||
return errors.New("missing required field id_token_signing_alg_values_supported")
|
||||
}
|
||||
|
||||
if len(p.ScopesSupported) != 0 && !contains(p.ScopesSupported, "openid") {
|
||||
return errors.New("scopes_supported must be unspecified or include 'openid'")
|
||||
}
|
||||
|
||||
if !contains(p.IDTokenSigningAlgValues, "RS256") {
|
||||
return errors.New("id_token_signing_alg_values_supported must include 'RS256'")
|
||||
}
|
||||
|
||||
uris := []struct {
|
||||
val *url.URL
|
||||
name string
|
||||
required bool
|
||||
}{
|
||||
{p.Issuer, "issuer", true},
|
||||
{p.AuthEndpoint, "authorization_endpoint", true},
|
||||
{p.TokenEndpoint, "token_endpoint", !implicitOnly},
|
||||
{p.UserInfoEndpoint, "userinfo_endpoint", false},
|
||||
{p.KeysEndpoint, "jwks_uri", true},
|
||||
{p.RegistrationEndpoint, "registration_endpoint", false},
|
||||
{p.EndSessionEndpoint, "end_session_endpoint", false},
|
||||
{p.CheckSessionIFrame, "check_session_iframe", false},
|
||||
{p.ServiceDocs, "service_documentation", false},
|
||||
{p.Policy, "op_policy_uri", false},
|
||||
{p.TermsOfService, "op_tos_uri", false},
|
||||
}
|
||||
|
||||
for _, uri := range uris {
|
||||
if uri.val == nil {
|
||||
if !uri.required {
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("empty value for required uri field %s", uri.name)
|
||||
}
|
||||
if uri.val.Host == "" {
|
||||
return fmt.Errorf("no host for uri field %s", uri.name)
|
||||
}
|
||||
if uri.val.Scheme != "http" && uri.val.Scheme != "https" {
|
||||
return fmt.Errorf("uri field %s scheme is not http or https", uri.name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Supports determines if provider supports a client given their respective metadata.
|
||||
func (p ProviderConfig) Supports(c ClientMetadata) error {
|
||||
if err := p.Valid(); err != nil {
|
||||
return fmt.Errorf("invalid provider config: %v", err)
|
||||
}
|
||||
if err := c.Valid(); err != nil {
|
||||
return fmt.Errorf("invalid client config: %v", err)
|
||||
}
|
||||
|
||||
// Fill default values for omitted fields
|
||||
c = c.Defaults()
|
||||
p = p.Defaults()
|
||||
|
||||
// Do the supported values list the requested one?
|
||||
supports := []struct {
|
||||
supported []string
|
||||
requested string
|
||||
name string
|
||||
}{
|
||||
{p.IDTokenSigningAlgValues, c.IDTokenResponseOptions.SigningAlg, "id_token_signed_response_alg"},
|
||||
{p.IDTokenEncryptionAlgValues, c.IDTokenResponseOptions.EncryptionAlg, "id_token_encryption_response_alg"},
|
||||
{p.IDTokenEncryptionEncValues, c.IDTokenResponseOptions.EncryptionEnc, "id_token_encryption_response_enc"},
|
||||
{p.UserInfoSigningAlgValues, c.UserInfoResponseOptions.SigningAlg, "userinfo_signed_response_alg"},
|
||||
{p.UserInfoEncryptionAlgValues, c.UserInfoResponseOptions.EncryptionAlg, "userinfo_encryption_response_alg"},
|
||||
{p.UserInfoEncryptionEncValues, c.UserInfoResponseOptions.EncryptionEnc, "userinfo_encryption_response_enc"},
|
||||
{p.ReqObjSigningAlgValues, c.RequestObjectOptions.SigningAlg, "request_object_signing_alg"},
|
||||
{p.ReqObjEncryptionAlgValues, c.RequestObjectOptions.EncryptionAlg, "request_object_encryption_alg"},
|
||||
{p.ReqObjEncryptionEncValues, c.RequestObjectOptions.EncryptionEnc, "request_object_encryption_enc"},
|
||||
}
|
||||
for _, field := range supports {
|
||||
if field.requested == "" {
|
||||
continue
|
||||
}
|
||||
if !contains(field.supported, field.requested) {
|
||||
return fmt.Errorf("provider does not support requested value for field %s", field.name)
|
||||
}
|
||||
}
|
||||
|
||||
stringsEqual := func(s1, s2 string) bool { return s1 == s2 }
|
||||
|
||||
// For lists, is the list of requested values a subset of the supported ones?
|
||||
supportsAll := []struct {
|
||||
supported []string
|
||||
requested []string
|
||||
name string
|
||||
// OAuth2.0 response_type can be space separated lists where order doesn't matter.
|
||||
// For example "id_token token" is the same as "token id_token"
|
||||
// Support a custom compare method.
|
||||
comp func(s1, s2 string) bool
|
||||
}{
|
||||
{p.GrantTypesSupported, c.GrantTypes, "grant_types", stringsEqual},
|
||||
{p.ResponseTypesSupported, c.ResponseTypes, "response_type", oauth2.ResponseTypesEqual},
|
||||
}
|
||||
for _, field := range supportsAll {
|
||||
requestLoop:
|
||||
for _, req := range field.requested {
|
||||
for _, sup := range field.supported {
|
||||
if field.comp(req, sup) {
|
||||
continue requestLoop
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("provider does not support requested value for field %s", field.name)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(ericchiang): Are there more checks we feel comfortable with being strict about?
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p ProviderConfig) SupportsGrantType(grantType string) bool {
|
||||
var supported []string
|
||||
if len(p.GrantTypesSupported) == 0 {
|
||||
supported = DefaultGrantTypesSupported
|
||||
} else {
|
||||
supported = p.GrantTypesSupported
|
||||
}
|
||||
|
||||
for _, t := range supported {
|
||||
if t == grantType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type ProviderConfigGetter interface {
|
||||
Get() (ProviderConfig, error)
|
||||
}
|
||||
|
||||
type ProviderConfigSetter interface {
|
||||
Set(ProviderConfig) error
|
||||
}
|
||||
|
||||
type ProviderConfigSyncer struct {
|
||||
from ProviderConfigGetter
|
||||
to ProviderConfigSetter
|
||||
clock clockwork.Clock
|
||||
|
||||
initialSyncDone bool
|
||||
initialSyncWait sync.WaitGroup
|
||||
}
|
||||
|
||||
func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer {
|
||||
return &ProviderConfigSyncer{
|
||||
from: from,
|
||||
to: to,
|
||||
clock: clockwork.NewRealClock(),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ProviderConfigSyncer) Run() chan struct{} {
|
||||
stop := make(chan struct{})
|
||||
|
||||
var next pcsStepper
|
||||
next = &pcsStepNext{aft: time.Duration(0)}
|
||||
|
||||
s.initialSyncWait.Add(1)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-s.clock.After(next.after()):
|
||||
next = next.step(s.sync)
|
||||
case <-stop:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return stop
|
||||
}
|
||||
|
||||
func (s *ProviderConfigSyncer) WaitUntilInitialSync() {
|
||||
s.initialSyncWait.Wait()
|
||||
}
|
||||
|
||||
func (s *ProviderConfigSyncer) sync() (time.Duration, error) {
|
||||
cfg, err := s.from.Get()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if err = s.to.Set(cfg); err != nil {
|
||||
return 0, fmt.Errorf("error setting provider config: %v", err)
|
||||
}
|
||||
|
||||
if !s.initialSyncDone {
|
||||
s.initialSyncWait.Done()
|
||||
s.initialSyncDone = true
|
||||
}
|
||||
|
||||
return nextSyncAfter(cfg.ExpiresAt, s.clock), nil
|
||||
}
|
||||
|
||||
type pcsStepFunc func() (time.Duration, error)
|
||||
|
||||
type pcsStepper interface {
|
||||
after() time.Duration
|
||||
step(pcsStepFunc) pcsStepper
|
||||
}
|
||||
|
||||
type pcsStepNext struct {
|
||||
aft time.Duration
|
||||
}
|
||||
|
||||
func (n *pcsStepNext) after() time.Duration {
|
||||
return n.aft
|
||||
}
|
||||
|
||||
func (n *pcsStepNext) step(fn pcsStepFunc) (next pcsStepper) {
|
||||
ttl, err := fn()
|
||||
if err == nil {
|
||||
next = &pcsStepNext{aft: ttl}
|
||||
} else {
|
||||
next = &pcsStepRetry{aft: time.Second}
|
||||
log.Printf("go-oidc: provider config sync failed, retrying in %v: %v", next.after(), err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type pcsStepRetry struct {
|
||||
aft time.Duration
|
||||
}
|
||||
|
||||
func (r *pcsStepRetry) after() time.Duration {
|
||||
return r.aft
|
||||
}
|
||||
|
||||
func (r *pcsStepRetry) step(fn pcsStepFunc) (next pcsStepper) {
|
||||
ttl, err := fn()
|
||||
if err == nil {
|
||||
next = &pcsStepNext{aft: ttl}
|
||||
} else {
|
||||
next = &pcsStepRetry{aft: timeutil.ExpBackoff(r.aft, time.Minute)}
|
||||
log.Printf("go-oidc: provider config sync failed, retrying in %v: %v", next.after(), err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func nextSyncAfter(exp time.Time, clock clockwork.Clock) time.Duration {
|
||||
if exp.IsZero() {
|
||||
return MaximumProviderConfigSyncInterval
|
||||
}
|
||||
|
||||
t := exp.Sub(clock.Now()) / 2
|
||||
if t > MaximumProviderConfigSyncInterval {
|
||||
t = MaximumProviderConfigSyncInterval
|
||||
} else if t < minimumProviderConfigSyncInterval {
|
||||
t = minimumProviderConfigSyncInterval
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
type httpProviderConfigGetter struct {
|
||||
hc phttp.Client
|
||||
issuerURL string
|
||||
clock clockwork.Clock
|
||||
}
|
||||
|
||||
func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProviderConfigGetter {
|
||||
return &httpProviderConfigGetter{
|
||||
hc: hc,
|
||||
issuerURL: issuerURL,
|
||||
clock: clockwork.NewRealClock(),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) {
|
||||
// If the Issuer value contains a path component, any terminating / MUST be removed before
|
||||
// appending /.well-known/openid-configuration.
|
||||
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest
|
||||
discoveryURL := strings.TrimSuffix(r.issuerURL, "/") + discoveryConfigPath
|
||||
req, err := http.NewRequest("GET", discoveryURL, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := r.hc.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err = json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var ttl time.Duration
|
||||
var ok bool
|
||||
ttl, ok, err = phttp.Cacheable(resp.Header)
|
||||
if err != nil {
|
||||
return
|
||||
} else if ok {
|
||||
cfg.ExpiresAt = r.clock.Now().UTC().Add(ttl)
|
||||
}
|
||||
|
||||
// The issuer value returned MUST be identical to the Issuer URL that was directly used to retrieve the configuration information.
|
||||
// http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation
|
||||
if !urlEqual(cfg.Issuer.String(), r.issuerURL) {
|
||||
err = fmt.Errorf(`"issuer" in config (%v) does not match provided issuer URL (%v)`, cfg.Issuer, r.issuerURL)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func FetchProviderConfig(hc phttp.Client, issuerURL string) (ProviderConfig, error) {
|
||||
if hc == nil {
|
||||
hc = http.DefaultClient
|
||||
}
|
||||
|
||||
g := NewHTTPProviderConfigGetter(hc, issuerURL)
|
||||
return g.Get()
|
||||
}
|
||||
|
||||
func WaitForProviderConfig(hc phttp.Client, issuerURL string) (pcfg ProviderConfig) {
|
||||
return waitForProviderConfig(hc, issuerURL, clockwork.NewRealClock())
|
||||
}
|
||||
|
||||
func waitForProviderConfig(hc phttp.Client, issuerURL string, clock clockwork.Clock) (pcfg ProviderConfig) {
|
||||
var sleep time.Duration
|
||||
var err error
|
||||
for {
|
||||
pcfg, err = FetchProviderConfig(hc, issuerURL)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
||||
sleep = timeutil.ExpBackoff(sleep, time.Minute)
|
||||
fmt.Printf("Failed fetching provider config, trying again in %v: %v\n", sleep, err)
|
||||
time.Sleep(sleep)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@@ -0,0 +1,88 @@
|
|||
package oidc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
phttp "github.com/coreos/go-oidc/http"
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
)
|
||||
|
||||
type TokenRefresher interface {
|
||||
// Verify checks if the provided token is currently valid or not.
|
||||
Verify(jose.JWT) error
|
||||
|
||||
// Refresh attempts to authenticate and retrieve a new token.
|
||||
Refresh() (jose.JWT, error)
|
||||
}
|
||||
|
||||
type ClientCredsTokenRefresher struct {
|
||||
Issuer string
|
||||
OIDCClient *Client
|
||||
}
|
||||
|
||||
func (c *ClientCredsTokenRefresher) Verify(jwt jose.JWT) (err error) {
|
||||
_, err = VerifyClientClaims(jwt, c.Issuer)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *ClientCredsTokenRefresher) Refresh() (jwt jose.JWT, err error) {
|
||||
if err = c.OIDCClient.Healthy(); err != nil {
|
||||
err = fmt.Errorf("unable to authenticate, unhealthy OIDC client: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
jwt, err = c.OIDCClient.ClientCredsToken([]string{"openid"})
|
||||
if err != nil {
|
||||
err = fmt.Errorf("unable to verify auth code with issuer: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type AuthenticatedTransport struct {
|
||||
TokenRefresher
|
||||
http.RoundTripper
|
||||
|
||||
mu sync.Mutex
|
||||
jwt jose.JWT
|
||||
}
|
||||
|
||||
func (t *AuthenticatedTransport) verifiedJWT() (jose.JWT, error) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.TokenRefresher.Verify(t.jwt) == nil {
|
||||
return t.jwt, nil
|
||||
}
|
||||
|
||||
jwt, err := t.TokenRefresher.Refresh()
|
||||
if err != nil {
|
||||
return jose.JWT{}, fmt.Errorf("unable to acquire valid JWT: %v", err)
|
||||
}
|
||||
|
||||
t.jwt = jwt
|
||||
return t.jwt, nil
|
||||
}
|
||||
|
||||
// SetJWT sets the JWT held by the Transport.
|
||||
// This is useful for cases in which you want to set an initial JWT.
|
||||
func (t *AuthenticatedTransport) SetJWT(jwt jose.JWT) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
t.jwt = jwt
|
||||
}
|
||||
|
||||
func (t *AuthenticatedTransport) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
jwt, err := t.verifiedJWT()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req := phttp.CopyRequest(r)
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", jwt.Encode()))
|
||||
return t.RoundTripper.RoundTrip(req)
|
||||
}
|
|
@@ -0,0 +1,109 @@
|
|||
package oidc
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
)
|
||||
|
||||
// RequestTokenExtractor funcs extract a raw encoded token from a request.
|
||||
type RequestTokenExtractor func(r *http.Request) (string, error)
|
||||
|
||||
// ExtractBearerToken is a RequestTokenExtractor which extracts a bearer token from a request's
|
||||
// Authorization header.
|
||||
func ExtractBearerToken(r *http.Request) (string, error) {
|
||||
ah := r.Header.Get("Authorization")
|
||||
if ah == "" {
|
||||
return "", errors.New("missing Authorization header")
|
||||
}
|
||||
|
||||
if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" {
|
||||
return "", errors.New("should be a bearer token")
|
||||
}
|
||||
|
||||
val := ah[7:]
|
||||
if len(val) == 0 {
|
||||
return "", errors.New("bearer token is empty")
|
||||
}
|
||||
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// CookieTokenExtractor returns a RequestTokenExtractor which extracts a token from the named cookie in a request.
|
||||
func CookieTokenExtractor(cookieName string) RequestTokenExtractor {
|
||||
return func(r *http.Request) (string, error) {
|
||||
ck, err := r.Cookie(cookieName)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("token cookie not found in request: %v", err)
|
||||
}
|
||||
|
||||
if ck.Value == "" {
|
||||
return "", errors.New("token cookie found but is empty")
|
||||
}
|
||||
|
||||
return ck.Value, nil
|
||||
}
|
||||
}
|
||||
|
||||
func NewClaims(iss, sub string, aud interface{}, iat, exp time.Time) jose.Claims {
|
||||
return jose.Claims{
|
||||
// required
|
||||
"iss": iss,
|
||||
"sub": sub,
|
||||
"aud": aud,
|
||||
"iat": iat.Unix(),
|
||||
"exp": exp.Unix(),
|
||||
}
|
||||
}
|
||||
|
||||
func GenClientID(hostport string) (string, error) {
|
||||
b, err := randBytes(32)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var host string
|
||||
if strings.Contains(hostport, ":") {
|
||||
host, _, err = net.SplitHostPort(hostport)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
host = hostport
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil
|
||||
}
|
||||
|
||||
func randBytes(n int) ([]byte, error) {
|
||||
b := make([]byte, n)
|
||||
got, err := rand.Read(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if n != got {
|
||||
return nil, errors.New("unable to generate enough random data")
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// urlEqual checks two urls for equality using only the host and path portions.
|
||||
func urlEqual(url1, url2 string) bool {
|
||||
u1, err := url.Parse(url1)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
u2, err := url.Parse(url2)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path)
|
||||
}
|
|
@@ -0,0 +1,190 @@
|
|||
package oidc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/jonboulle/clockwork"
|
||||
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
"github.com/coreos/go-oidc/key"
|
||||
)
|
||||
|
||||
func VerifySignature(jwt jose.JWT, keys []key.PublicKey) (bool, error) {
|
||||
jwtBytes := []byte(jwt.Data())
|
||||
for _, k := range keys {
|
||||
v, err := k.Verifier()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if v.Verify(jwt.Signature, jwtBytes) == nil {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// containsString returns true if the given string (needle) is found
|
||||
// in the string array (haystack).
|
||||
func containsString(needle string, haystack []string) bool {
|
||||
for _, v := range haystack {
|
||||
if v == needle {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Verify claims in accordance with OIDC spec
|
||||
// http://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation
|
||||
func VerifyClaims(jwt jose.JWT, issuer, clientID string) error {
|
||||
now := time.Now().UTC()
|
||||
|
||||
claims, err := jwt.Claims()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ident, err := IdentityFromClaims(claims)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ident.ExpiresAt.Before(now) {
|
||||
return errors.New("token is expired")
|
||||
}
|
||||
|
||||
// iss REQUIRED. Issuer Identifier for the Issuer of the response.
|
||||
// The iss value is a case sensitive URL using the https scheme that contains scheme,
|
||||
// host, and optionally, port number and path components and no query or fragment components.
|
||||
if iss, exists := claims["iss"].(string); exists {
|
||||
if !urlEqual(iss, issuer) {
|
||||
return fmt.Errorf("invalid claim value: 'iss'. expected=%s, found=%s.", issuer, iss)
|
||||
}
|
||||
} else {
|
||||
return errors.New("missing claim: 'iss'")
|
||||
}
|
||||
|
||||
// iat REQUIRED. Time at which the JWT was issued.
|
||||
// Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z
|
||||
// as measured in UTC until the date/time.
|
||||
if _, exists := claims["iat"].(float64); !exists {
|
||||
return errors.New("missing claim: 'iat'")
|
||||
}
|
||||
|
||||
// aud REQUIRED. Audience(s) that this ID Token is intended for.
|
||||
// It MUST contain the OAuth 2.0 client_id of the Relying Party as an audience value.
|
||||
// It MAY also contain identifiers for other audiences. In the general case, the aud
|
||||
// value is an array of case sensitive strings. In the common special case when there
|
||||
// is one audience, the aud value MAY be a single case sensitive string.
|
||||
if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
|
||||
if aud != clientID {
|
||||
return fmt.Errorf("invalid claims, 'aud' claim and 'client_id' do not match, aud=%s, client_id=%s", aud, clientID)
|
||||
}
|
||||
} else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
|
||||
if !containsString(clientID, aud) {
|
||||
return fmt.Errorf("invalid claims, cannot find 'client_id' in 'aud' claim, aud=%v, client_id=%s", aud, clientID)
|
||||
}
|
||||
} else {
|
||||
return errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyClientClaims verifies all the required claims are valid for a "client credentials" JWT.
|
||||
// Returns the client ID if valid, or an error if invalid.
|
||||
func VerifyClientClaims(jwt jose.JWT, issuer string) (string, error) {
|
||||
claims, err := jwt.Claims()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse JWT claims: %v", err)
|
||||
}
|
||||
|
||||
iss, ok, err := claims.StringClaim("iss")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse 'iss' claim: %v", err)
|
||||
} else if !ok {
|
||||
return "", errors.New("missing required 'iss' claim")
|
||||
} else if !urlEqual(iss, issuer) {
|
||||
return "", fmt.Errorf("'iss' claim does not match expected issuer, iss=%s", iss)
|
||||
}
|
||||
|
||||
sub, ok, err := claims.StringClaim("sub")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse 'sub' claim: %v", err)
|
||||
} else if !ok {
|
||||
return "", errors.New("missing required 'sub' claim")
|
||||
}
|
||||
|
||||
if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
|
||||
if aud != sub {
|
||||
return "", fmt.Errorf("invalid claims, 'aud' claim and 'sub' claim do not match, aud=%s, sub=%s", aud, sub)
|
||||
}
|
||||
} else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
|
||||
if !containsString(sub, aud) {
|
||||
return "", fmt.Errorf("invalid claims, cannot find 'sub' in 'aud' claim, aud=%v, sub=%s", aud, sub)
|
||||
}
|
||||
} else {
|
||||
return "", errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
exp, ok, err := claims.TimeClaim("exp")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse 'exp' claim: %v", err)
|
||||
} else if !ok {
|
||||
return "", errors.New("missing required 'exp' claim")
|
||||
} else if exp.Before(now) {
|
||||
return "", fmt.Errorf("token already expired at: %v", exp)
|
||||
}
|
||||
|
||||
return sub, nil
|
||||
}
|
||||
|
||||
type JWTVerifier struct {
|
||||
issuer string
|
||||
clientID string
|
||||
syncFunc func() error
|
||||
keysFunc func() []key.PublicKey
|
||||
clock clockwork.Clock
|
||||
}
|
||||
|
||||
func NewJWTVerifier(issuer, clientID string, syncFunc func() error, keysFunc func() []key.PublicKey) JWTVerifier {
|
||||
return JWTVerifier{
|
||||
issuer: issuer,
|
||||
clientID: clientID,
|
||||
syncFunc: syncFunc,
|
||||
keysFunc: keysFunc,
|
||||
clock: clockwork.NewRealClock(),
|
||||
}
|
||||
}
|
||||
|
||||
func (v *JWTVerifier) Verify(jwt jose.JWT) error {
|
||||
// Verify claims before verifying the signature. This is an optimization to throw out
|
||||
// tokens we know are invalid without undergoing an expensive signature check and
|
||||
// possibly a re-sync event.
|
||||
if err := VerifyClaims(jwt, v.issuer, v.clientID); err != nil {
|
||||
return fmt.Errorf("oidc: JWT claims invalid: %v", err)
|
||||
}
|
||||
|
||||
ok, err := VerifySignature(jwt, v.keysFunc())
|
||||
if err != nil {
|
||||
return fmt.Errorf("oidc: JWT signature verification failed: %v", err)
|
||||
} else if ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = v.syncFunc(); err != nil {
|
||||
return fmt.Errorf("oidc: failed syncing KeySet: %v", err)
|
||||
}
|
||||
|
||||
ok, err = VerifySignature(jwt, v.keysFunc())
|
||||
if err != nil {
|
||||
return fmt.Errorf("oidc: JWT signature verification failed: %v", err)
|
||||
} else if !ok {
|
||||
return errors.New("oidc: unable to verify JWT signature: no matching keys")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,202 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
@@ -0,0 +1,5 @@
|
|||
CoreOS Project
|
||||
Copyright 2014 CoreOS, Inc
|
||||
|
||||
This product includes software developed at CoreOS, Inc.
|
||||
(http://www.coreos.com/).
|
|
@@ -0,0 +1,11 @@
|
|||
health
|
||||
====
|
||||
|
||||
A simple framework for implementing an HTTP health check endpoint on servers.
|
||||
|
||||
Users implement their `health.Checkable` types, and create a `health.Checker`, from which they can get an `http.HandlerFunc` using `health.Checker.MakeHealthHandlerFunc`.
|
||||
|
||||
### Documentation
|
||||
|
||||
For more details, visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/health)
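
As a rough usage sketch based on the `Checkable` and `Checker` types in the vendored `health.go` added below: the `dbPinger` type, the `/healthz` path, and the port are illustrative assumptions, and the checker is mounted directly as an `http.Handler` via its value-receiver `ServeHTTP` method.

package main

import (
	"errors"
	"log"
	"net/http"

	"github.com/coreos/pkg/health"
)

// dbPinger is a hypothetical Checkable that reports whether a backing store is reachable.
type dbPinger struct{ ok bool }

func (d dbPinger) Healthy() error {
	if !d.ok {
		return errors.New("database unreachable")
	}
	return nil
}

func main() {
	// Checker has a value-receiver ServeHTTP, so it satisfies http.Handler directly.
	checker := health.Checker{Checks: []health.Checkable{dbPinger{ok: true}}}
	http.Handle("/healthz", checker)
	log.Fatal(http.ListenAndServe(":8080", nil))
}

While every check passes, a GET to /healthz returns `{"status":"ok"}`; if any check fails, the default unhealthy handler responds with a 500 and an error payload.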
|
||||
|
|
@@ -0,0 +1,127 @@
|
|||
package health
|
||||
|
||||
import (
|
||||
"expvar"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/coreos/pkg/httputil"
|
||||
)
|
||||
|
||||
// Checkables should return nil when the thing they are checking is healthy, and an error otherwise.
|
||||
type Checkable interface {
|
||||
Healthy() error
|
||||
}
|
||||
|
||||
// Checker provides a way to make an endpoint which can be probed for system health.
|
||||
type Checker struct {
|
||||
// Checks are the Checkables to be checked when probing.
|
||||
Checks []Checkable
|
||||
|
||||
// UnhealthyHandler is called when one or more of the checks are unhealthy.
|
||||
// If not provided, DefaultUnhealthyHandler is called.
|
||||
UnhealthyHandler UnhealthyHandler
|
||||
|
||||
// HealthyHandler is called when all checks are healthy.
|
||||
// If not provided, DefaultHealthyHandler is called.
|
||||
HealthyHandler http.HandlerFunc
|
||||
}
|
||||
|
||||
func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
unhealthyHandler := c.UnhealthyHandler
|
||||
if unhealthyHandler == nil {
|
||||
unhealthyHandler = DefaultUnhealthyHandler
|
||||
}
|
||||
|
||||
successHandler := c.HealthyHandler
|
||||
if successHandler == nil {
|
||||
successHandler = DefaultHealthyHandler
|
||||
}
|
||||
|
||||
if r.Method != "GET" {
|
||||
w.Header().Set("Allow", "GET")
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
if err := Check(c.Checks); err != nil {
|
||||
unhealthyHandler(w, r, err)
|
||||
return
|
||||
}
|
||||
|
||||
successHandler(w, r)
|
||||
}
|
||||
|
||||
type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error)
|
||||
|
||||
type StatusResponse struct {
|
||||
Status string `json:"status"`
|
||||
Details *StatusResponseDetails `json:"details,omitempty"`
|
||||
}
|
||||
|
||||
type StatusResponseDetails struct {
|
||||
Code int `json:"code,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
}
|
||||
|
||||
func Check(checks []Checkable) (err error) {
|
||||
errs := []error{}
|
||||
for _, c := range checks {
|
||||
if e := c.Healthy(); e != nil {
|
||||
errs = append(errs, e)
|
||||
}
|
||||
}
|
||||
|
||||
switch len(errs) {
|
||||
case 0:
|
||||
err = nil
|
||||
case 1:
|
||||
err = errs[0]
|
||||
default:
|
||||
err = fmt.Errorf("multiple health check failure: %v", errs)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{
|
||||
Status: "ok",
|
||||
})
|
||||
if err != nil {
|
||||
// TODO(bobbyrullo): replace with logging from new logging pkg,
|
||||
// once it lands.
|
||||
log.Printf("Failed to write JSON response: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) {
|
||||
writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{
|
||||
Status: "error",
|
||||
Details: &StatusResponseDetails{
|
||||
Code: http.StatusInternalServerError,
|
||||
Message: err.Error(),
|
||||
},
|
||||
})
|
||||
if writeErr != nil {
|
||||
// TODO(bobbyrullo): replace with logging from new logging pkg,
|
||||
// once it lands.
|
||||
log.Printf("Failed to write JSON response: %v", writeErr)
|
||||
}
|
||||
}
|
||||
|
||||
// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported.
|
||||
func ExpvarHandler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
fmt.Fprintf(w, "{\n")
|
||||
first := true
|
||||
expvar.Do(func(kv expvar.KeyValue) {
|
||||
if !first {
|
||||
fmt.Fprintf(w, ",\n")
|
||||
}
|
||||
first = false
|
||||
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
|
||||
})
|
||||
fmt.Fprintf(w, "\n}\n")
|
||||
}
|
|
@@ -0,0 +1,13 @@
|
|||
httputil
|
||||
====
|
||||
|
||||
Common code for dealing with HTTP.
|
||||
|
||||
Includes:
|
||||
|
||||
* Code for returning JSON responses.
|
||||
|
||||
### Documentation
|
||||
|
||||
Visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/httputil)
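
A minimal sketch of the JSON helper, assuming the `pingHandler` and `pingResponse` names below (they are not part of the package); `WriteJSONResponse` comes from the `json.go` file added later in this commit.

package main

import (
	"log"
	"net/http"

	"github.com/coreos/pkg/httputil"
)

type pingResponse struct {
	Status string `json:"status"`
}

func pingHandler(w http.ResponseWriter, r *http.Request) {
	// WriteJSONResponse marshals the value, sets the Content-Type header,
	// writes the status code, and then writes the encoded body.
	if err := httputil.WriteJSONResponse(w, http.StatusOK, pingResponse{Status: "ok"}); err != nil {
		log.Printf("failed to write JSON response: %v", err)
	}
}

func main() {
	http.HandleFunc("/ping", pingHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}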
|
||||
|
|
@@ -0,0 +1,21 @@
|
|||
package httputil
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DeleteCookies effectively deletes all named cookies
|
||||
// by wiping all data and setting to expire immediately.
|
||||
func DeleteCookies(w http.ResponseWriter, cookieNames ...string) {
|
||||
for _, n := range cookieNames {
|
||||
c := &http.Cookie{
|
||||
Name: n,
|
||||
Value: "",
|
||||
Path: "/",
|
||||
MaxAge: -1,
|
||||
Expires: time.Time{},
|
||||
}
|
||||
http.SetCookie(w, c)
|
||||
}
|
||||
}
|
|
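For illustration only, a hypothetical logout handler could clear named session cookies with DeleteCookies; the cookie names below are assumptions, not part of this package:

```
package main

import (
	"net/http"

	"github.com/coreos/pkg/httputil"
)

// logout clears the named cookies before redirecting; the cookie names are
// placeholders for this sketch.
func logout(w http.ResponseWriter, r *http.Request) {
	httputil.DeleteCookies(w, "session", "csrf_token")
	http.Redirect(w, r, "/", http.StatusFound)
}

func main() {
	http.HandleFunc("/logout", logout)
	http.ListenAndServe(":8080", nil)
}
```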
@ -0,0 +1,27 @@
package httputil

import (
	"encoding/json"
	"net/http"
)

const (
	JSONContentType = "application/json"
)

func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error {
	enc, err := json.Marshal(resp)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}

	w.Header().Set("Content-Type", JSONContentType)
	w.WriteHeader(code)

	_, err = w.Write(enc)
	if err != nil {
		return err
	}
	return nil
}
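As a rough usage sketch (not part of this commit), WriteJSONResponse can back a simple status endpoint:

```
package main

import (
	"log"
	"net/http"

	"github.com/coreos/pkg/httputil"
)

func main() {
	http.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
		// WriteJSONResponse marshals the value, sets the JSON Content-Type
		// and writes the status code before the body.
		if err := httputil.WriteJSONResponse(w, http.StatusOK, map[string]string{"status": "ok"}); err != nil {
			log.Printf("failed to write JSON response: %v", err)
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```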
@ -0,0 +1,15 @@
package timeutil

import (
	"time"
)

func ExpBackoff(prev, max time.Duration) time.Duration {
	if prev == 0 {
		return time.Second
	}
	if prev > max/2 {
		return max
	}
	return 2 * prev
}
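A minimal sketch of a retry loop built on ExpBackoff; doWork is a hypothetical fallible operation used only for illustration:

```
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/coreos/pkg/timeutil"
)

// doWork stands in for a fallible operation; it succeeds on the third try.
func doWork(attempt int) error {
	if attempt < 2 {
		return errors.New("transient failure")
	}
	return nil
}

func main() {
	var backoff time.Duration
	for attempt := 0; attempt < 5; attempt++ {
		if err := doWork(attempt); err == nil {
			fmt.Println("succeeded")
			return
		}
		// ExpBackoff doubles the previous interval, starting at one second
		// and capping at the given maximum.
		backoff = timeutil.ExpBackoff(backoff, 30*time.Second)
		time.Sleep(backoff)
	}
	fmt.Println("giving up")
}
```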
@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test

*.swp
@ -0,0 +1,5 @@
language: go
go:
  - 1.3

sudo: false
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,61 @@
clockwork
=========

[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork)
[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork)

a simple fake clock for golang

# Usage

Replace uses of the `time` package with the `clockwork.Clock` interface instead.

For example, instead of using `time.Sleep` directly:

```
func my_func() {
	time.Sleep(3 * time.Second)
	do_something()
}
```

inject a clock and use its `Sleep` method instead:

```
func my_func(clock clockwork.Clock) {
	clock.Sleep(3 * time.Second)
	do_something()
}
```

Now you can easily test `my_func` with a `FakeClock`:

```
func TestMyFunc(t *testing.T) {
	c := clockwork.NewFakeClock()

	// Start our sleepy function
	my_func(c)

	// Ensure we wait until my_func is sleeping
	c.BlockUntil(1)

	assert_state()

	// Advance the FakeClock forward in time
	c.Advance(3)

	assert_state()
}
```

and in production builds, simply inject the real clock instead:
```
my_func(clockwork.NewRealClock())
```

See [example_test.go](example_test.go) for a full example.

# Credits

clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking time)
@ -0,0 +1,169 @@
|
|||
package clockwork
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Clock provides an interface that packages can use instead of directly
|
||||
// using the time module, so that chronology-related behavior can be tested
|
||||
type Clock interface {
|
||||
After(d time.Duration) <-chan time.Time
|
||||
Sleep(d time.Duration)
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
// FakeClock provides an interface for a clock which can be
|
||||
// manually advanced through time
|
||||
type FakeClock interface {
|
||||
Clock
|
||||
// Advance advances the FakeClock to a new point in time, ensuring any existing
|
||||
// sleepers are notified appropriately before returning
|
||||
Advance(d time.Duration)
|
||||
// BlockUntil will block until the FakeClock has the given number of
|
||||
// sleepers (callers of Sleep or After)
|
||||
BlockUntil(n int)
|
||||
}
|
||||
|
||||
// NewRealClock returns a Clock which simply delegates calls to the actual time
|
||||
// package; it should be used by packages in production.
|
||||
func NewRealClock() Clock {
|
||||
return &realClock{}
|
||||
}
|
||||
|
||||
// NewFakeClock returns a FakeClock implementation which can be
|
||||
// manually advanced through time for testing. The initial time of the
|
||||
// FakeClock will be an arbitrary non-zero time.
|
||||
func NewFakeClock() FakeClock {
|
||||
// use a fixture that does not fulfill Time.IsZero()
|
||||
return NewFakeClockAt(time.Date(1984, time.April, 4, 0, 0, 0, 0, time.UTC))
|
||||
}
|
||||
|
||||
// NewFakeClockAt returns a FakeClock initialised at the given time.Time.
|
||||
func NewFakeClockAt(t time.Time) FakeClock {
|
||||
return &fakeClock{
|
||||
time: t,
|
||||
}
|
||||
}
|
||||
|
||||
type realClock struct{}
|
||||
|
||||
func (rc *realClock) After(d time.Duration) <-chan time.Time {
|
||||
return time.After(d)
|
||||
}
|
||||
|
||||
func (rc *realClock) Sleep(d time.Duration) {
|
||||
time.Sleep(d)
|
||||
}
|
||||
|
||||
func (rc *realClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
type fakeClock struct {
|
||||
sleepers []*sleeper
|
||||
blockers []*blocker
|
||||
time time.Time
|
||||
|
||||
l sync.RWMutex
|
||||
}
|
||||
|
||||
// sleeper represents a caller of After or Sleep
|
||||
type sleeper struct {
|
||||
until time.Time
|
||||
done chan time.Time
|
||||
}
|
||||
|
||||
// blocker represents a caller of BlockUntil
|
||||
type blocker struct {
|
||||
count int
|
||||
ch chan struct{}
|
||||
}
|
||||
|
||||
// After mimics time.After; it waits for the given duration to elapse on the
|
||||
// fakeClock, then sends the current time on the returned channel.
|
||||
func (fc *fakeClock) After(d time.Duration) <-chan time.Time {
|
||||
fc.l.Lock()
|
||||
defer fc.l.Unlock()
|
||||
now := fc.time
|
||||
done := make(chan time.Time, 1)
|
||||
if d.Nanoseconds() == 0 {
|
||||
// special case - trigger immediately
|
||||
done <- now
|
||||
} else {
|
||||
// otherwise, add to the set of sleepers
|
||||
s := &sleeper{
|
||||
until: now.Add(d),
|
||||
done: done,
|
||||
}
|
||||
fc.sleepers = append(fc.sleepers, s)
|
||||
// and notify any blockers
|
||||
fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
|
||||
}
|
||||
return done
|
||||
}
|
||||
|
||||
// notifyBlockers notifies all the blockers waiting until the
|
||||
// given number of sleepers are waiting on the fakeClock. It
|
||||
// returns an updated slice of blockers (i.e. those still waiting)
|
||||
func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) {
|
||||
for _, b := range blockers {
|
||||
if b.count == count {
|
||||
close(b.ch)
|
||||
} else {
|
||||
newBlockers = append(newBlockers, b)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Sleep blocks until the given duration has passed on the fakeClock
|
||||
func (fc *fakeClock) Sleep(d time.Duration) {
|
||||
<-fc.After(d)
|
||||
}
|
||||
|
||||
// Time returns the current time of the fakeClock
|
||||
func (fc *fakeClock) Now() time.Time {
|
||||
fc.l.RLock()
|
||||
t := fc.time
|
||||
fc.l.RUnlock()
|
||||
return t
|
||||
}
|
||||
|
||||
// Advance advances fakeClock to a new point in time, ensuring channels from any
|
||||
// previous invocations of After are notified appropriately before returning
|
||||
func (fc *fakeClock) Advance(d time.Duration) {
|
||||
fc.l.Lock()
|
||||
defer fc.l.Unlock()
|
||||
end := fc.time.Add(d)
|
||||
var newSleepers []*sleeper
|
||||
for _, s := range fc.sleepers {
|
||||
if end.Sub(s.until) >= 0 {
|
||||
s.done <- end
|
||||
} else {
|
||||
newSleepers = append(newSleepers, s)
|
||||
}
|
||||
}
|
||||
fc.sleepers = newSleepers
|
||||
fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
|
||||
fc.time = end
|
||||
}
|
||||
|
||||
// BlockUntil will block until the fakeClock has the given number of sleepers
|
||||
// (callers of Sleep or After)
|
||||
func (fc *fakeClock) BlockUntil(n int) {
|
||||
fc.l.Lock()
|
||||
// Fast path: current number of sleepers is what we're looking for
|
||||
if len(fc.sleepers) == n {
|
||||
fc.l.Unlock()
|
||||
return
|
||||
}
|
||||
// Otherwise, set up a new blocker
|
||||
b := &blocker{
|
||||
count: n,
|
||||
ch: make(chan struct{}),
|
||||
}
|
||||
fc.blockers = append(fc.blockers, b)
|
||||
fc.l.Unlock()
|
||||
<-b.ch
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
#define REDMASK51 0x0007FFFFFFFFFFFF
|
|
@ -0,0 +1,20 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
// These constants cannot be encoded in non-MOVQ immediates.
|
||||
// We access them directly from memory instead.
|
||||
|
||||
DATA ·_121666_213(SB)/8, $996687872
|
||||
GLOBL ·_121666_213(SB), 8, $8
|
||||
|
||||
DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
|
||||
GLOBL ·_2P0(SB), 8, $8
|
||||
|
||||
DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
|
||||
GLOBL ·_2P1234(SB), 8, $8
|
|
@ -0,0 +1,65 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
// func cswap(inout *[4][5]uint64, v uint64)
|
||||
TEXT ·cswap(SB),7,$0
|
||||
MOVQ inout+0(FP),DI
|
||||
MOVQ v+8(FP),SI
|
||||
|
||||
SUBQ $1, SI
|
||||
NOTQ SI
|
||||
MOVQ SI, X15
|
||||
PSHUFD $0x44, X15, X15
|
||||
|
||||
MOVOU 0(DI), X0
|
||||
MOVOU 16(DI), X2
|
||||
MOVOU 32(DI), X4
|
||||
MOVOU 48(DI), X6
|
||||
MOVOU 64(DI), X8
|
||||
MOVOU 80(DI), X1
|
||||
MOVOU 96(DI), X3
|
||||
MOVOU 112(DI), X5
|
||||
MOVOU 128(DI), X7
|
||||
MOVOU 144(DI), X9
|
||||
|
||||
MOVO X1, X10
|
||||
MOVO X3, X11
|
||||
MOVO X5, X12
|
||||
MOVO X7, X13
|
||||
MOVO X9, X14
|
||||
|
||||
PXOR X0, X10
|
||||
PXOR X2, X11
|
||||
PXOR X4, X12
|
||||
PXOR X6, X13
|
||||
PXOR X8, X14
|
||||
PAND X15, X10
|
||||
PAND X15, X11
|
||||
PAND X15, X12
|
||||
PAND X15, X13
|
||||
PAND X15, X14
|
||||
PXOR X10, X0
|
||||
PXOR X10, X1
|
||||
PXOR X11, X2
|
||||
PXOR X11, X3
|
||||
PXOR X12, X4
|
||||
PXOR X12, X5
|
||||
PXOR X13, X6
|
||||
PXOR X13, X7
|
||||
PXOR X14, X8
|
||||
PXOR X14, X9
|
||||
|
||||
MOVOU X0, 0(DI)
|
||||
MOVOU X2, 16(DI)
|
||||
MOVOU X4, 32(DI)
|
||||
MOVOU X6, 48(DI)
|
||||
MOVOU X8, 64(DI)
|
||||
MOVOU X1, 80(DI)
|
||||
MOVOU X3, 96(DI)
|
||||
MOVOU X5, 112(DI)
|
||||
MOVOU X7, 128(DI)
|
||||
MOVOU X9, 144(DI)
|
||||
RET
|
|
@ -0,0 +1,834 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// We have an implementation in amd64 assembly so this code is only run on
|
||||
// non-amd64 platforms. The amd64 assembly does not support gccgo.
|
||||
// +build !amd64 gccgo appengine
|
||||
|
||||
package curve25519
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// This code is a port of the public domain, "ref10" implementation of
|
||||
// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
|
||||
|
||||
// fieldElement represents an element of the field GF(2^255 - 19). An element
|
||||
// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
|
||||
// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
|
||||
// context.
|
||||
type fieldElement [10]int32
|
||||
|
||||
func feZero(fe *fieldElement) {
|
||||
for i := range fe {
|
||||
fe[i] = 0
|
||||
}
|
||||
}
|
||||
|
||||
func feOne(fe *fieldElement) {
|
||||
feZero(fe)
|
||||
fe[0] = 1
|
||||
}
|
||||
|
||||
func feAdd(dst, a, b *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = a[i] + b[i]
|
||||
}
|
||||
}
|
||||
|
||||
func feSub(dst, a, b *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = a[i] - b[i]
|
||||
}
|
||||
}
|
||||
|
||||
func feCopy(dst, src *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = src[i]
|
||||
}
|
||||
}
|
||||
|
||||
// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
|
||||
//
|
||||
// Preconditions: b in {0,1}.
|
||||
func feCSwap(f, g *fieldElement, b int32) {
|
||||
b = -b
|
||||
for i := range f {
|
||||
t := b & (f[i] ^ g[i])
|
||||
f[i] ^= t
|
||||
g[i] ^= t
|
||||
}
|
||||
}
|
||||
|
||||
// load3 reads a 24-bit, little-endian value from in.
|
||||
func load3(in []byte) int64 {
|
||||
var r int64
|
||||
r = int64(in[0])
|
||||
r |= int64(in[1]) << 8
|
||||
r |= int64(in[2]) << 16
|
||||
return r
|
||||
}
|
||||
|
||||
// load4 reads a 32-bit, little-endian value from in.
|
||||
func load4(in []byte) int64 {
|
||||
return int64(binary.LittleEndian.Uint32(in))
|
||||
}
|
||||
|
||||
func feFromBytes(dst *fieldElement, src *[32]byte) {
|
||||
h0 := load4(src[:])
|
||||
h1 := load3(src[4:]) << 6
|
||||
h2 := load3(src[7:]) << 5
|
||||
h3 := load3(src[10:]) << 3
|
||||
h4 := load3(src[13:]) << 2
|
||||
h5 := load4(src[16:])
|
||||
h6 := load3(src[20:]) << 7
|
||||
h7 := load3(src[23:]) << 5
|
||||
h8 := load3(src[26:]) << 4
|
||||
h9 := load3(src[29:]) << 2
|
||||
|
||||
var carry [10]int64
|
||||
carry[9] = (h9 + 1<<24) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
carry[1] = (h1 + 1<<24) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[3] = (h3 + 1<<24) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[5] = (h5 + 1<<24) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
carry[7] = (h7 + 1<<24) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[0] = (h0 + 1<<25) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[2] = (h2 + 1<<25) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[4] = (h4 + 1<<25) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[6] = (h6 + 1<<25) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
carry[8] = (h8 + 1<<25) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
dst[0] = int32(h0)
|
||||
dst[1] = int32(h1)
|
||||
dst[2] = int32(h2)
|
||||
dst[3] = int32(h3)
|
||||
dst[4] = int32(h4)
|
||||
dst[5] = int32(h5)
|
||||
dst[6] = int32(h6)
|
||||
dst[7] = int32(h7)
|
||||
dst[8] = int32(h8)
|
||||
dst[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feToBytes marshals h to s.
|
||||
// Preconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
//
|
||||
// Write p=2^255-19; q=floor(h/p).
|
||||
// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
|
||||
//
|
||||
// Proof:
|
||||
// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
|
||||
// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
|
||||
//
|
||||
// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
|
||||
// Then 0<y<1.
|
||||
//
|
||||
// Write r=h-pq.
|
||||
// Have 0<=r<=p-1=2^255-20.
|
||||
// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
|
||||
//
|
||||
// Write x=r+19(2^-255)r+y.
|
||||
// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
|
||||
//
|
||||
// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
|
||||
// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
|
||||
func feToBytes(s *[32]byte, h *fieldElement) {
|
||||
var carry [10]int32
|
||||
|
||||
q := (19*h[9] + (1 << 24)) >> 25
|
||||
q = (h[0] + q) >> 26
|
||||
q = (h[1] + q) >> 25
|
||||
q = (h[2] + q) >> 26
|
||||
q = (h[3] + q) >> 25
|
||||
q = (h[4] + q) >> 26
|
||||
q = (h[5] + q) >> 25
|
||||
q = (h[6] + q) >> 26
|
||||
q = (h[7] + q) >> 25
|
||||
q = (h[8] + q) >> 26
|
||||
q = (h[9] + q) >> 25
|
||||
|
||||
// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
|
||||
h[0] += 19 * q
|
||||
// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
|
||||
|
||||
carry[0] = h[0] >> 26
|
||||
h[1] += carry[0]
|
||||
h[0] -= carry[0] << 26
|
||||
carry[1] = h[1] >> 25
|
||||
h[2] += carry[1]
|
||||
h[1] -= carry[1] << 25
|
||||
carry[2] = h[2] >> 26
|
||||
h[3] += carry[2]
|
||||
h[2] -= carry[2] << 26
|
||||
carry[3] = h[3] >> 25
|
||||
h[4] += carry[3]
|
||||
h[3] -= carry[3] << 25
|
||||
carry[4] = h[4] >> 26
|
||||
h[5] += carry[4]
|
||||
h[4] -= carry[4] << 26
|
||||
carry[5] = h[5] >> 25
|
||||
h[6] += carry[5]
|
||||
h[5] -= carry[5] << 25
|
||||
carry[6] = h[6] >> 26
|
||||
h[7] += carry[6]
|
||||
h[6] -= carry[6] << 26
|
||||
carry[7] = h[7] >> 25
|
||||
h[8] += carry[7]
|
||||
h[7] -= carry[7] << 25
|
||||
carry[8] = h[8] >> 26
|
||||
h[9] += carry[8]
|
||||
h[8] -= carry[8] << 26
|
||||
carry[9] = h[9] >> 25
|
||||
h[9] -= carry[9] << 25
|
||||
// h10 = carry9
|
||||
|
||||
// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
|
||||
// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
|
||||
// evidently 2^255 h10-2^255 q = 0.
|
||||
// Goal: Output h[0]+...+2^230 h[9].
|
||||
|
||||
s[0] = byte(h[0] >> 0)
|
||||
s[1] = byte(h[0] >> 8)
|
||||
s[2] = byte(h[0] >> 16)
|
||||
s[3] = byte((h[0] >> 24) | (h[1] << 2))
|
||||
s[4] = byte(h[1] >> 6)
|
||||
s[5] = byte(h[1] >> 14)
|
||||
s[6] = byte((h[1] >> 22) | (h[2] << 3))
|
||||
s[7] = byte(h[2] >> 5)
|
||||
s[8] = byte(h[2] >> 13)
|
||||
s[9] = byte((h[2] >> 21) | (h[3] << 5))
|
||||
s[10] = byte(h[3] >> 3)
|
||||
s[11] = byte(h[3] >> 11)
|
||||
s[12] = byte((h[3] >> 19) | (h[4] << 6))
|
||||
s[13] = byte(h[4] >> 2)
|
||||
s[14] = byte(h[4] >> 10)
|
||||
s[15] = byte(h[4] >> 18)
|
||||
s[16] = byte(h[5] >> 0)
|
||||
s[17] = byte(h[5] >> 8)
|
||||
s[18] = byte(h[5] >> 16)
|
||||
s[19] = byte((h[5] >> 24) | (h[6] << 1))
|
||||
s[20] = byte(h[6] >> 7)
|
||||
s[21] = byte(h[6] >> 15)
|
||||
s[22] = byte((h[6] >> 23) | (h[7] << 3))
|
||||
s[23] = byte(h[7] >> 5)
|
||||
s[24] = byte(h[7] >> 13)
|
||||
s[25] = byte((h[7] >> 21) | (h[8] << 4))
|
||||
s[26] = byte(h[8] >> 4)
|
||||
s[27] = byte(h[8] >> 12)
|
||||
s[28] = byte((h[8] >> 20) | (h[9] << 6))
|
||||
s[29] = byte(h[9] >> 2)
|
||||
s[30] = byte(h[9] >> 10)
|
||||
s[31] = byte(h[9] >> 18)
|
||||
}
|
||||
|
||||
// feMul calculates h = f * g
|
||||
// Can overlap h with f or g.
|
||||
//
|
||||
// Preconditions:
|
||||
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
//
|
||||
// Postconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
//
|
||||
// Notes on implementation strategy:
|
||||
//
|
||||
// Using schoolbook multiplication.
|
||||
// Karatsuba would save a little in some cost models.
|
||||
//
|
||||
// Most multiplications by 2 and 19 are 32-bit precomputations;
|
||||
// cheaper than 64-bit postcomputations.
|
||||
//
|
||||
// There is one remaining multiplication by 19 in the carry chain;
|
||||
// one *19 precomputation can be merged into this,
|
||||
// but the resulting data flow is considerably less clean.
|
||||
//
|
||||
// There are 12 carries below.
|
||||
// 10 of them are 2-way parallelizable and vectorizable.
|
||||
// Can get away with 11 carries, but then data flow is much deeper.
|
||||
//
|
||||
// With tighter constraints on inputs can squeeze carries into int32.
|
||||
func feMul(h, f, g *fieldElement) {
|
||||
f0 := f[0]
|
||||
f1 := f[1]
|
||||
f2 := f[2]
|
||||
f3 := f[3]
|
||||
f4 := f[4]
|
||||
f5 := f[5]
|
||||
f6 := f[6]
|
||||
f7 := f[7]
|
||||
f8 := f[8]
|
||||
f9 := f[9]
|
||||
g0 := g[0]
|
||||
g1 := g[1]
|
||||
g2 := g[2]
|
||||
g3 := g[3]
|
||||
g4 := g[4]
|
||||
g5 := g[5]
|
||||
g6 := g[6]
|
||||
g7 := g[7]
|
||||
g8 := g[8]
|
||||
g9 := g[9]
|
||||
g1_19 := 19 * g1 // 1.4*2^29
|
||||
g2_19 := 19 * g2 // 1.4*2^30; still ok
|
||||
g3_19 := 19 * g3
|
||||
g4_19 := 19 * g4
|
||||
g5_19 := 19 * g5
|
||||
g6_19 := 19 * g6
|
||||
g7_19 := 19 * g7
|
||||
g8_19 := 19 * g8
|
||||
g9_19 := 19 * g9
|
||||
f1_2 := 2 * f1
|
||||
f3_2 := 2 * f3
|
||||
f5_2 := 2 * f5
|
||||
f7_2 := 2 * f7
|
||||
f9_2 := 2 * f9
|
||||
f0g0 := int64(f0) * int64(g0)
|
||||
f0g1 := int64(f0) * int64(g1)
|
||||
f0g2 := int64(f0) * int64(g2)
|
||||
f0g3 := int64(f0) * int64(g3)
|
||||
f0g4 := int64(f0) * int64(g4)
|
||||
f0g5 := int64(f0) * int64(g5)
|
||||
f0g6 := int64(f0) * int64(g6)
|
||||
f0g7 := int64(f0) * int64(g7)
|
||||
f0g8 := int64(f0) * int64(g8)
|
||||
f0g9 := int64(f0) * int64(g9)
|
||||
f1g0 := int64(f1) * int64(g0)
|
||||
f1g1_2 := int64(f1_2) * int64(g1)
|
||||
f1g2 := int64(f1) * int64(g2)
|
||||
f1g3_2 := int64(f1_2) * int64(g3)
|
||||
f1g4 := int64(f1) * int64(g4)
|
||||
f1g5_2 := int64(f1_2) * int64(g5)
|
||||
f1g6 := int64(f1) * int64(g6)
|
||||
f1g7_2 := int64(f1_2) * int64(g7)
|
||||
f1g8 := int64(f1) * int64(g8)
|
||||
f1g9_38 := int64(f1_2) * int64(g9_19)
|
||||
f2g0 := int64(f2) * int64(g0)
|
||||
f2g1 := int64(f2) * int64(g1)
|
||||
f2g2 := int64(f2) * int64(g2)
|
||||
f2g3 := int64(f2) * int64(g3)
|
||||
f2g4 := int64(f2) * int64(g4)
|
||||
f2g5 := int64(f2) * int64(g5)
|
||||
f2g6 := int64(f2) * int64(g6)
|
||||
f2g7 := int64(f2) * int64(g7)
|
||||
f2g8_19 := int64(f2) * int64(g8_19)
|
||||
f2g9_19 := int64(f2) * int64(g9_19)
|
||||
f3g0 := int64(f3) * int64(g0)
|
||||
f3g1_2 := int64(f3_2) * int64(g1)
|
||||
f3g2 := int64(f3) * int64(g2)
|
||||
f3g3_2 := int64(f3_2) * int64(g3)
|
||||
f3g4 := int64(f3) * int64(g4)
|
||||
f3g5_2 := int64(f3_2) * int64(g5)
|
||||
f3g6 := int64(f3) * int64(g6)
|
||||
f3g7_38 := int64(f3_2) * int64(g7_19)
|
||||
f3g8_19 := int64(f3) * int64(g8_19)
|
||||
f3g9_38 := int64(f3_2) * int64(g9_19)
|
||||
f4g0 := int64(f4) * int64(g0)
|
||||
f4g1 := int64(f4) * int64(g1)
|
||||
f4g2 := int64(f4) * int64(g2)
|
||||
f4g3 := int64(f4) * int64(g3)
|
||||
f4g4 := int64(f4) * int64(g4)
|
||||
f4g5 := int64(f4) * int64(g5)
|
||||
f4g6_19 := int64(f4) * int64(g6_19)
|
||||
f4g7_19 := int64(f4) * int64(g7_19)
|
||||
f4g8_19 := int64(f4) * int64(g8_19)
|
||||
f4g9_19 := int64(f4) * int64(g9_19)
|
||||
f5g0 := int64(f5) * int64(g0)
|
||||
f5g1_2 := int64(f5_2) * int64(g1)
|
||||
f5g2 := int64(f5) * int64(g2)
|
||||
f5g3_2 := int64(f5_2) * int64(g3)
|
||||
f5g4 := int64(f5) * int64(g4)
|
||||
f5g5_38 := int64(f5_2) * int64(g5_19)
|
||||
f5g6_19 := int64(f5) * int64(g6_19)
|
||||
f5g7_38 := int64(f5_2) * int64(g7_19)
|
||||
f5g8_19 := int64(f5) * int64(g8_19)
|
||||
f5g9_38 := int64(f5_2) * int64(g9_19)
|
||||
f6g0 := int64(f6) * int64(g0)
|
||||
f6g1 := int64(f6) * int64(g1)
|
||||
f6g2 := int64(f6) * int64(g2)
|
||||
f6g3 := int64(f6) * int64(g3)
|
||||
f6g4_19 := int64(f6) * int64(g4_19)
|
||||
f6g5_19 := int64(f6) * int64(g5_19)
|
||||
f6g6_19 := int64(f6) * int64(g6_19)
|
||||
f6g7_19 := int64(f6) * int64(g7_19)
|
||||
f6g8_19 := int64(f6) * int64(g8_19)
|
||||
f6g9_19 := int64(f6) * int64(g9_19)
|
||||
f7g0 := int64(f7) * int64(g0)
|
||||
f7g1_2 := int64(f7_2) * int64(g1)
|
||||
f7g2 := int64(f7) * int64(g2)
|
||||
f7g3_38 := int64(f7_2) * int64(g3_19)
|
||||
f7g4_19 := int64(f7) * int64(g4_19)
|
||||
f7g5_38 := int64(f7_2) * int64(g5_19)
|
||||
f7g6_19 := int64(f7) * int64(g6_19)
|
||||
f7g7_38 := int64(f7_2) * int64(g7_19)
|
||||
f7g8_19 := int64(f7) * int64(g8_19)
|
||||
f7g9_38 := int64(f7_2) * int64(g9_19)
|
||||
f8g0 := int64(f8) * int64(g0)
|
||||
f8g1 := int64(f8) * int64(g1)
|
||||
f8g2_19 := int64(f8) * int64(g2_19)
|
||||
f8g3_19 := int64(f8) * int64(g3_19)
|
||||
f8g4_19 := int64(f8) * int64(g4_19)
|
||||
f8g5_19 := int64(f8) * int64(g5_19)
|
||||
f8g6_19 := int64(f8) * int64(g6_19)
|
||||
f8g7_19 := int64(f8) * int64(g7_19)
|
||||
f8g8_19 := int64(f8) * int64(g8_19)
|
||||
f8g9_19 := int64(f8) * int64(g9_19)
|
||||
f9g0 := int64(f9) * int64(g0)
|
||||
f9g1_38 := int64(f9_2) * int64(g1_19)
|
||||
f9g2_19 := int64(f9) * int64(g2_19)
|
||||
f9g3_38 := int64(f9_2) * int64(g3_19)
|
||||
f9g4_19 := int64(f9) * int64(g4_19)
|
||||
f9g5_38 := int64(f9_2) * int64(g5_19)
|
||||
f9g6_19 := int64(f9) * int64(g6_19)
|
||||
f9g7_38 := int64(f9_2) * int64(g7_19)
|
||||
f9g8_19 := int64(f9) * int64(g8_19)
|
||||
f9g9_38 := int64(f9_2) * int64(g9_19)
|
||||
h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
|
||||
h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
|
||||
h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
|
||||
h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
|
||||
h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
|
||||
h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
|
||||
h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
|
||||
h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
|
||||
h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
|
||||
h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
|
||||
var carry [10]int64
|
||||
|
||||
// |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
|
||||
// i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
|
||||
// |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
|
||||
// i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
// |h0| <= 2^25
|
||||
// |h4| <= 2^25
|
||||
// |h1| <= 1.51*2^58
|
||||
// |h5| <= 1.51*2^58
|
||||
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
// |h1| <= 2^24; from now on fits into int32
|
||||
// |h5| <= 2^24; from now on fits into int32
|
||||
// |h2| <= 1.21*2^59
|
||||
// |h6| <= 1.21*2^59
|
||||
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
// |h2| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h6| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h3| <= 1.51*2^58
|
||||
// |h7| <= 1.51*2^58
|
||||
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
// |h3| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h7| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h4| <= 1.52*2^33
|
||||
// |h8| <= 1.52*2^33
|
||||
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
// |h4| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h8| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h5| <= 1.01*2^24
|
||||
// |h9| <= 1.51*2^58
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
// |h9| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h0| <= 1.8*2^37
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
// |h0| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h1| <= 1.01*2^24
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feSquare calculates h = f*f. Can overlap h with f.
|
||||
//
|
||||
// Preconditions:
|
||||
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
//
|
||||
// Postconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
func feSquare(h, f *fieldElement) {
|
||||
f0 := f[0]
|
||||
f1 := f[1]
|
||||
f2 := f[2]
|
||||
f3 := f[3]
|
||||
f4 := f[4]
|
||||
f5 := f[5]
|
||||
f6 := f[6]
|
||||
f7 := f[7]
|
||||
f8 := f[8]
|
||||
f9 := f[9]
|
||||
f0_2 := 2 * f0
|
||||
f1_2 := 2 * f1
|
||||
f2_2 := 2 * f2
|
||||
f3_2 := 2 * f3
|
||||
f4_2 := 2 * f4
|
||||
f5_2 := 2 * f5
|
||||
f6_2 := 2 * f6
|
||||
f7_2 := 2 * f7
|
||||
f5_38 := 38 * f5 // 1.31*2^30
|
||||
f6_19 := 19 * f6 // 1.31*2^30
|
||||
f7_38 := 38 * f7 // 1.31*2^30
|
||||
f8_19 := 19 * f8 // 1.31*2^30
|
||||
f9_38 := 38 * f9 // 1.31*2^30
|
||||
f0f0 := int64(f0) * int64(f0)
|
||||
f0f1_2 := int64(f0_2) * int64(f1)
|
||||
f0f2_2 := int64(f0_2) * int64(f2)
|
||||
f0f3_2 := int64(f0_2) * int64(f3)
|
||||
f0f4_2 := int64(f0_2) * int64(f4)
|
||||
f0f5_2 := int64(f0_2) * int64(f5)
|
||||
f0f6_2 := int64(f0_2) * int64(f6)
|
||||
f0f7_2 := int64(f0_2) * int64(f7)
|
||||
f0f8_2 := int64(f0_2) * int64(f8)
|
||||
f0f9_2 := int64(f0_2) * int64(f9)
|
||||
f1f1_2 := int64(f1_2) * int64(f1)
|
||||
f1f2_2 := int64(f1_2) * int64(f2)
|
||||
f1f3_4 := int64(f1_2) * int64(f3_2)
|
||||
f1f4_2 := int64(f1_2) * int64(f4)
|
||||
f1f5_4 := int64(f1_2) * int64(f5_2)
|
||||
f1f6_2 := int64(f1_2) * int64(f6)
|
||||
f1f7_4 := int64(f1_2) * int64(f7_2)
|
||||
f1f8_2 := int64(f1_2) * int64(f8)
|
||||
f1f9_76 := int64(f1_2) * int64(f9_38)
|
||||
f2f2 := int64(f2) * int64(f2)
|
||||
f2f3_2 := int64(f2_2) * int64(f3)
|
||||
f2f4_2 := int64(f2_2) * int64(f4)
|
||||
f2f5_2 := int64(f2_2) * int64(f5)
|
||||
f2f6_2 := int64(f2_2) * int64(f6)
|
||||
f2f7_2 := int64(f2_2) * int64(f7)
|
||||
f2f8_38 := int64(f2_2) * int64(f8_19)
|
||||
f2f9_38 := int64(f2) * int64(f9_38)
|
||||
f3f3_2 := int64(f3_2) * int64(f3)
|
||||
f3f4_2 := int64(f3_2) * int64(f4)
|
||||
f3f5_4 := int64(f3_2) * int64(f5_2)
|
||||
f3f6_2 := int64(f3_2) * int64(f6)
|
||||
f3f7_76 := int64(f3_2) * int64(f7_38)
|
||||
f3f8_38 := int64(f3_2) * int64(f8_19)
|
||||
f3f9_76 := int64(f3_2) * int64(f9_38)
|
||||
f4f4 := int64(f4) * int64(f4)
|
||||
f4f5_2 := int64(f4_2) * int64(f5)
|
||||
f4f6_38 := int64(f4_2) * int64(f6_19)
|
||||
f4f7_38 := int64(f4) * int64(f7_38)
|
||||
f4f8_38 := int64(f4_2) * int64(f8_19)
|
||||
f4f9_38 := int64(f4) * int64(f9_38)
|
||||
f5f5_38 := int64(f5) * int64(f5_38)
|
||||
f5f6_38 := int64(f5_2) * int64(f6_19)
|
||||
f5f7_76 := int64(f5_2) * int64(f7_38)
|
||||
f5f8_38 := int64(f5_2) * int64(f8_19)
|
||||
f5f9_76 := int64(f5_2) * int64(f9_38)
|
||||
f6f6_19 := int64(f6) * int64(f6_19)
|
||||
f6f7_38 := int64(f6) * int64(f7_38)
|
||||
f6f8_38 := int64(f6_2) * int64(f8_19)
|
||||
f6f9_38 := int64(f6) * int64(f9_38)
|
||||
f7f7_38 := int64(f7) * int64(f7_38)
|
||||
f7f8_38 := int64(f7_2) * int64(f8_19)
|
||||
f7f9_76 := int64(f7_2) * int64(f9_38)
|
||||
f8f8_19 := int64(f8) * int64(f8_19)
|
||||
f8f9_38 := int64(f8) * int64(f9_38)
|
||||
f9f9_38 := int64(f9) * int64(f9_38)
|
||||
h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
|
||||
h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
|
||||
h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
|
||||
h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
|
||||
h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
|
||||
h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
|
||||
h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
|
||||
h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
|
||||
h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
|
||||
h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
|
||||
var carry [10]int64
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feMul121666 calculates h = f * 121666. Can overlap h with f.
|
||||
//
|
||||
// Preconditions:
|
||||
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
//
|
||||
// Postconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
func feMul121666(h, f *fieldElement) {
|
||||
h0 := int64(f[0]) * 121666
|
||||
h1 := int64(f[1]) * 121666
|
||||
h2 := int64(f[2]) * 121666
|
||||
h3 := int64(f[3]) * 121666
|
||||
h4 := int64(f[4]) * 121666
|
||||
h5 := int64(f[5]) * 121666
|
||||
h6 := int64(f[6]) * 121666
|
||||
h7 := int64(f[7]) * 121666
|
||||
h8 := int64(f[8]) * 121666
|
||||
h9 := int64(f[9]) * 121666
|
||||
var carry [10]int64
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feInvert sets out = z^-1.
|
||||
func feInvert(out, z *fieldElement) {
|
||||
var t0, t1, t2, t3 fieldElement
|
||||
var i int
|
||||
|
||||
feSquare(&t0, z)
|
||||
for i = 1; i < 1; i++ {
|
||||
feSquare(&t0, &t0)
|
||||
}
|
||||
feSquare(&t1, &t0)
|
||||
for i = 1; i < 2; i++ {
|
||||
feSquare(&t1, &t1)
|
||||
}
|
||||
feMul(&t1, z, &t1)
|
||||
feMul(&t0, &t0, &t1)
|
||||
feSquare(&t2, &t0)
|
||||
for i = 1; i < 1; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t1, &t2)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 5; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 10; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t2, &t2, &t1)
|
||||
feSquare(&t3, &t2)
|
||||
for i = 1; i < 20; i++ {
|
||||
feSquare(&t3, &t3)
|
||||
}
|
||||
feMul(&t2, &t3, &t2)
|
||||
feSquare(&t2, &t2)
|
||||
for i = 1; i < 10; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 50; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t2, &t2, &t1)
|
||||
feSquare(&t3, &t2)
|
||||
for i = 1; i < 100; i++ {
|
||||
feSquare(&t3, &t3)
|
||||
}
|
||||
feMul(&t2, &t3, &t2)
|
||||
feSquare(&t2, &t2)
|
||||
for i = 1; i < 50; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t1, &t1)
|
||||
for i = 1; i < 5; i++ {
|
||||
feSquare(&t1, &t1)
|
||||
}
|
||||
feMul(out, &t1, &t0)
|
||||
}
|
||||
|
||||
func scalarMult(out, in, base *[32]byte) {
|
||||
var e [32]byte
|
||||
|
||||
copy(e[:], in[:])
|
||||
e[0] &= 248
|
||||
e[31] &= 127
|
||||
e[31] |= 64
|
||||
|
||||
var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
|
||||
feFromBytes(&x1, base)
|
||||
feOne(&x2)
|
||||
feCopy(&x3, &x1)
|
||||
feOne(&z3)
|
||||
|
||||
swap := int32(0)
|
||||
for pos := 254; pos >= 0; pos-- {
|
||||
b := e[pos/8] >> uint(pos&7)
|
||||
b &= 1
|
||||
swap ^= int32(b)
|
||||
feCSwap(&x2, &x3, swap)
|
||||
feCSwap(&z2, &z3, swap)
|
||||
swap = int32(b)
|
||||
|
||||
feSub(&tmp0, &x3, &z3)
|
||||
feSub(&tmp1, &x2, &z2)
|
||||
feAdd(&x2, &x2, &z2)
|
||||
feAdd(&z2, &x3, &z3)
|
||||
feMul(&z3, &tmp0, &x2)
|
||||
feMul(&z2, &z2, &tmp1)
|
||||
feSquare(&tmp0, &tmp1)
|
||||
feSquare(&tmp1, &x2)
|
||||
feAdd(&x3, &z3, &z2)
|
||||
feSub(&z2, &z3, &z2)
|
||||
feMul(&x2, &tmp1, &tmp0)
|
||||
feSub(&tmp1, &tmp1, &tmp0)
|
||||
feSquare(&z2, &z2)
|
||||
feMul121666(&z3, &tmp1)
|
||||
feSquare(&x3, &x3)
|
||||
feAdd(&tmp0, &tmp0, &z3)
|
||||
feMul(&z3, &x1, &z2)
|
||||
feMul(&z2, &tmp1, &tmp0)
|
||||
}
|
||||
|
||||
feCSwap(&x2, &x3, swap)
|
||||
feCSwap(&z2, &z3, swap)
|
||||
|
||||
feInvert(&z2, &z2)
|
||||
feMul(&x2, &x2, &z2)
|
||||
feToBytes(out, &x2)
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package curve25519 provides an implementation of scalar multiplication on
|
||||
// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html
|
||||
package curve25519 // import "golang.org/x/crypto/curve25519"
|
||||
|
||||
// basePoint is the x coordinate of the generator of the curve.
|
||||
var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
|
||||
// ScalarMult sets dst to the product in*base where dst and base are the x
|
||||
// coordinates of group points and all values are in little-endian form.
|
||||
func ScalarMult(dst, in, base *[32]byte) {
|
||||
scalarMult(dst, in, base)
|
||||
}
|
||||
|
||||
// ScalarBaseMult sets dst to the product in*base where dst and base are the x
|
||||
// coordinates of group points, base is the standard generator and all values
|
||||
// are in little-endian form.
|
||||
func ScalarBaseMult(dst, in *[32]byte) {
|
||||
ScalarMult(dst, in, &basePoint)
|
||||
}
|
|
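For orientation, a minimal sketch of a Diffie-Hellman exchange using the ScalarBaseMult/ScalarMult API vendored above; key handling is simplified for illustration:

```
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var alicePriv, bobPriv [32]byte
	if _, err := rand.Read(alicePriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bobPriv[:]); err != nil {
		panic(err)
	}

	// Each side derives a public key from its private scalar and the
	// curve's standard generator.
	var alicePub, bobPub [32]byte
	curve25519.ScalarBaseMult(&alicePub, &alicePriv)
	curve25519.ScalarBaseMult(&bobPub, &bobPriv)

	// Multiplying the peer's public key by the local private scalar gives
	// the same shared secret on both sides.
	var aliceShared, bobShared [32]byte
	curve25519.ScalarMult(&aliceShared, &alicePriv, &bobPub)
	curve25519.ScalarMult(&bobShared, &bobPriv, &alicePub)

	fmt.Println("shared secrets match:", bytes.Equal(aliceShared[:], bobShared[:]))
}
```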
@ -0,0 +1,73 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
#include "const_amd64.h"
|
||||
|
||||
// func freeze(inout *[5]uint64)
|
||||
TEXT ·freeze(SB),7,$0-8
|
||||
MOVQ inout+0(FP), DI
|
||||
|
||||
MOVQ 0(DI),SI
|
||||
MOVQ 8(DI),DX
|
||||
MOVQ 16(DI),CX
|
||||
MOVQ 24(DI),R8
|
||||
MOVQ 32(DI),R9
|
||||
MOVQ $REDMASK51,AX
|
||||
MOVQ AX,R10
|
||||
SUBQ $18,R10
|
||||
MOVQ $3,R11
|
||||
REDUCELOOP:
|
||||
MOVQ SI,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,SI
|
||||
ADDQ R12,DX
|
||||
MOVQ DX,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,DX
|
||||
ADDQ R12,CX
|
||||
MOVQ CX,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,CX
|
||||
ADDQ R12,R8
|
||||
MOVQ R8,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,R8
|
||||
ADDQ R12,R9
|
||||
MOVQ R9,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,R9
|
||||
IMUL3Q $19,R12,R12
|
||||
ADDQ R12,SI
|
||||
SUBQ $1,R11
|
||||
JA REDUCELOOP
|
||||
MOVQ $1,R12
|
||||
CMPQ R10,SI
|
||||
CMOVQLT R11,R12
|
||||
CMPQ AX,DX
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,CX
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,R8
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,R9
|
||||
CMOVQNE R11,R12
|
||||
NEGQ R12
|
||||
ANDQ R12,AX
|
||||
ANDQ R12,R10
|
||||
SUBQ R10,SI
|
||||
SUBQ AX,DX
|
||||
SUBQ AX,CX
|
||||
SUBQ AX,R8
|
||||
SUBQ AX,R9
|
||||
MOVQ SI,0(DI)
|
||||
MOVQ DX,8(DI)
|
||||
MOVQ CX,16(DI)
|
||||
MOVQ R8,24(DI)
|
||||
MOVQ R9,32(DI)
|
||||
RET
|
File diff suppressed because it is too large
|
@ -0,0 +1,240 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
package curve25519
|
||||
|
||||
// These functions are implemented in the .s files. The names of the functions
|
||||
// in the rest of the file are also taken from the SUPERCOP sources to help
|
||||
// people following along.
|
||||
|
||||
//go:noescape
|
||||
|
||||
func cswap(inout *[5]uint64, v uint64)
|
||||
|
||||
//go:noescape
|
||||
|
||||
func ladderstep(inout *[5][5]uint64)
|
||||
|
||||
//go:noescape
|
||||
|
||||
func freeze(inout *[5]uint64)
|
||||
|
||||
//go:noescape
|
||||
|
||||
func mul(dest, a, b *[5]uint64)
|
||||
|
||||
//go:noescape
|
||||
|
||||
func square(out, in *[5]uint64)
|
||||
|
||||
// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
|
||||
func mladder(xr, zr *[5]uint64, s *[32]byte) {
|
||||
var work [5][5]uint64
|
||||
|
||||
work[0] = *xr
|
||||
setint(&work[1], 1)
|
||||
setint(&work[2], 0)
|
||||
work[3] = *xr
|
||||
setint(&work[4], 1)
|
||||
|
||||
j := uint(6)
|
||||
var prevbit byte
|
||||
|
||||
for i := 31; i >= 0; i-- {
|
||||
for j < 8 {
|
||||
bit := ((*s)[i] >> j) & 1
|
||||
swap := bit ^ prevbit
|
||||
prevbit = bit
|
||||
cswap(&work[1], uint64(swap))
|
||||
ladderstep(&work)
|
||||
j--
|
||||
}
|
||||
j = 7
|
||||
}
|
||||
|
||||
*xr = work[1]
|
||||
*zr = work[2]
|
||||
}
|
||||
|
||||
func scalarMult(out, in, base *[32]byte) {
|
||||
var e [32]byte
|
||||
copy(e[:], (*in)[:])
|
||||
e[0] &= 248
|
||||
e[31] &= 127
|
||||
e[31] |= 64
|
||||
|
||||
var t, z [5]uint64
|
||||
unpack(&t, base)
|
||||
mladder(&t, &z, &e)
|
||||
invert(&z, &z)
|
||||
mul(&t, &t, &z)
|
||||
pack(out, &t)
|
||||
}
|
||||
|
||||
func setint(r *[5]uint64, v uint64) {
|
||||
r[0] = v
|
||||
r[1] = 0
|
||||
r[2] = 0
|
||||
r[3] = 0
|
||||
r[4] = 0
|
||||
}
|
||||
|
||||
// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
|
||||
// order.
|
||||
func unpack(r *[5]uint64, x *[32]byte) {
|
||||
r[0] = uint64(x[0]) |
|
||||
uint64(x[1])<<8 |
|
||||
uint64(x[2])<<16 |
|
||||
uint64(x[3])<<24 |
|
||||
uint64(x[4])<<32 |
|
||||
uint64(x[5])<<40 |
|
||||
uint64(x[6]&7)<<48
|
||||
|
||||
r[1] = uint64(x[6])>>3 |
|
||||
uint64(x[7])<<5 |
|
||||
uint64(x[8])<<13 |
|
||||
uint64(x[9])<<21 |
|
||||
uint64(x[10])<<29 |
|
||||
uint64(x[11])<<37 |
|
||||
uint64(x[12]&63)<<45
|
||||
|
||||
r[2] = uint64(x[12])>>6 |
|
||||
uint64(x[13])<<2 |
|
||||
uint64(x[14])<<10 |
|
||||
uint64(x[15])<<18 |
|
||||
uint64(x[16])<<26 |
|
||||
uint64(x[17])<<34 |
|
||||
uint64(x[18])<<42 |
|
||||
uint64(x[19]&1)<<50
|
||||
|
||||
r[3] = uint64(x[19])>>1 |
|
||||
uint64(x[20])<<7 |
|
||||
uint64(x[21])<<15 |
|
||||
uint64(x[22])<<23 |
|
||||
uint64(x[23])<<31 |
|
||||
uint64(x[24])<<39 |
|
||||
uint64(x[25]&15)<<47
|
||||
|
||||
r[4] = uint64(x[25])>>4 |
|
||||
uint64(x[26])<<4 |
|
||||
uint64(x[27])<<12 |
|
||||
uint64(x[28])<<20 |
|
||||
uint64(x[29])<<28 |
|
||||
uint64(x[30])<<36 |
|
||||
uint64(x[31]&127)<<44
|
||||
}
|
||||
|
||||
// pack sets out = x where out is the usual, little-endian form of the 5,
|
||||
// 51-bit limbs in x.
|
||||
func pack(out *[32]byte, x *[5]uint64) {
|
||||
t := *x
|
||||
freeze(&t)
|
||||
|
||||
out[0] = byte(t[0])
|
||||
out[1] = byte(t[0] >> 8)
|
||||
out[2] = byte(t[0] >> 16)
|
||||
out[3] = byte(t[0] >> 24)
|
||||
out[4] = byte(t[0] >> 32)
|
||||
out[5] = byte(t[0] >> 40)
|
||||
out[6] = byte(t[0] >> 48)
|
||||
|
||||
out[6] ^= byte(t[1]<<3) & 0xf8
|
||||
out[7] = byte(t[1] >> 5)
|
||||
out[8] = byte(t[1] >> 13)
|
||||
out[9] = byte(t[1] >> 21)
|
||||
out[10] = byte(t[1] >> 29)
|
||||
out[11] = byte(t[1] >> 37)
|
||||
out[12] = byte(t[1] >> 45)
|
||||
|
||||
out[12] ^= byte(t[2]<<6) & 0xc0
|
||||
out[13] = byte(t[2] >> 2)
|
||||
out[14] = byte(t[2] >> 10)
|
||||
out[15] = byte(t[2] >> 18)
|
||||
out[16] = byte(t[2] >> 26)
|
||||
out[17] = byte(t[2] >> 34)
|
||||
out[18] = byte(t[2] >> 42)
|
||||
out[19] = byte(t[2] >> 50)
|
||||
|
||||
out[19] ^= byte(t[3]<<1) & 0xfe
|
||||
out[20] = byte(t[3] >> 7)
|
||||
out[21] = byte(t[3] >> 15)
|
||||
out[22] = byte(t[3] >> 23)
|
||||
out[23] = byte(t[3] >> 31)
|
||||
out[24] = byte(t[3] >> 39)
|
||||
out[25] = byte(t[3] >> 47)
|
||||
|
||||
out[25] ^= byte(t[4]<<4) & 0xf0
|
||||
out[26] = byte(t[4] >> 4)
|
||||
out[27] = byte(t[4] >> 12)
|
||||
out[28] = byte(t[4] >> 20)
|
||||
out[29] = byte(t[4] >> 28)
|
||||
out[30] = byte(t[4] >> 36)
|
||||
out[31] = byte(t[4] >> 44)
|
||||
}
|
||||
|
||||
// invert calculates r = x^-1 mod p using Fermat's little theorem.
|
||||
func invert(r *[5]uint64, x *[5]uint64) {
|
||||
var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
|
||||
|
||||
square(&z2, x) /* 2 */
|
||||
square(&t, &z2) /* 4 */
|
||||
square(&t, &t) /* 8 */
|
||||
mul(&z9, &t, x) /* 9 */
|
||||
mul(&z11, &z9, &z2) /* 11 */
|
||||
square(&t, &z11) /* 22 */
|
||||
mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
|
||||
|
||||
square(&t, &z2_5_0) /* 2^6 - 2^1 */
|
||||
for i := 1; i < 5; i++ { /* 2^10 - 2^5 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
|
||||
|
||||
square(&t, &z2_10_0) /* 2^11 - 2^1 */
|
||||
for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
|
||||
|
||||
square(&t, &z2_20_0) /* 2^21 - 2^1 */
|
||||
for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
|
||||
|
||||
square(&t, &t) /* 2^41 - 2^1 */
|
||||
for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
|
||||
|
||||
square(&t, &z2_50_0) /* 2^51 - 2^1 */
|
||||
for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
|
||||
|
||||
square(&t, &z2_100_0) /* 2^101 - 2^1 */
|
||||
for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
|
||||
|
||||
square(&t, &t) /* 2^201 - 2^1 */
|
||||
for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
|
||||
|
||||
square(&t, &t) /* 2^251 - 2^1 */
|
||||
square(&t, &t) /* 2^252 - 2^2 */
|
||||
square(&t, &t) /* 2^253 - 2^3 */
|
||||
|
||||
square(&t, &t) /* 2^254 - 2^4 */
|
||||
|
||||
square(&t, &t) /* 2^255 - 2^5 */
|
||||
mul(r, &t, &z11) /* 2^255 - 21 */
|
||||
}
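The addition chain above is just an efficient way of raising to one fixed exponent. Since p = 2^255 - 19 is prime, Fermat's little theorem gives the inverse directly, which is what the final /* 2^255 - 21 */ comment records:

    x^-1 = x^(p-2) = x^(2^255 - 21)  (mod p),  with p = 2^255 - 19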
|
|
@ -0,0 +1,169 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
#include "const_amd64.h"
|
||||
|
||||
// func mul(dest, a, b *[5]uint64)
|
||||
TEXT ·mul(SB),0,$16-24
|
||||
MOVQ dest+0(FP), DI
|
||||
MOVQ a+8(FP), SI
|
||||
MOVQ b+16(FP), DX
|
||||
|
||||
MOVQ DX,CX
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MOVQ AX,0(SP)
|
||||
MULQ 16(CX)
|
||||
MOVQ AX,R8
|
||||
MOVQ DX,R9
|
||||
MOVQ 32(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MOVQ AX,8(SP)
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 8(CX)
|
||||
MOVQ AX,R10
|
||||
MOVQ DX,R11
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 16(CX)
|
||||
MOVQ AX,R12
|
||||
MOVQ DX,R13
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 24(CX)
|
||||
MOVQ AX,R14
|
||||
MOVQ DX,R15
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 32(CX)
|
||||
MOVQ AX,BX
|
||||
MOVQ DX,BP
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 8(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 24(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 24(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 0(SP),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 0(SP),AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 32(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 8(SP),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 8(SP),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 8(SP),AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ $REDMASK51,SI
|
||||
SHLQ $13,R9:R8
|
||||
ANDQ SI,R8
|
||||
SHLQ $13,R11:R10
|
||||
ANDQ SI,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
ANDQ SI,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
ANDQ SI,R14
|
||||
ADDQ R13,R14
|
||||
SHLQ $13,BP:BX
|
||||
ANDQ SI,BX
|
||||
ADDQ R15,BX
|
||||
IMUL3Q $19,BP,DX
|
||||
ADDQ DX,R8
|
||||
MOVQ R8,DX
|
||||
SHRQ $51,DX
|
||||
ADDQ R10,DX
|
||||
MOVQ DX,CX
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,R8
|
||||
ADDQ R12,DX
|
||||
MOVQ DX,R9
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,CX
|
||||
ADDQ R14,DX
|
||||
MOVQ DX,AX
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,R9
|
||||
ADDQ BX,DX
|
||||
MOVQ DX,R10
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,AX
|
||||
IMUL3Q $19,DX,DX
|
||||
ADDQ DX,R8
|
||||
ANDQ SI,R10
|
||||
MOVQ R8,0(DI)
|
||||
MOVQ CX,8(DI)
|
||||
MOVQ R9,16(DI)
|
||||
MOVQ AX,24(DI)
|
||||
MOVQ R10,32(DI)
|
||||
RET
|
|
@ -0,0 +1,132 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
#include "const_amd64.h"
|
||||
|
||||
// func square(out, in *[5]uint64)
|
||||
TEXT ·square(SB),7,$0-16
|
||||
MOVQ out+0(FP), DI
|
||||
MOVQ in+8(FP), SI
|
||||
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 0(SI)
|
||||
MOVQ AX,CX
|
||||
MOVQ DX,R8
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 8(SI)
|
||||
MOVQ AX,R9
|
||||
MOVQ DX,R10
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 16(SI)
|
||||
MOVQ AX,R11
|
||||
MOVQ DX,R12
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 24(SI)
|
||||
MOVQ AX,R13
|
||||
MOVQ DX,R14
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 32(SI)
|
||||
MOVQ AX,R15
|
||||
MOVQ DX,BX
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 8(SI)
|
||||
ADDQ AX,R11
|
||||
ADCQ DX,R12
|
||||
MOVQ 8(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 16(SI)
|
||||
ADDQ AX,R13
|
||||
ADCQ DX,R14
|
||||
MOVQ 8(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,R15
|
||||
ADCQ DX,BX
|
||||
MOVQ 8(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,CX
|
||||
ADCQ DX,R8
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 16(SI)
|
||||
ADDQ AX,R15
|
||||
ADCQ DX,BX
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,CX
|
||||
ADCQ DX,R8
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R9
|
||||
ADCQ DX,R10
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,R9
|
||||
ADCQ DX,R10
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R11
|
||||
ADCQ DX,R12
|
||||
MOVQ 32(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R13
|
||||
ADCQ DX,R14
|
||||
MOVQ $REDMASK51,SI
|
||||
SHLQ $13,R8:CX
|
||||
ANDQ SI,CX
|
||||
SHLQ $13,R10:R9
|
||||
ANDQ SI,R9
|
||||
ADDQ R8,R9
|
||||
SHLQ $13,R12:R11
|
||||
ANDQ SI,R11
|
||||
ADDQ R10,R11
|
||||
SHLQ $13,R14:R13
|
||||
ANDQ SI,R13
|
||||
ADDQ R12,R13
|
||||
SHLQ $13,BX:R15
|
||||
ANDQ SI,R15
|
||||
ADDQ R14,R15
|
||||
IMUL3Q $19,BX,DX
|
||||
ADDQ DX,CX
|
||||
MOVQ CX,DX
|
||||
SHRQ $51,DX
|
||||
ADDQ R9,DX
|
||||
ANDQ SI,CX
|
||||
MOVQ DX,R8
|
||||
SHRQ $51,DX
|
||||
ADDQ R11,DX
|
||||
ANDQ SI,R8
|
||||
MOVQ DX,R9
|
||||
SHRQ $51,DX
|
||||
ADDQ R13,DX
|
||||
ANDQ SI,R9
|
||||
MOVQ DX,AX
|
||||
SHRQ $51,DX
|
||||
ADDQ R15,DX
|
||||
ANDQ SI,AX
|
||||
MOVQ DX,R10
|
||||
SHRQ $51,DX
|
||||
IMUL3Q $19,DX,DX
|
||||
ADDQ DX,CX
|
||||
ANDQ SI,R10
|
||||
MOVQ CX,0(DI)
|
||||
MOVQ R8,8(DI)
|
||||
MOVQ R9,16(DI)
|
||||
MOVQ AX,24(DI)
|
||||
MOVQ R10,32(DI)
|
||||
RET
|
|
@ -0,0 +1,32 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
// Package subtle implements functions that are often useful in cryptographic
|
||||
// code but require careful thought to use correctly.
|
||||
package subtle // import "golang.org/x/crypto/internal/subtle"
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// AnyOverlap reports whether x and y share memory at any (not necessarily
|
||||
// corresponding) index. The memory beyond the slice length is ignored.
|
||||
func AnyOverlap(x, y []byte) bool {
|
||||
return len(x) > 0 && len(y) > 0 &&
|
||||
uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
|
||||
uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
|
||||
}
|
||||
|
||||
// InexactOverlap reports whether x and y share memory at any non-corresponding
|
||||
// index. The memory beyond the slice length is ignored. Note that x and y can
|
||||
// have different lengths and still not have any inexact overlap.
|
||||
//
|
||||
// InexactOverlap can be used to implement the requirements of the crypto/cipher
|
||||
// AEAD, Block, BlockMode and Stream interfaces.
|
||||
func InexactOverlap(x, y []byte) bool {
|
||||
if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
|
||||
return false
|
||||
}
|
||||
return AnyOverlap(x, y)
|
||||
}
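A hedged sketch of how a cipher implementation typically uses this check, written as a hypothetical in-package function (the internal/subtle package can only be imported from within golang.org/x/crypto, so a real caller would sit inside that module); the function and its keystream handling are illustrative only:

```go
package subtle

// xorKeyStream is an illustrative sketch (not part of this package) of the
// guard a cipher implementation places in front of its hot loop: dst == src
// (exact aliasing) is allowed, but any partial overlap is a caller bug.
func xorKeyStream(dst, src, keystream []byte) {
	if len(dst) < len(src) || len(keystream) < len(src) {
		panic("xorKeyStream: output or keystream too short")
	}
	if InexactOverlap(dst[:len(src)], src) {
		panic("xorKeyStream: invalid buffer overlap")
	}
	for i, b := range src {
		dst[i] = b ^ keystream[i]
	}
}
```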
|
|
@ -0,0 +1,35 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appengine
|
||||
|
||||
// Package subtle implements functions that are often useful in cryptographic
|
||||
// code but require careful thought to use correctly.
|
||||
package subtle // import "golang.org/x/crypto/internal/subtle"
|
||||
|
||||
// This is the Google App Engine standard variant based on reflect
|
||||
// because the unsafe package and cgo are disallowed.
|
||||
|
||||
import "reflect"
|
||||
|
||||
// AnyOverlap reports whether x and y share memory at any (not necessarily
|
||||
// corresponding) index. The memory beyond the slice length is ignored.
|
||||
func AnyOverlap(x, y []byte) bool {
|
||||
return len(x) > 0 && len(y) > 0 &&
|
||||
reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() &&
|
||||
reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer()
|
||||
}
|
||||
|
||||
// InexactOverlap reports whether x and y share memory at any non-corresponding
|
||||
// index. The memory beyond the slice length is ignored. Note that x and y can
|
||||
// have different lengths and still not have any inexact overlap.
|
||||
//
|
||||
// InexactOverlap can be used to implement the requirements of the crypto/cipher
|
||||
// AEAD, Block, BlockMode and Stream interfaces.
|
||||
func InexactOverlap(x, y []byte) bool {
|
||||
if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
|
||||
return false
|
||||
}
|
||||
return AnyOverlap(x, y)
|
||||
}
|
|
@ -0,0 +1,103 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package box authenticates and encrypts small messages using public-key cryptography.
|
||||
|
||||
Box uses Curve25519, XSalsa20 and Poly1305 to encrypt and authenticate
|
||||
messages. The length of messages is not hidden.
|
||||
|
||||
It is the caller's responsibility to ensure the uniqueness of nonces—for
|
||||
example, by using nonce 1 for the first message, nonce 2 for the second
|
||||
message, etc. Nonces are long enough that randomly generated nonces have
|
||||
negligible risk of collision.
|
||||
|
||||
Messages should be small because:
|
||||
|
||||
1. The whole message needs to be held in memory to be processed.
|
||||
|
||||
2. Using large messages pressures implementations on small machines to decrypt
|
||||
and process plaintext before authenticating it. This is very dangerous, and
|
||||
this API does not allow it, but a protocol that uses excessive message sizes
|
||||
might present some implementations with no other choice.
|
||||
|
||||
3. Fixed overheads will be sufficiently amortised by messages as small as 8KB.
|
||||
|
||||
4. Performance may be improved by working with messages that fit into data caches.
|
||||
|
||||
Thus large amounts of data should be chunked so that each message is small.
|
||||
(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable
|
||||
chunk size.
|
||||
|
||||
This package is interoperable with NaCl: https://nacl.cr.yp.to/box.html.
|
||||
*/
|
||||
package box // import "golang.org/x/crypto/nacl/box"
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"golang.org/x/crypto/curve25519"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
"golang.org/x/crypto/salsa20/salsa"
|
||||
)
|
||||
|
||||
// Overhead is the number of bytes of overhead when boxing a message.
|
||||
const Overhead = secretbox.Overhead
|
||||
|
||||
// GenerateKey generates a new public/private key pair suitable for use with
|
||||
// Seal and Open.
|
||||
func GenerateKey(rand io.Reader) (publicKey, privateKey *[32]byte, err error) {
|
||||
publicKey = new([32]byte)
|
||||
privateKey = new([32]byte)
|
||||
_, err = io.ReadFull(rand, privateKey[:])
|
||||
if err != nil {
|
||||
publicKey = nil
|
||||
privateKey = nil
|
||||
return
|
||||
}
|
||||
|
||||
curve25519.ScalarBaseMult(publicKey, privateKey)
|
||||
return
|
||||
}
|
||||
|
||||
var zeros [16]byte
|
||||
|
||||
// Precompute calculates the shared key between peersPublicKey and privateKey
|
||||
// and writes it to sharedKey. The shared key can be used with
|
||||
// OpenAfterPrecomputation and SealAfterPrecomputation to speed up processing
|
||||
// when using the same pair of keys repeatedly.
|
||||
func Precompute(sharedKey, peersPublicKey, privateKey *[32]byte) {
|
||||
curve25519.ScalarMult(sharedKey, privateKey, peersPublicKey)
|
||||
salsa.HSalsa20(sharedKey, &zeros, sharedKey, &salsa.Sigma)
|
||||
}
|
||||
|
||||
// Seal appends an encrypted and authenticated copy of message to out, which
|
||||
// will be Overhead bytes longer than the original and must not overlap it. The
|
||||
// nonce must be unique for each distinct message for a given pair of keys.
|
||||
func Seal(out, message []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) []byte {
|
||||
var sharedKey [32]byte
|
||||
Precompute(&sharedKey, peersPublicKey, privateKey)
|
||||
return secretbox.Seal(out, message, nonce, &sharedKey)
|
||||
}
|
||||
|
||||
// SealAfterPrecomputation performs the same actions as Seal, but takes a
|
||||
// shared key as generated by Precompute.
|
||||
func SealAfterPrecomputation(out, message []byte, nonce *[24]byte, sharedKey *[32]byte) []byte {
|
||||
return secretbox.Seal(out, message, nonce, sharedKey)
|
||||
}
|
||||
|
||||
// Open authenticates and decrypts a box produced by Seal and appends the
|
||||
// message to out, which must not overlap box. The output will be Overhead
|
||||
// bytes smaller than box.
|
||||
func Open(out, box []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) ([]byte, bool) {
|
||||
var sharedKey [32]byte
|
||||
Precompute(&sharedKey, peersPublicKey, privateKey)
|
||||
return secretbox.Open(out, box, nonce, &sharedKey)
|
||||
}
|
||||
|
||||
// OpenAfterPrecomputation performs the same actions as Open, but takes a
|
||||
// shared key as generated by Precompute.
|
||||
func OpenAfterPrecomputation(out, box []byte, nonce *[24]byte, sharedKey *[32]byte) ([]byte, bool) {
|
||||
return secretbox.Open(out, box, nonce, sharedKey)
|
||||
}
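A minimal end-to-end sketch of the API defined above, assuming crypto/rand for the keys and nonce; in a real protocol the nonce must be agreed or transmitted alongside the box and never reused for the same key pair:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/box"
)

func main() {
	senderPub, senderPriv, err := box.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	recipientPub, recipientPriv, err := box.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// 24-byte nonce; random nonces are acceptable because collisions are negligible.
	var nonce [24]byte
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}

	msg := []byte("small message")
	sealed := box.Seal(nil, msg, &nonce, recipientPub, senderPriv)

	opened, ok := box.Open(nil, sealed, &nonce, senderPub, recipientPriv)
	fmt.Println(ok, string(opened)) // true small message
}
```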
|
|
@ -0,0 +1,173 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package secretbox encrypts and authenticates small messages.
|
||||
|
||||
Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with
|
||||
secret-key cryptography. The length of messages is not hidden.
|
||||
|
||||
It is the caller's responsibility to ensure the uniqueness of nonces—for
|
||||
example, by using nonce 1 for the first message, nonce 2 for the second
|
||||
message, etc. Nonces are long enough that randomly generated nonces have
|
||||
negligible risk of collision.
|
||||
|
||||
Messages should be small because:
|
||||
|
||||
1. The whole message needs to be held in memory to be processed.
|
||||
|
||||
2. Using large messages pressures implementations on small machines to decrypt
|
||||
and process plaintext before authenticating it. This is very dangerous, and
|
||||
this API does not allow it, but a protocol that uses excessive message sizes
|
||||
might present some implementations with no other choice.
|
||||
|
||||
3. Fixed overheads will be sufficiently amortised by messages as small as 8KB.
|
||||
|
||||
4. Performance may be improved by working with messages that fit into data caches.
|
||||
|
||||
Thus large amounts of data should be chunked so that each message is small.
|
||||
(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable
|
||||
chunk size.
|
||||
|
||||
This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
|
||||
*/
|
||||
package secretbox // import "golang.org/x/crypto/nacl/secretbox"
|
||||
|
||||
import (
|
||||
"golang.org/x/crypto/internal/subtle"
|
||||
"golang.org/x/crypto/poly1305"
|
||||
"golang.org/x/crypto/salsa20/salsa"
|
||||
)
|
||||
|
||||
// Overhead is the number of bytes of overhead when boxing a message.
|
||||
const Overhead = poly1305.TagSize
|
||||
|
||||
// setup produces a sub-key and Salsa20 counter given a nonce and key.
|
||||
func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
|
||||
// We use XSalsa20 for encryption so first we need to generate a
|
||||
// key and nonce with HSalsa20.
|
||||
var hNonce [16]byte
|
||||
copy(hNonce[:], nonce[:])
|
||||
salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
|
||||
|
||||
// The final 8 bytes of the original nonce form the new nonce.
|
||||
copy(counter[:], nonce[16:])
|
||||
}
|
||||
|
||||
// sliceForAppend takes a slice and a requested number of bytes. It returns a
|
||||
// slice with the contents of the given slice followed by that many bytes and a
|
||||
// second slice that aliases into it and contains only the extra bytes. If the
|
||||
// original slice has sufficient capacity then no allocation is performed.
|
||||
func sliceForAppend(in []byte, n int) (head, tail []byte) {
|
||||
if total := len(in) + n; cap(in) >= total {
|
||||
head = in[:total]
|
||||
} else {
|
||||
head = make([]byte, total)
|
||||
copy(head, in)
|
||||
}
|
||||
tail = head[len(in):]
|
||||
return
|
||||
}
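A brief illustration of the head/tail contract, written as a hypothetical in-package test (sliceForAppend is unexported, so the sketch assumes it lives beside this file): when the input has spare capacity the output is written in place, otherwise a fresh buffer is allocated.

```go
package secretbox

import "testing"

// TestSliceForAppendReuse checks the documented head/tail contract.
func TestSliceForAppendReuse(t *testing.T) {
	in := make([]byte, 4, 64) // spare capacity, so no allocation should happen
	head, tail := sliceForAppend(in, 16)
	if len(head) != 20 || len(tail) != 16 {
		t.Fatalf("unexpected lengths: head=%d tail=%d", len(head), len(tail))
	}
	if &head[0] != &in[0] {
		t.Fatal("head should reuse the original backing array when capacity allows")
	}
}
```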
|
||||
|
||||
// Seal appends an encrypted and authenticated copy of message to out, which
|
||||
// must not overlap message. The key and nonce pair must be unique for each
|
||||
// distinct message and the output will be Overhead bytes longer than message.
|
||||
func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte {
|
||||
var subKey [32]byte
|
||||
var counter [16]byte
|
||||
setup(&subKey, &counter, nonce, key)
|
||||
|
||||
// The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
|
||||
// Salsa20 works with 64-byte blocks, we also generate 32 bytes of
|
||||
// keystream as a side effect.
|
||||
var firstBlock [64]byte
|
||||
salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
|
||||
|
||||
var poly1305Key [32]byte
|
||||
copy(poly1305Key[:], firstBlock[:])
|
||||
|
||||
ret, out := sliceForAppend(out, len(message)+poly1305.TagSize)
|
||||
if subtle.AnyOverlap(out, message) {
|
||||
panic("nacl: invalid buffer overlap")
|
||||
}
|
||||
|
||||
// We XOR up to 32 bytes of message with the keystream generated from
|
||||
// the first block.
|
||||
firstMessageBlock := message
|
||||
if len(firstMessageBlock) > 32 {
|
||||
firstMessageBlock = firstMessageBlock[:32]
|
||||
}
|
||||
|
||||
tagOut := out
|
||||
out = out[poly1305.TagSize:]
|
||||
for i, x := range firstMessageBlock {
|
||||
out[i] = firstBlock[32+i] ^ x
|
||||
}
|
||||
message = message[len(firstMessageBlock):]
|
||||
ciphertext := out
|
||||
out = out[len(firstMessageBlock):]
|
||||
|
||||
// Now encrypt the rest.
|
||||
counter[8] = 1
|
||||
salsa.XORKeyStream(out, message, &counter, &subKey)
|
||||
|
||||
var tag [poly1305.TagSize]byte
|
||||
poly1305.Sum(&tag, ciphertext, &poly1305Key)
|
||||
copy(tagOut, tag[:])
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// Open authenticates and decrypts a box produced by Seal and appends the
|
||||
// message to out, which must not overlap box. The output will be Overhead
|
||||
// bytes smaller than box.
|
||||
func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
|
||||
if len(box) < Overhead {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
var subKey [32]byte
|
||||
var counter [16]byte
|
||||
setup(&subKey, &counter, nonce, key)
|
||||
|
||||
// The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
|
||||
// Salsa20 works with 64-byte blocks, we also generate 32 bytes of
|
||||
// keystream as a side effect.
|
||||
var firstBlock [64]byte
|
||||
salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
|
||||
|
||||
var poly1305Key [32]byte
|
||||
copy(poly1305Key[:], firstBlock[:])
|
||||
var tag [poly1305.TagSize]byte
|
||||
copy(tag[:], box)
|
||||
|
||||
if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
ret, out := sliceForAppend(out, len(box)-Overhead)
|
||||
if subtle.AnyOverlap(out, box) {
|
||||
panic("nacl: invalid buffer overlap")
|
||||
}
|
||||
|
||||
// We XOR up to 32 bytes of box with the keystream generated from
|
||||
// the first block.
|
||||
box = box[Overhead:]
|
||||
firstMessageBlock := box
|
||||
if len(firstMessageBlock) > 32 {
|
||||
firstMessageBlock = firstMessageBlock[:32]
|
||||
}
|
||||
for i, x := range firstMessageBlock {
|
||||
out[i] = firstBlock[32+i] ^ x
|
||||
}
|
||||
|
||||
box = box[len(firstMessageBlock):]
|
||||
out = out[len(firstMessageBlock):]
|
||||
|
||||
// Now decrypt the rest.
|
||||
counter[8] = 1
|
||||
salsa.XORKeyStream(out, box, &counter, &subKey)
|
||||
|
||||
return ret, true
|
||||
}
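The symmetric counterpart to the box example: a sketch of Seal and Open with a random 32-byte key and 24-byte nonce. Prepending the nonce to the ciphertext so the receiver can recover it is a common framing choice assumed here, not something the package mandates:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte
	var nonce [24]byte
	rand.Read(key[:])   // error handling elided in this sketch
	rand.Read(nonce[:]) // error handling elided in this sketch

	// Prepend the nonce so that the receiver can find it again.
	sealed := secretbox.Seal(nonce[:], []byte("hello"), &nonce, &key)

	var gotNonce [24]byte
	copy(gotNonce[:], sealed[:24])
	plain, ok := secretbox.Open(nil, sealed[24:], &gotNonce, &key)
	fmt.Println(ok, string(plain)) // true hello
}
```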
|
|
@ -0,0 +1,33 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package poly1305 implements Poly1305 one-time message authentication code as
|
||||
specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
|
||||
|
||||
Poly1305 is a fast, one-time authentication function. It is infeasible for an
|
||||
attacker to generate an authenticator for a message without the key. However, a
|
||||
key must only be used for a single message. Authenticating two different
|
||||
messages with the same key allows an attacker to forge authenticators for other
|
||||
messages with the same key.
|
||||
|
||||
Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
|
||||
used with a fixed key in order to generate one-time keys from a nonce.
|
||||
However, in this package AES isn't used and the one-time key is specified
|
||||
directly.
|
||||
*/
|
||||
package poly1305 // import "golang.org/x/crypto/poly1305"
|
||||
|
||||
import "crypto/subtle"
|
||||
|
||||
// TagSize is the size, in bytes, of a poly1305 authenticator.
|
||||
const TagSize = 16
|
||||
|
||||
// Verify returns true if mac is a valid authenticator for m with the given
|
||||
// key.
|
||||
func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
|
||||
var tmp [16]byte
|
||||
Sum(&tmp, m, key)
|
||||
return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
|
||||
}
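A short sketch of the one-time usage pattern the doc comment insists on: the 32-byte key authenticates exactly one message, and verification goes through Verify, which compares in constant time:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	// The key must be used for a single message only.
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}

	msg := []byte("attested exactly once")
	var tag [16]byte
	poly1305.Sum(&tag, msg, &key)

	fmt.Println(poly1305.Verify(&tag, msg, &key)) // true
}
```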
|
|
@ -0,0 +1,22 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
package poly1305
|
||||
|
||||
// This function is implemented in sum_amd64.s
|
||||
//go:noescape
|
||||
func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
|
||||
|
||||
// Sum generates an authenticator for m using a one-time key and puts the
|
||||
// 16-byte result into out. Authenticating two different messages with the same
|
||||
// key allows an attacker to forge messages at will.
|
||||
func Sum(out *[16]byte, m []byte, key *[32]byte) {
|
||||
var mPtr *byte
|
||||
if len(m) > 0 {
|
||||
mPtr = &m[0]
|
||||
}
|
||||
poly1305(out, mPtr, uint64(len(m)), key)
|
||||
}
|
|
@ -0,0 +1,125 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
#define POLY1305_ADD(msg, h0, h1, h2) \
|
||||
ADDQ 0(msg), h0; \
|
||||
ADCQ 8(msg), h1; \
|
||||
ADCQ $1, h2; \
|
||||
LEAQ 16(msg), msg
|
||||
|
||||
#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \
|
||||
MOVQ r0, AX; \
|
||||
MULQ h0; \
|
||||
MOVQ AX, t0; \
|
||||
MOVQ DX, t1; \
|
||||
MOVQ r0, AX; \
|
||||
MULQ h1; \
|
||||
ADDQ AX, t1; \
|
||||
ADCQ $0, DX; \
|
||||
MOVQ r0, t2; \
|
||||
IMULQ h2, t2; \
|
||||
ADDQ DX, t2; \
|
||||
\
|
||||
MOVQ r1, AX; \
|
||||
MULQ h0; \
|
||||
ADDQ AX, t1; \
|
||||
ADCQ $0, DX; \
|
||||
MOVQ DX, h0; \
|
||||
MOVQ r1, t3; \
|
||||
IMULQ h2, t3; \
|
||||
MOVQ r1, AX; \
|
||||
MULQ h1; \
|
||||
ADDQ AX, t2; \
|
||||
ADCQ DX, t3; \
|
||||
ADDQ h0, t2; \
|
||||
ADCQ $0, t3; \
|
||||
\
|
||||
MOVQ t0, h0; \
|
||||
MOVQ t1, h1; \
|
||||
MOVQ t2, h2; \
|
||||
ANDQ $3, h2; \
|
||||
MOVQ t2, t0; \
|
||||
ANDQ $0xFFFFFFFFFFFFFFFC, t0; \
|
||||
ADDQ t0, h0; \
|
||||
ADCQ t3, h1; \
|
||||
ADCQ $0, h2; \
|
||||
SHRQ $2, t3, t2; \
|
||||
SHRQ $2, t3; \
|
||||
ADDQ t2, h0; \
|
||||
ADCQ t3, h1; \
|
||||
ADCQ $0, h2
|
||||
|
||||
DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
|
||||
DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
|
||||
GLOBL ·poly1305Mask<>(SB), RODATA, $16
|
||||
|
||||
// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
|
||||
TEXT ·poly1305(SB), $0-32
|
||||
MOVQ out+0(FP), DI
|
||||
MOVQ m+8(FP), SI
|
||||
MOVQ mlen+16(FP), R15
|
||||
MOVQ key+24(FP), AX
|
||||
|
||||
MOVQ 0(AX), R11
|
||||
MOVQ 8(AX), R12
|
||||
ANDQ ·poly1305Mask<>(SB), R11 // r0
|
||||
ANDQ ·poly1305Mask<>+8(SB), R12 // r1
|
||||
XORQ R8, R8 // h0
|
||||
XORQ R9, R9 // h1
|
||||
XORQ R10, R10 // h2
|
||||
|
||||
CMPQ R15, $16
|
||||
JB bytes_between_0_and_15
|
||||
|
||||
loop:
|
||||
POLY1305_ADD(SI, R8, R9, R10)
|
||||
|
||||
multiply:
|
||||
POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
|
||||
SUBQ $16, R15
|
||||
CMPQ R15, $16
|
||||
JAE loop
|
||||
|
||||
bytes_between_0_and_15:
|
||||
TESTQ R15, R15
|
||||
JZ done
|
||||
MOVQ $1, BX
|
||||
XORQ CX, CX
|
||||
XORQ R13, R13
|
||||
ADDQ R15, SI
|
||||
|
||||
flush_buffer:
|
||||
SHLQ $8, BX, CX
|
||||
SHLQ $8, BX
|
||||
MOVB -1(SI), R13
|
||||
XORQ R13, BX
|
||||
DECQ SI
|
||||
DECQ R15
|
||||
JNZ flush_buffer
|
||||
|
||||
ADDQ BX, R8
|
||||
ADCQ CX, R9
|
||||
ADCQ $0, R10
|
||||
MOVQ $16, R15
|
||||
JMP multiply
|
||||
|
||||
done:
|
||||
MOVQ R8, AX
|
||||
MOVQ R9, BX
|
||||
SUBQ $0xFFFFFFFFFFFFFFFB, AX
|
||||
SBBQ $0xFFFFFFFFFFFFFFFF, BX
|
||||
SBBQ $3, R10
|
||||
CMOVQCS R8, AX
|
||||
CMOVQCS R9, BX
|
||||
MOVQ key+24(FP), R8
|
||||
ADDQ 16(R8), AX
|
||||
ADCQ 24(R8), BX
|
||||
|
||||
MOVQ AX, 0(DI)
|
||||
MOVQ BX, 8(DI)
|
||||
RET
|
|
@ -0,0 +1,22 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build arm,!gccgo,!appengine,!nacl
|
||||
|
||||
package poly1305
|
||||
|
||||
// This function is implemented in sum_arm.s
|
||||
//go:noescape
|
||||
func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
|
||||
|
||||
// Sum generates an authenticator for m using a one-time key and puts the
|
||||
// 16-byte result into out. Authenticating two different messages with the same
|
||||
// key allows an attacker to forge messages at will.
|
||||
func Sum(out *[16]byte, m []byte, key *[32]byte) {
|
||||
var mPtr *byte
|
||||
if len(m) > 0 {
|
||||
mPtr = &m[0]
|
||||
}
|
||||
poly1305_auth_armv6(out, mPtr, uint32(len(m)), key)
|
||||
}
|
|
@ -0,0 +1,427 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build arm,!gccgo,!appengine,!nacl
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// This code was translated into a form compatible with 5a from the public
|
||||
// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
|
||||
|
||||
DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
|
||||
DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
|
||||
DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
|
||||
DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
|
||||
DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
|
||||
GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20
|
||||
|
||||
// Warning: the linker may use R11 to synthesize certain instructions. Please
|
||||
// take care and verify that no synthetic instructions use it.
|
||||
|
||||
TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0
|
||||
// Needs 16 bytes of stack and 64 bytes of space pointed to by R0. (It
|
||||
// might look like it's only 60 bytes of space but the final four bytes
|
||||
// will be written by another function.) We need to skip over four
|
||||
// bytes of stack because that's saving the value of 'g'.
|
||||
ADD $4, R13, R8
|
||||
MOVM.IB [R4-R7], (R8)
|
||||
MOVM.IA.W (R1), [R2-R5]
|
||||
MOVW $·poly1305_init_constants_armv6<>(SB), R7
|
||||
MOVW R2, R8
|
||||
MOVW R2>>26, R9
|
||||
MOVW R3>>20, g
|
||||
MOVW R4>>14, R11
|
||||
MOVW R5>>8, R12
|
||||
ORR R3<<6, R9, R9
|
||||
ORR R4<<12, g, g
|
||||
ORR R5<<18, R11, R11
|
||||
MOVM.IA (R7), [R2-R6]
|
||||
AND R8, R2, R2
|
||||
AND R9, R3, R3
|
||||
AND g, R4, R4
|
||||
AND R11, R5, R5
|
||||
AND R12, R6, R6
|
||||
MOVM.IA.W [R2-R6], (R0)
|
||||
EOR R2, R2, R2
|
||||
EOR R3, R3, R3
|
||||
EOR R4, R4, R4
|
||||
EOR R5, R5, R5
|
||||
EOR R6, R6, R6
|
||||
MOVM.IA.W [R2-R6], (R0)
|
||||
MOVM.IA.W (R1), [R2-R5]
|
||||
MOVM.IA [R2-R6], (R0)
|
||||
ADD $20, R13, R0
|
||||
MOVM.DA (R0), [R4-R7]
|
||||
RET
|
||||
|
||||
#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
|
||||
MOVBU (offset+0)(Rsrc), Rtmp; \
|
||||
MOVBU Rtmp, (offset+0)(Rdst); \
|
||||
MOVBU (offset+1)(Rsrc), Rtmp; \
|
||||
MOVBU Rtmp, (offset+1)(Rdst); \
|
||||
MOVBU (offset+2)(Rsrc), Rtmp; \
|
||||
MOVBU Rtmp, (offset+2)(Rdst); \
|
||||
MOVBU (offset+3)(Rsrc), Rtmp; \
|
||||
MOVBU Rtmp, (offset+3)(Rdst)
|
||||
|
||||
TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0
|
||||
// Needs 24 bytes of stack for saved registers and then 88 bytes of
|
||||
// scratch space after that. We assume that 24 bytes at (R13) have
|
||||
// already been used: four bytes for the link register saved in the
|
||||
// prelude of poly1305_auth_armv6, four bytes for saving the value of g
|
||||
// in that function and 16 bytes of scratch space used around
|
||||
// poly1305_finish_ext_armv6_skip1.
|
||||
ADD $24, R13, R12
|
||||
MOVM.IB [R4-R8, R14], (R12)
|
||||
MOVW R0, 88(R13)
|
||||
MOVW R1, 92(R13)
|
||||
MOVW R2, 96(R13)
|
||||
MOVW R1, R14
|
||||
MOVW R2, R12
|
||||
MOVW 56(R0), R8
|
||||
WORD $0xe1180008 // TST R8, R8 not working see issue 5921
|
||||
EOR R6, R6, R6
|
||||
MOVW.EQ $(1<<24), R6
|
||||
MOVW R6, 84(R13)
|
||||
ADD $116, R13, g
|
||||
MOVM.IA (R0), [R0-R9]
|
||||
MOVM.IA [R0-R4], (g)
|
||||
CMP $16, R12
|
||||
BLO poly1305_blocks_armv6_done
|
||||
|
||||
poly1305_blocks_armv6_mainloop:
|
||||
WORD $0xe31e0003 // TST R14, #3 not working see issue 5921
|
||||
BEQ poly1305_blocks_armv6_mainloop_aligned
|
||||
ADD $100, R13, g
|
||||
MOVW_UNALIGNED(R14, g, R0, 0)
|
||||
MOVW_UNALIGNED(R14, g, R0, 4)
|
||||
MOVW_UNALIGNED(R14, g, R0, 8)
|
||||
MOVW_UNALIGNED(R14, g, R0, 12)
|
||||
MOVM.IA (g), [R0-R3]
|
||||
ADD $16, R14
|
||||
B poly1305_blocks_armv6_mainloop_loaded
|
||||
|
||||
poly1305_blocks_armv6_mainloop_aligned:
|
||||
MOVM.IA.W (R14), [R0-R3]
|
||||
|
||||
poly1305_blocks_armv6_mainloop_loaded:
|
||||
MOVW R0>>26, g
|
||||
MOVW R1>>20, R11
|
||||
MOVW R2>>14, R12
|
||||
MOVW R14, 92(R13)
|
||||
MOVW R3>>8, R4
|
||||
ORR R1<<6, g, g
|
||||
ORR R2<<12, R11, R11
|
||||
ORR R3<<18, R12, R12
|
||||
BIC $0xfc000000, R0, R0
|
||||
BIC $0xfc000000, g, g
|
||||
MOVW 84(R13), R3
|
||||
BIC $0xfc000000, R11, R11
|
||||
BIC $0xfc000000, R12, R12
|
||||
ADD R0, R5, R5
|
||||
ADD g, R6, R6
|
||||
ORR R3, R4, R4
|
||||
ADD R11, R7, R7
|
||||
ADD $116, R13, R14
|
||||
ADD R12, R8, R8
|
||||
ADD R4, R9, R9
|
||||
MOVM.IA (R14), [R0-R4]
|
||||
MULLU R4, R5, (R11, g)
|
||||
MULLU R3, R5, (R14, R12)
|
||||
MULALU R3, R6, (R11, g)
|
||||
MULALU R2, R6, (R14, R12)
|
||||
MULALU R2, R7, (R11, g)
|
||||
MULALU R1, R7, (R14, R12)
|
||||
ADD R4<<2, R4, R4
|
||||
ADD R3<<2, R3, R3
|
||||
MULALU R1, R8, (R11, g)
|
||||
MULALU R0, R8, (R14, R12)
|
||||
MULALU R0, R9, (R11, g)
|
||||
MULALU R4, R9, (R14, R12)
|
||||
MOVW g, 76(R13)
|
||||
MOVW R11, 80(R13)
|
||||
MOVW R12, 68(R13)
|
||||
MOVW R14, 72(R13)
|
||||
MULLU R2, R5, (R11, g)
|
||||
MULLU R1, R5, (R14, R12)
|
||||
MULALU R1, R6, (R11, g)
|
||||
MULALU R0, R6, (R14, R12)
|
||||
MULALU R0, R7, (R11, g)
|
||||
MULALU R4, R7, (R14, R12)
|
||||
ADD R2<<2, R2, R2
|
||||
ADD R1<<2, R1, R1
|
||||
MULALU R4, R8, (R11, g)
|
||||
MULALU R3, R8, (R14, R12)
|
||||
MULALU R3, R9, (R11, g)
|
||||
MULALU R2, R9, (R14, R12)
|
||||
MOVW g, 60(R13)
|
||||
MOVW R11, 64(R13)
|
||||
MOVW R12, 52(R13)
|
||||
MOVW R14, 56(R13)
|
||||
MULLU R0, R5, (R11, g)
|
||||
MULALU R4, R6, (R11, g)
|
||||
MULALU R3, R7, (R11, g)
|
||||
MULALU R2, R8, (R11, g)
|
||||
MULALU R1, R9, (R11, g)
|
||||
ADD $52, R13, R0
|
||||
MOVM.IA (R0), [R0-R7]
|
||||
MOVW g>>26, R12
|
||||
MOVW R4>>26, R14
|
||||
ORR R11<<6, R12, R12
|
||||
ORR R5<<6, R14, R14
|
||||
BIC $0xfc000000, g, g
|
||||
BIC $0xfc000000, R4, R4
|
||||
ADD.S R12, R0, R0
|
||||
ADC $0, R1, R1
|
||||
ADD.S R14, R6, R6
|
||||
ADC $0, R7, R7
|
||||
MOVW R0>>26, R12
|
||||
MOVW R6>>26, R14
|
||||
ORR R1<<6, R12, R12
|
||||
ORR R7<<6, R14, R14
|
||||
BIC $0xfc000000, R0, R0
|
||||
BIC $0xfc000000, R6, R6
|
||||
ADD R14<<2, R14, R14
|
||||
ADD.S R12, R2, R2
|
||||
ADC $0, R3, R3
|
||||
ADD R14, g, g
|
||||
MOVW R2>>26, R12
|
||||
MOVW g>>26, R14
|
||||
ORR R3<<6, R12, R12
|
||||
BIC $0xfc000000, g, R5
|
||||
BIC $0xfc000000, R2, R7
|
||||
ADD R12, R4, R4
|
||||
ADD R14, R0, R0
|
||||
MOVW R4>>26, R12
|
||||
BIC $0xfc000000, R4, R8
|
||||
ADD R12, R6, R9
|
||||
MOVW 96(R13), R12
|
||||
MOVW 92(R13), R14
|
||||
MOVW R0, R6
|
||||
CMP $32, R12
|
||||
SUB $16, R12, R12
|
||||
MOVW R12, 96(R13)
|
||||
BHS poly1305_blocks_armv6_mainloop
|
||||
|
||||
poly1305_blocks_armv6_done:
|
||||
MOVW 88(R13), R12
|
||||
MOVW R5, 20(R12)
|
||||
MOVW R6, 24(R12)
|
||||
MOVW R7, 28(R12)
|
||||
MOVW R8, 32(R12)
|
||||
MOVW R9, 36(R12)
|
||||
ADD $48, R13, R0
|
||||
MOVM.DA (R0), [R4-R8, R14]
|
||||
RET
|
||||
|
||||
#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
|
||||
MOVBU.P 1(Rsrc), Rtmp; \
|
||||
MOVBU.P Rtmp, 1(Rdst); \
|
||||
MOVBU.P 1(Rsrc), Rtmp; \
|
||||
MOVBU.P Rtmp, 1(Rdst)
|
||||
|
||||
#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
|
||||
MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
|
||||
MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
|
||||
|
||||
// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
|
||||
TEXT ·poly1305_auth_armv6(SB), $196-16
|
||||
// The value 196, just above, is the sum of 64 (the size of the context
|
||||
// structure) and 132 (the amount of stack needed).
|
||||
//
|
||||
// At this point, the stack pointer (R13) has been moved down. It
|
||||
// points to the saved link register and there's 196 bytes of free
|
||||
// space above it.
|
||||
//
|
||||
// The stack for this function looks like:
|
||||
//
|
||||
// +---------------------
|
||||
// |
|
||||
// | 64 bytes of context structure
|
||||
// |
|
||||
// +---------------------
|
||||
// |
|
||||
// | 112 bytes for poly1305_blocks_armv6
|
||||
// |
|
||||
// +---------------------
|
||||
// | 16 bytes of final block, constructed at
|
||||
// | poly1305_finish_ext_armv6_skip8
|
||||
// +---------------------
|
||||
// | four bytes of saved 'g'
|
||||
// +---------------------
|
||||
// | lr, saved by prelude <- R13 points here
|
||||
// +---------------------
|
||||
MOVW g, 4(R13)
|
||||
|
||||
MOVW out+0(FP), R4
|
||||
MOVW m+4(FP), R5
|
||||
MOVW mlen+8(FP), R6
|
||||
MOVW key+12(FP), R7
|
||||
|
||||
ADD $136, R13, R0 // 136 = 4 + 4 + 16 + 112
|
||||
MOVW R7, R1
|
||||
|
||||
// poly1305_init_ext_armv6 will write to the stack from R13+4, but
|
||||
// that's ok because none of the other values have been written yet.
|
||||
BL poly1305_init_ext_armv6<>(SB)
|
||||
BIC.S $15, R6, R2
|
||||
BEQ poly1305_auth_armv6_noblocks
|
||||
ADD $136, R13, R0
|
||||
MOVW R5, R1
|
||||
ADD R2, R5, R5
|
||||
SUB R2, R6, R6
|
||||
BL poly1305_blocks_armv6<>(SB)
|
||||
|
||||
poly1305_auth_armv6_noblocks:
|
||||
ADD $136, R13, R0
|
||||
MOVW R5, R1
|
||||
MOVW R6, R2
|
||||
MOVW R4, R3
|
||||
|
||||
MOVW R0, R5
|
||||
MOVW R1, R6
|
||||
MOVW R2, R7
|
||||
MOVW R3, R8
|
||||
AND.S R2, R2, R2
|
||||
BEQ poly1305_finish_ext_armv6_noremaining
|
||||
EOR R0, R0
|
||||
ADD $8, R13, R9 // 8 = offset to 16 byte scratch space
|
||||
MOVW R0, (R9)
|
||||
MOVW R0, 4(R9)
|
||||
MOVW R0, 8(R9)
|
||||
MOVW R0, 12(R9)
|
||||
WORD $0xe3110003 // TST R1, #3 not working see issue 5921
|
||||
BEQ poly1305_finish_ext_armv6_aligned
|
||||
WORD $0xe3120008 // TST R2, #8 not working see issue 5921
|
||||
BEQ poly1305_finish_ext_armv6_skip8
|
||||
MOVWP_UNALIGNED(R1, R9, g)
|
||||
MOVWP_UNALIGNED(R1, R9, g)
|
||||
|
||||
poly1305_finish_ext_armv6_skip8:
|
||||
WORD $0xe3120004 // TST $4, R2 not working see issue 5921
|
||||
BEQ poly1305_finish_ext_armv6_skip4
|
||||
MOVWP_UNALIGNED(R1, R9, g)
|
||||
|
||||
poly1305_finish_ext_armv6_skip4:
|
||||
WORD $0xe3120002 // TST $2, R2 not working see issue 5921
|
||||
BEQ poly1305_finish_ext_armv6_skip2
|
||||
MOVHUP_UNALIGNED(R1, R9, g)
|
||||
B poly1305_finish_ext_armv6_skip2
|
||||
|
||||
poly1305_finish_ext_armv6_aligned:
|
||||
WORD $0xe3120008 // TST R2, #8 not working see issue 5921
|
||||
BEQ poly1305_finish_ext_armv6_skip8_aligned
|
||||
MOVM.IA.W (R1), [g-R11]
|
||||
MOVM.IA.W [g-R11], (R9)
|
||||
|
||||
poly1305_finish_ext_armv6_skip8_aligned:
|
||||
WORD $0xe3120004 // TST $4, R2 not working see issue 5921
|
||||
BEQ poly1305_finish_ext_armv6_skip4_aligned
|
||||
MOVW.P 4(R1), g
|
||||
MOVW.P g, 4(R9)
|
||||
|
||||
poly1305_finish_ext_armv6_skip4_aligned:
|
||||
WORD $0xe3120002 // TST $2, R2 not working see issue 5921
|
||||
BEQ poly1305_finish_ext_armv6_skip2
|
||||
MOVHU.P 2(R1), g
|
||||
MOVH.P g, 2(R9)
|
||||
|
||||
poly1305_finish_ext_armv6_skip2:
|
||||
WORD $0xe3120001 // TST $1, R2 not working see issue 5921
|
||||
BEQ poly1305_finish_ext_armv6_skip1
|
||||
MOVBU.P 1(R1), g
|
||||
MOVBU.P g, 1(R9)
|
||||
|
||||
poly1305_finish_ext_armv6_skip1:
|
||||
MOVW $1, R11
|
||||
MOVBU R11, 0(R9)
|
||||
MOVW R11, 56(R5)
|
||||
MOVW R5, R0
|
||||
ADD $8, R13, R1
|
||||
MOVW $16, R2
|
||||
BL poly1305_blocks_armv6<>(SB)
|
||||
|
||||
poly1305_finish_ext_armv6_noremaining:
|
||||
MOVW 20(R5), R0
|
||||
MOVW 24(R5), R1
|
||||
MOVW 28(R5), R2
|
||||
MOVW 32(R5), R3
|
||||
MOVW 36(R5), R4
|
||||
MOVW R4>>26, R12
|
||||
BIC $0xfc000000, R4, R4
|
||||
ADD R12<<2, R12, R12
|
||||
ADD R12, R0, R0
|
||||
MOVW R0>>26, R12
|
||||
BIC $0xfc000000, R0, R0
|
||||
ADD R12, R1, R1
|
||||
MOVW R1>>26, R12
|
||||
BIC $0xfc000000, R1, R1
|
||||
ADD R12, R2, R2
|
||||
MOVW R2>>26, R12
|
||||
BIC $0xfc000000, R2, R2
|
||||
ADD R12, R3, R3
|
||||
MOVW R3>>26, R12
|
||||
BIC $0xfc000000, R3, R3
|
||||
ADD R12, R4, R4
|
||||
ADD $5, R0, R6
|
||||
MOVW R6>>26, R12
|
||||
BIC $0xfc000000, R6, R6
|
||||
ADD R12, R1, R7
|
||||
MOVW R7>>26, R12
|
||||
BIC $0xfc000000, R7, R7
|
||||
ADD R12, R2, g
|
||||
MOVW g>>26, R12
|
||||
BIC $0xfc000000, g, g
|
||||
ADD R12, R3, R11
|
||||
MOVW $-(1<<26), R12
|
||||
ADD R11>>26, R12, R12
|
||||
BIC $0xfc000000, R11, R11
|
||||
ADD R12, R4, R9
|
||||
MOVW R9>>31, R12
|
||||
SUB $1, R12
|
||||
AND R12, R6, R6
|
||||
AND R12, R7, R7
|
||||
AND R12, g, g
|
||||
AND R12, R11, R11
|
||||
AND R12, R9, R9
|
||||
MVN R12, R12
|
||||
AND R12, R0, R0
|
||||
AND R12, R1, R1
|
||||
AND R12, R2, R2
|
||||
AND R12, R3, R3
|
||||
AND R12, R4, R4
|
||||
ORR R6, R0, R0
|
||||
ORR R7, R1, R1
|
||||
ORR g, R2, R2
|
||||
ORR R11, R3, R3
|
||||
ORR R9, R4, R4
|
||||
ORR R1<<26, R0, R0
|
||||
MOVW R1>>6, R1
|
||||
ORR R2<<20, R1, R1
|
||||
MOVW R2>>12, R2
|
||||
ORR R3<<14, R2, R2
|
||||
MOVW R3>>18, R3
|
||||
ORR R4<<8, R3, R3
|
||||
MOVW 40(R5), R6
|
||||
MOVW 44(R5), R7
|
||||
MOVW 48(R5), g
|
||||
MOVW 52(R5), R11
|
||||
ADD.S R6, R0, R0
|
||||
ADC.S R7, R1, R1
|
||||
ADC.S g, R2, R2
|
||||
ADC.S R11, R3, R3
|
||||
MOVM.IA [R0-R3], (R8)
|
||||
MOVW R5, R12
|
||||
EOR R0, R0, R0
|
||||
EOR R1, R1, R1
|
||||
EOR R2, R2, R2
|
||||
EOR R3, R3, R3
|
||||
EOR R4, R4, R4
|
||||
EOR R5, R5, R5
|
||||
EOR R6, R6, R6
|
||||
EOR R7, R7, R7
|
||||
MOVM.IA.W [R0-R7], (R12)
|
||||
MOVM.IA [R0-R7], (R12)
|
||||
MOVW 4(R13), g
|
||||
RET
|
|
@ -0,0 +1,14 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build s390x,!go1.11 !arm,!amd64,!s390x gccgo appengine nacl
|
||||
|
||||
package poly1305
|
||||
|
||||
// Sum generates an authenticator for msg using a one-time key and puts the
|
||||
// 16-byte result into out. Authenticating two different messages with the same
|
||||
// key allows an attacker to forge messages at will.
|
||||
func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
|
||||
sumGeneric(out, msg, key)
|
||||
}
|
|
@ -0,0 +1,139 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package poly1305
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// sumGeneric generates an authenticator for msg using a one-time key and
|
||||
// puts the 16-byte result into out. This is the generic implementation of
|
||||
// Sum and should be called if no assembly implementation is available.
|
||||
func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
|
||||
var (
|
||||
h0, h1, h2, h3, h4 uint32 // the hash accumulators
|
||||
r0, r1, r2, r3, r4 uint64 // the r part of the key
|
||||
)
|
||||
|
||||
r0 = uint64(binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff)
|
||||
r1 = uint64((binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03)
|
||||
r2 = uint64((binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff)
|
||||
r3 = uint64((binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff)
|
||||
r4 = uint64((binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff)
|
||||
|
||||
R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5
|
||||
|
||||
for len(msg) >= TagSize {
|
||||
// h += msg
|
||||
h0 += binary.LittleEndian.Uint32(msg[0:]) & 0x3ffffff
|
||||
h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff
|
||||
h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff
|
||||
h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff
|
||||
h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | (1 << 24)
|
||||
|
||||
// h *= r
|
||||
d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1)
|
||||
d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2)
|
||||
d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3)
|
||||
d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4)
|
||||
d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0)
|
||||
|
||||
// h %= p
|
||||
h0 = uint32(d0) & 0x3ffffff
|
||||
h1 = uint32(d1) & 0x3ffffff
|
||||
h2 = uint32(d2) & 0x3ffffff
|
||||
h3 = uint32(d3) & 0x3ffffff
|
||||
h4 = uint32(d4) & 0x3ffffff
|
||||
|
||||
h0 += uint32(d4>>26) * 5
|
||||
h1 += h0 >> 26
|
||||
h0 = h0 & 0x3ffffff
|
||||
|
||||
msg = msg[TagSize:]
|
||||
}
|
||||
|
||||
if len(msg) > 0 {
|
||||
var block [TagSize]byte
|
||||
off := copy(block[:], msg)
|
||||
block[off] = 0x01
|
||||
|
||||
// h += msg
|
||||
h0 += binary.LittleEndian.Uint32(block[0:]) & 0x3ffffff
|
||||
h1 += (binary.LittleEndian.Uint32(block[3:]) >> 2) & 0x3ffffff
|
||||
h2 += (binary.LittleEndian.Uint32(block[6:]) >> 4) & 0x3ffffff
|
||||
h3 += (binary.LittleEndian.Uint32(block[9:]) >> 6) & 0x3ffffff
|
||||
h4 += (binary.LittleEndian.Uint32(block[12:]) >> 8)
|
||||
|
||||
// h *= r
|
||||
d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1)
|
||||
d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2)
|
||||
d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3)
|
||||
d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4)
|
||||
d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0)
|
||||
|
||||
// h %= p
|
||||
h0 = uint32(d0) & 0x3ffffff
|
||||
h1 = uint32(d1) & 0x3ffffff
|
||||
h2 = uint32(d2) & 0x3ffffff
|
||||
h3 = uint32(d3) & 0x3ffffff
|
||||
h4 = uint32(d4) & 0x3ffffff
|
||||
|
||||
h0 += uint32(d4>>26) * 5
|
||||
h1 += h0 >> 26
|
||||
h0 = h0 & 0x3ffffff
|
||||
}
|
||||
|
||||
// h %= p reduction
|
||||
h2 += h1 >> 26
|
||||
h1 &= 0x3ffffff
|
||||
h3 += h2 >> 26
|
||||
h2 &= 0x3ffffff
|
||||
h4 += h3 >> 26
|
||||
h3 &= 0x3ffffff
|
||||
h0 += 5 * (h4 >> 26)
|
||||
h4 &= 0x3ffffff
|
||||
h1 += h0 >> 26
|
||||
h0 &= 0x3ffffff
|
||||
|
||||
// h - p
|
||||
t0 := h0 + 5
|
||||
t1 := h1 + (t0 >> 26)
|
||||
t2 := h2 + (t1 >> 26)
|
||||
t3 := h3 + (t2 >> 26)
|
||||
t4 := h4 + (t3 >> 26) - (1 << 26)
|
||||
t0 &= 0x3ffffff
|
||||
t1 &= 0x3ffffff
|
||||
t2 &= 0x3ffffff
|
||||
t3 &= 0x3ffffff
|
||||
|
||||
// select h if h < p else h - p
|
||||
t_mask := (t4 >> 31) - 1
|
||||
h_mask := ^t_mask
|
||||
h0 = (h0 & h_mask) | (t0 & t_mask)
|
||||
h1 = (h1 & h_mask) | (t1 & t_mask)
|
||||
h2 = (h2 & h_mask) | (t2 & t_mask)
|
||||
h3 = (h3 & h_mask) | (t3 & t_mask)
|
||||
h4 = (h4 & h_mask) | (t4 & t_mask)
|
||||
|
||||
// h %= 2^128
|
||||
h0 |= h1 << 26
|
||||
h1 = ((h1 >> 6) | (h2 << 20))
|
||||
h2 = ((h2 >> 12) | (h3 << 14))
|
||||
h3 = ((h3 >> 18) | (h4 << 8))
|
||||
|
||||
// s: the s part of the key
|
||||
// tag = (h + s) % (2^128)
|
||||
t := uint64(h0) + uint64(binary.LittleEndian.Uint32(key[16:]))
|
||||
h0 = uint32(t)
|
||||
t = uint64(h1) + uint64(binary.LittleEndian.Uint32(key[20:])) + (t >> 32)
|
||||
h1 = uint32(t)
|
||||
t = uint64(h2) + uint64(binary.LittleEndian.Uint32(key[24:])) + (t >> 32)
|
||||
h2 = uint32(t)
|
||||
t = uint64(h3) + uint64(binary.LittleEndian.Uint32(key[28:])) + (t >> 32)
|
||||
h3 = uint32(t)
|
||||
|
||||
binary.LittleEndian.PutUint32(out[0:], h0)
|
||||
binary.LittleEndian.PutUint32(out[4:], h1)
|
||||
binary.LittleEndian.PutUint32(out[8:], h2)
|
||||
binary.LittleEndian.PutUint32(out[12:], h3)
|
||||
}
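The limb arithmetic above implements tag = ((sum over i of c_i * r^(n-i+1)) mod 2^130-5 + s) mod 2^128, where each c_i is a 16-byte chunk read little-endian with a 0x01 byte appended. A naive math/big sketch of that formula, useful only as a cross-check against sumGeneric; it is hypothetical, not constant time, and not part of the package:

```go
package main

import (
	"fmt"
	"math/big"
)

// leInt reads b as a little-endian unsigned integer.
func leInt(b []byte) *big.Int {
	rev := make([]byte, len(b))
	for i, v := range b {
		rev[len(b)-1-i] = v
	}
	return new(big.Int).SetBytes(rev)
}

// poly1305Ref is a hypothetical reference for the formula that sumGeneric
// computes with 26-bit limbs.
func poly1305Ref(msg []byte, key *[32]byte) [16]byte {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2^130 - 5

	// Clamp r: the same bits that the masked, shifted loads above discard.
	r := make([]byte, 16)
	copy(r, key[:16])
	for _, i := range []int{3, 7, 11, 15} {
		r[i] &= 15
	}
	for _, i := range []int{4, 8, 12} {
		r[i] &= 252
	}
	rInt := leInt(r)

	h := new(big.Int)
	for len(msg) > 0 {
		n := 16
		if len(msg) < n {
			n = len(msg)
		}
		// Appending 0x01 plays the role of the (1 << 24) added to h4 for full
		// blocks and of block[off] = 0x01 for the final partial block.
		chunk := append(append([]byte{}, msg[:n]...), 1)
		h.Add(h, leInt(chunk))
		h.Mul(h, rInt)
		h.Mod(h, p)
		msg = msg[n:]
	}

	// tag = (h + s) mod 2^128, emitted little-endian.
	h.Add(h, leInt(key[16:]))
	h.Mod(h, new(big.Int).Lsh(big.NewInt(1), 128))
	hb := h.Bytes() // big-endian
	var tag [16]byte
	for i, v := range hb {
		tag[len(hb)-1-i] = v
	}
	return tag
}

func main() {
	key := [32]byte{1: 0x42, 17: 0x17}
	fmt.Printf("%x\n", poly1305Ref([]byte("hello"), &key))
}
```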
|
|
@ -0,0 +1,49 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build s390x,go1.11,!gccgo,!appengine
|
||||
|
||||
package poly1305
|
||||
|
||||
// hasVectorFacility reports whether the machine supports
|
||||
// the vector facility (vx).
|
||||
func hasVectorFacility() bool
|
||||
|
||||
// hasVMSLFacility reports whether the machine supports
|
||||
// Vector Multiply Sum Logical (VMSL).
|
||||
func hasVMSLFacility() bool
|
||||
|
||||
var hasVX = hasVectorFacility()
|
||||
var hasVMSL = hasVMSLFacility()
|
||||
|
||||
// poly1305vx is an assembly implementation of Poly1305 that uses vector
|
||||
// instructions. It must only be called if the vector facility (vx) is
|
||||
// available.
|
||||
//go:noescape
|
||||
func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
|
||||
|
||||
// poly1305vmsl is an assembly implementation of Poly1305 that uses vector
|
||||
// instructions, including VMSL. It must only be called if the vector facility (vx) is
|
||||
// available and if VMSL is supported.
|
||||
//go:noescape
|
||||
func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
|
||||
|
||||
// Sum generates an authenticator for m using a one-time key and puts the
|
||||
// 16-byte result into out. Authenticating two different messages with the same
|
||||
// key allows an attacker to forge messages at will.
|
||||
func Sum(out *[16]byte, m []byte, key *[32]byte) {
|
||||
if hasVX {
|
||||
var mPtr *byte
|
||||
if len(m) > 0 {
|
||||
mPtr = &m[0]
|
||||
}
|
||||
if hasVMSL && len(m) > 256 {
|
||||
poly1305vmsl(out, mPtr, uint64(len(m)), key)
|
||||
} else {
|
||||
poly1305vx(out, mPtr, uint64(len(m)), key)
|
||||
}
|
||||
} else {
|
||||
sumGeneric(out, m, key)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,400 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build s390x,go1.11,!gccgo,!appengine
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Implementation of Poly1305 using the vector facility (vx).
|
||||
|
||||
// constants
|
||||
#define MOD26 V0
|
||||
#define EX0 V1
|
||||
#define EX1 V2
|
||||
#define EX2 V3
|
||||
|
||||
// temporaries
|
||||
#define T_0 V4
|
||||
#define T_1 V5
|
||||
#define T_2 V6
|
||||
#define T_3 V7
|
||||
#define T_4 V8
|
||||
|
||||
// key (r)
|
||||
#define R_0 V9
|
||||
#define R_1 V10
|
||||
#define R_2 V11
|
||||
#define R_3 V12
|
||||
#define R_4 V13
|
||||
#define R5_1 V14
|
||||
#define R5_2 V15
|
||||
#define R5_3 V16
|
||||
#define R5_4 V17
|
||||
#define RSAVE_0 R5
|
||||
#define RSAVE_1 R6
|
||||
#define RSAVE_2 R7
|
||||
#define RSAVE_3 R8
|
||||
#define RSAVE_4 R9
|
||||
#define R5SAVE_1 V28
|
||||
#define R5SAVE_2 V29
|
||||
#define R5SAVE_3 V30
|
||||
#define R5SAVE_4 V31
|
||||
|
||||
// message block
|
||||
#define F_0 V18
|
||||
#define F_1 V19
|
||||
#define F_2 V20
|
||||
#define F_3 V21
|
||||
#define F_4 V22
|
||||
|
||||
// accumulator
|
||||
#define H_0 V23
|
||||
#define H_1 V24
|
||||
#define H_2 V25
|
||||
#define H_3 V26
|
||||
#define H_4 V27
|
||||
|
||||
GLOBL ·keyMask<>(SB), RODATA, $16
|
||||
DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f
|
||||
DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f
|
||||
|
||||
GLOBL ·bswapMask<>(SB), RODATA, $16
|
||||
DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908
|
||||
DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100
|
||||
|
||||
GLOBL ·constants<>(SB), RODATA, $64
|
||||
// MOD26
|
||||
DATA ·constants<>+0(SB)/8, $0x3ffffff
|
||||
DATA ·constants<>+8(SB)/8, $0x3ffffff
|
||||
// EX0
|
||||
DATA ·constants<>+16(SB)/8, $0x0006050403020100
|
||||
DATA ·constants<>+24(SB)/8, $0x1016151413121110
|
||||
// EX1
|
||||
DATA ·constants<>+32(SB)/8, $0x060c0b0a09080706
|
||||
DATA ·constants<>+40(SB)/8, $0x161c1b1a19181716
|
||||
// EX2
|
||||
DATA ·constants<>+48(SB)/8, $0x0d0d0d0d0d0f0e0d
|
||||
DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d
|
||||
|
||||
// h = (f*g) % (2**130-5) [partial reduction]
|
||||
#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
|
||||
VMLOF f0, g0, h0 \
|
||||
VMLOF f0, g1, h1 \
|
||||
VMLOF f0, g2, h2 \
|
||||
VMLOF f0, g3, h3 \
|
||||
VMLOF f0, g4, h4 \
|
||||
VMLOF f1, g54, T_0 \
|
||||
VMLOF f1, g0, T_1 \
|
||||
VMLOF f1, g1, T_2 \
|
||||
VMLOF f1, g2, T_3 \
|
||||
VMLOF f1, g3, T_4 \
|
||||
VMALOF f2, g53, h0, h0 \
|
||||
VMALOF f2, g54, h1, h1 \
|
||||
VMALOF f2, g0, h2, h2 \
|
||||
VMALOF f2, g1, h3, h3 \
|
||||
VMALOF f2, g2, h4, h4 \
|
||||
VMALOF f3, g52, T_0, T_0 \
|
||||
VMALOF f3, g53, T_1, T_1 \
|
||||
VMALOF f3, g54, T_2, T_2 \
|
||||
VMALOF f3, g0, T_3, T_3 \
|
||||
VMALOF f3, g1, T_4, T_4 \
|
||||
VMALOF f4, g51, h0, h0 \
|
||||
VMALOF f4, g52, h1, h1 \
|
||||
VMALOF f4, g53, h2, h2 \
|
||||
VMALOF f4, g54, h3, h3 \
|
||||
VMALOF f4, g0, h4, h4 \
|
||||
VAG T_0, h0, h0 \
|
||||
VAG T_1, h1, h1 \
|
||||
VAG T_2, h2, h2 \
|
||||
VAG T_3, h3, h3 \
|
||||
VAG T_4, h4, h4
|
||||
|
||||
// carry h0->h1 h3->h4, h1->h2 h4->h0, h0->h1 h2->h3, h3->h4
|
||||
#define REDUCE(h0, h1, h2, h3, h4) \
|
||||
VESRLG $26, h0, T_0 \
|
||||
VESRLG $26, h3, T_1 \
|
||||
VN MOD26, h0, h0 \
|
||||
VN MOD26, h3, h3 \
|
||||
VAG T_0, h1, h1 \
|
||||
VAG T_1, h4, h4 \
|
||||
VESRLG $26, h1, T_2 \
|
||||
VESRLG $26, h4, T_3 \
|
||||
VN MOD26, h1, h1 \
|
||||
VN MOD26, h4, h4 \
|
||||
VESLG $2, T_3, T_4 \
|
||||
VAG T_3, T_4, T_4 \
|
||||
VAG T_2, h2, h2 \
|
||||
VAG T_4, h0, h0 \
|
||||
VESRLG $26, h2, T_0 \
|
||||
VESRLG $26, h0, T_1 \
|
||||
VN MOD26, h2, h2 \
|
||||
VN MOD26, h0, h0 \
|
||||
VAG T_0, h3, h3 \
|
||||
VAG T_1, h1, h1 \
|
||||
VESRLG $26, h3, T_2 \
|
||||
VN MOD26, h3, h3 \
|
||||
VAG T_2, h4, h4
|
||||
|
||||
// expand in0 into d[0] and in1 into d[1]
|
||||
#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
|
||||
VGBM $0x0707, d1 \ // d1=tmp
|
||||
VPERM in0, in1, EX2, d4 \
|
||||
VPERM in0, in1, EX0, d0 \
|
||||
VPERM in0, in1, EX1, d2 \
|
||||
VN d1, d4, d4 \
|
||||
VESRLG $26, d0, d1 \
|
||||
VESRLG $30, d2, d3 \
|
||||
VESRLG $4, d2, d2 \
|
||||
VN MOD26, d0, d0 \
|
||||
VN MOD26, d1, d1 \
|
||||
VN MOD26, d2, d2 \
|
||||
VN MOD26, d3, d3
|
||||
|
||||
// pack h4:h0 into h1:h0 (no carry)
|
||||
#define PACK(h0, h1, h2, h3, h4) \
|
||||
VESLG $26, h1, h1 \
|
||||
VESLG $26, h3, h3 \
|
||||
VO h0, h1, h0 \
|
||||
VO h2, h3, h2 \
|
||||
VESLG $4, h2, h2 \
|
||||
VLEIB $7, $48, h1 \
|
||||
VSLB h1, h2, h2 \
|
||||
VO h0, h2, h0 \
|
||||
VLEIB $7, $104, h1 \
|
||||
VSLB h1, h4, h3 \
|
||||
VO h3, h0, h0 \
|
||||
VLEIB $7, $24, h1 \
|
||||
VSRLB h1, h4, h1
|
||||
|
||||
// if h > 2**130-5 then h -= 2**130-5
|
||||
#define MOD(h0, h1, t0, t1, t2) \
|
||||
VZERO t0 \
|
||||
VLEIG $1, $5, t0 \
|
||||
VACCQ h0, t0, t1 \
|
||||
VAQ h0, t0, t0 \
|
||||
VONE t2 \
|
||||
VLEIG $1, $-4, t2 \
|
||||
VAQ t2, t1, t1 \
|
||||
VACCQ h1, t1, t1 \
|
||||
VONE t2 \
|
||||
VAQ t2, t1, t1 \
|
||||
VN h0, t1, t2 \
|
||||
VNC t0, t1, t1 \
|
||||
VO t1, t2, h0
|
||||
|
||||
// func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
|
||||
TEXT ·poly1305vx(SB), $0-32
|
||||
// This code processes up to 2 blocks (32 bytes) per iteration
|
||||
// using the algorithm described in:
|
||||
// NEON crypto, Daniel J. Bernstein & Peter Schwabe
|
||||
// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
|
||||
LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key
|
||||
|
||||
// load MOD26, EX0, EX1 and EX2
|
||||
MOVD $·constants<>(SB), R5
|
||||
VLM (R5), MOD26, EX2
|
||||
|
||||
// setup r
|
||||
VL (R4), T_0
|
||||
MOVD $·keyMask<>(SB), R6
|
||||
VL (R6), T_1
|
||||
VN T_0, T_1, T_0
|
||||
EXPAND(T_0, T_0, R_0, R_1, R_2, R_3, R_4)
|
||||
|
||||
// setup r*5
|
||||
VLEIG $0, $5, T_0
|
||||
VLEIG $1, $5, T_0
|
||||
|
||||
// store r (for final block)
|
||||
VMLOF T_0, R_1, R5SAVE_1
|
||||
VMLOF T_0, R_2, R5SAVE_2
|
||||
VMLOF T_0, R_3, R5SAVE_3
|
||||
VMLOF T_0, R_4, R5SAVE_4
|
||||
VLGVG $0, R_0, RSAVE_0
|
||||
VLGVG $0, R_1, RSAVE_1
|
||||
VLGVG $0, R_2, RSAVE_2
|
||||
VLGVG $0, R_3, RSAVE_3
|
||||
VLGVG $0, R_4, RSAVE_4
|
||||
|
||||
// skip r**2 calculation
|
||||
CMPBLE R3, $16, skip
|
||||
|
||||
// calculate r**2
|
||||
MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5SAVE_1, R5SAVE_2, R5SAVE_3, R5SAVE_4, H_0, H_1, H_2, H_3, H_4)
|
||||
REDUCE(H_0, H_1, H_2, H_3, H_4)
|
||||
VLEIG $0, $5, T_0
|
||||
VLEIG $1, $5, T_0
|
||||
VMLOF T_0, H_1, R5_1
|
||||
VMLOF T_0, H_2, R5_2
|
||||
VMLOF T_0, H_3, R5_3
|
||||
VMLOF T_0, H_4, R5_4
|
||||
VLR H_0, R_0
|
||||
VLR H_1, R_1
|
||||
VLR H_2, R_2
|
||||
VLR H_3, R_3
|
||||
VLR H_4, R_4
|
||||
|
||||
// initialize h
|
||||
VZERO H_0
|
||||
VZERO H_1
|
||||
VZERO H_2
|
||||
VZERO H_3
|
||||
VZERO H_4
|
||||
|
||||
loop:
|
||||
CMPBLE R3, $32, b2
|
||||
VLM (R2), T_0, T_1
|
||||
SUB $32, R3
|
||||
MOVD $32(R2), R2
|
||||
EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
|
||||
VLEIB $4, $1, F_4
|
||||
VLEIB $12, $1, F_4
|
||||
|
||||
multiply:
|
||||
VAG H_0, F_0, F_0
|
||||
VAG H_1, F_1, F_1
|
||||
VAG H_2, F_2, F_2
|
||||
VAG H_3, F_3, F_3
|
||||
VAG H_4, F_4, F_4
|
||||
MULTIPLY(F_0, F_1, F_2, F_3, F_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
|
||||
REDUCE(H_0, H_1, H_2, H_3, H_4)
|
||||
CMPBNE R3, $0, loop
|
||||
|
||||
finish:
|
||||
// sum vectors
|
||||
VZERO T_0
|
||||
VSUMQG H_0, T_0, H_0
|
||||
VSUMQG H_1, T_0, H_1
|
||||
VSUMQG H_2, T_0, H_2
|
||||
VSUMQG H_3, T_0, H_3
|
||||
VSUMQG H_4, T_0, H_4
|
||||
|
||||
// h may be >= 2*(2**130-5) so we need to reduce it again
|
||||
REDUCE(H_0, H_1, H_2, H_3, H_4)
|
||||
|
||||
// carry h1->h4
|
||||
VESRLG $26, H_1, T_1
|
||||
VN MOD26, H_1, H_1
|
||||
VAQ T_1, H_2, H_2
|
||||
VESRLG $26, H_2, T_2
|
||||
VN MOD26, H_2, H_2
|
||||
VAQ T_2, H_3, H_3
|
||||
VESRLG $26, H_3, T_3
|
||||
VN MOD26, H_3, H_3
|
||||
VAQ T_3, H_4, H_4
|
||||
|
||||
// h is now < 2*(2**130-5)
|
||||
// pack h into h1 (hi) and h0 (lo)
|
||||
PACK(H_0, H_1, H_2, H_3, H_4)
|
||||
|
||||
// if h > 2**130-5 then h -= 2**130-5
|
||||
MOD(H_0, H_1, T_0, T_1, T_2)
|
||||
|
||||
// h += s
|
||||
MOVD $·bswapMask<>(SB), R5
|
||||
VL (R5), T_1
|
||||
VL 16(R4), T_0
|
||||
VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big)
|
||||
VAQ T_0, H_0, H_0
|
||||
VPERM H_0, H_0, T_1, H_0 // reverse bytes (to little)
|
||||
VST H_0, (R1)
|
||||
|
||||
RET
|
||||
|
||||
b2:
|
||||
CMPBLE R3, $16, b1
|
||||
|
||||
// 2 blocks remaining
|
||||
SUB $17, R3
|
||||
VL (R2), T_0
|
||||
VLL R3, 16(R2), T_1
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, T_1
|
||||
EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $12, $1, F_4
|
||||
VLEIB $4, $1, F_4
|
||||
|
||||
// setup [r²,r]
|
||||
VLVGG $1, RSAVE_0, R_0
|
||||
VLVGG $1, RSAVE_1, R_1
|
||||
VLVGG $1, RSAVE_2, R_2
|
||||
VLVGG $1, RSAVE_3, R_3
|
||||
VLVGG $1, RSAVE_4, R_4
|
||||
VPDI $0, R5_1, R5SAVE_1, R5_1
|
||||
VPDI $0, R5_2, R5SAVE_2, R5_2
|
||||
VPDI $0, R5_3, R5SAVE_3, R5_3
|
||||
VPDI $0, R5_4, R5SAVE_4, R5_4
|
||||
|
||||
MOVD $0, R3
|
||||
BR multiply
|
||||
|
||||
skip:
|
||||
VZERO H_0
|
||||
VZERO H_1
|
||||
VZERO H_2
|
||||
VZERO H_3
|
||||
VZERO H_4
|
||||
|
||||
CMPBEQ R3, $0, finish
|
||||
|
||||
b1:
|
||||
// 1 block remaining
|
||||
SUB $1, R3
|
||||
VLL R3, (R2), T_0
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, T_0
|
||||
VZERO T_1
|
||||
EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $4, $1, F_4
|
||||
VLEIG $1, $1, R_0
|
||||
VZERO R_1
|
||||
VZERO R_2
|
||||
VZERO R_3
|
||||
VZERO R_4
|
||||
VZERO R5_1
|
||||
VZERO R5_2
|
||||
VZERO R5_3
|
||||
VZERO R5_4
|
||||
|
||||
// setup [r, 1]
|
||||
VLVGG $0, RSAVE_0, R_0
|
||||
VLVGG $0, RSAVE_1, R_1
|
||||
VLVGG $0, RSAVE_2, R_2
|
||||
VLVGG $0, RSAVE_3, R_3
|
||||
VLVGG $0, RSAVE_4, R_4
|
||||
VPDI $0, R5SAVE_1, R5_1, R5_1
|
||||
VPDI $0, R5SAVE_2, R5_2, R5_2
|
||||
VPDI $0, R5SAVE_3, R5_3, R5_3
|
||||
VPDI $0, R5SAVE_4, R5_4, R5_4
|
||||
|
||||
MOVD $0, R3
|
||||
BR multiply
|
||||
|
||||
TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
|
||||
MOVD $x-24(SP), R1
|
||||
XC $24, 0(R1), 0(R1) // clear the storage
|
||||
MOVD $2, R0 // R0 is the number of double words stored -1
|
||||
WORD $0xB2B01000 // STFLE 0(R1)
|
||||
XOR R0, R0 // reset the value of R0
|
||||
MOVBZ z-8(SP), R1
|
||||
AND $0x40, R1
|
||||
BEQ novector
|
||||
|
||||
vectorinstalled:
|
||||
// check if the vector instruction has been enabled
|
||||
VLEIB $0, $0xF, V16
|
||||
VLGVB $0, V16, R1
|
||||
CMPBNE R1, $0xF, novector
|
||||
MOVB $1, ret+0(FP) // have vx
|
||||
RET
|
||||
|
||||
novector:
|
||||
MOVB $0, ret+0(FP) // no vx
|
||||
RET
|
|
@ -0,0 +1,931 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build s390x,go1.11,!gccgo,!appengine
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction.
|
||||
|
||||
// constants
|
||||
#define EX0 V1
|
||||
#define EX1 V2
|
||||
#define EX2 V3
|
||||
|
||||
// temporaries
|
||||
#define T_0 V4
|
||||
#define T_1 V5
|
||||
#define T_2 V6
|
||||
#define T_3 V7
|
||||
#define T_4 V8
|
||||
#define T_5 V9
|
||||
#define T_6 V10
|
||||
#define T_7 V11
|
||||
#define T_8 V12
|
||||
#define T_9 V13
|
||||
#define T_10 V14
|
||||
|
||||
// r**2 & r**4
|
||||
#define R_0 V15
|
||||
#define R_1 V16
|
||||
#define R_2 V17
|
||||
#define R5_1 V18
|
||||
#define R5_2 V19
|
||||
// key (r)
|
||||
#define RSAVE_0 R7
|
||||
#define RSAVE_1 R8
|
||||
#define RSAVE_2 R9
|
||||
#define R5SAVE_1 R10
|
||||
#define R5SAVE_2 R11
|
||||
|
||||
// message block
|
||||
#define M0 V20
|
||||
#define M1 V21
|
||||
#define M2 V22
|
||||
#define M3 V23
|
||||
#define M4 V24
|
||||
#define M5 V25
|
||||
|
||||
// accumulator
|
||||
#define H0_0 V26
|
||||
#define H1_0 V27
|
||||
#define H2_0 V28
|
||||
#define H0_1 V29
|
||||
#define H1_1 V30
|
||||
#define H2_1 V31
|
||||
|
||||
GLOBL ·keyMask<>(SB), RODATA, $16
|
||||
DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f
|
||||
DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f
|
||||
|
||||
GLOBL ·bswapMask<>(SB), RODATA, $16
|
||||
DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908
|
||||
DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100
|
||||
|
||||
GLOBL ·constants<>(SB), RODATA, $48
|
||||
// EX0
|
||||
DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f
|
||||
DATA ·constants<>+8(SB)/8, $0x0000050403020100
|
||||
// EX1
|
||||
DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f
|
||||
DATA ·constants<>+24(SB)/8, $0x00000a0908070605
|
||||
// EX2
|
||||
DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f
|
||||
DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b
|
||||
|
||||
GLOBL ·c<>(SB), RODATA, $48
|
||||
// EX0
|
||||
DATA ·c<>+0(SB)/8, $0x0000050403020100
|
||||
DATA ·c<>+8(SB)/8, $0x0000151413121110
|
||||
// EX1
|
||||
DATA ·c<>+16(SB)/8, $0x00000a0908070605
|
||||
DATA ·c<>+24(SB)/8, $0x00001a1918171615
|
||||
// EX2
|
||||
DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b
|
||||
DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b
|
||||
|
||||
GLOBL ·reduce<>(SB), RODATA, $32
|
||||
// 44 bit
|
||||
DATA ·reduce<>+0(SB)/8, $0x0
|
||||
DATA ·reduce<>+8(SB)/8, $0xfffffffffff
|
||||
// 42 bit
|
||||
DATA ·reduce<>+16(SB)/8, $0x0
|
||||
DATA ·reduce<>+24(SB)/8, $0x3ffffffffff
|
||||
|
||||
// h = (f*g) % (2**130-5) [partial reduction]
|
||||
// uses T_0...T_9 temporary registers
|
||||
// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2
|
||||
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
|
||||
// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2
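// Limbs use a 44+44+42-bit radix here, so the 128-bit VMSLG multiply-sums do
// not overflow. r5_1/r5_2 hold 20*r_1 and 20*r_2: products that would land
// at weight 2**132 wrap around as 2**132 == 4*2**130 == 20 (mod 2**130-5),
// which folds the modular reduction into the multiplication.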
|
||||
#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \
|
||||
\ // Eliminate the dependency for the last 2 VMSLs
|
||||
VMSLG m02_0, r_2, m4_2, m4_2 \
|
||||
VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined
|
||||
VMSLG m02_0, r_0, m4_0, m4_0 \
|
||||
VMSLG m02_1, r5_2, V0, T_0 \
|
||||
VMSLG m02_0, r_1, m4_1, m4_1 \
|
||||
VMSLG m02_1, r_0, V0, T_1 \
|
||||
VMSLG m02_1, r_1, V0, T_2 \
|
||||
VMSLG m02_2, r5_1, V0, T_3 \
|
||||
VMSLG m02_2, r5_2, V0, T_4 \
|
||||
VMSLG m13_0, r_0, m5_0, m5_0 \
|
||||
VMSLG m13_1, r5_2, V0, T_5 \
|
||||
VMSLG m13_0, r_1, m5_1, m5_1 \
|
||||
VMSLG m13_1, r_0, V0, T_6 \
|
||||
VMSLG m13_1, r_1, V0, T_7 \
|
||||
VMSLG m13_2, r5_1, V0, T_8 \
|
||||
VMSLG m13_2, r5_2, V0, T_9 \
|
||||
VMSLG m02_2, r_0, m4_2, m4_2 \
|
||||
VMSLG m13_2, r_0, m5_2, m5_2 \
|
||||
VAQ m4_0, T_0, m02_0 \
|
||||
VAQ m4_1, T_1, m02_1 \
|
||||
VAQ m5_0, T_5, m13_0 \
|
||||
VAQ m5_1, T_6, m13_1 \
|
||||
VAQ m02_0, T_3, m02_0 \
|
||||
VAQ m02_1, T_4, m02_1 \
|
||||
VAQ m13_0, T_8, m13_0 \
|
||||
VAQ m13_1, T_9, m13_1 \
|
||||
VAQ m4_2, T_2, m02_2 \
|
||||
VAQ m5_2, T_7, m13_2 \
|
||||
|
||||
// SQUARE uses the three limbs of r and 20*r_2 to output the square of r
|
||||
// uses T_1, T_5 and T_7 temporary registers
|
||||
// input: r_0, r_1, r_2, r5_2
|
||||
// temp: TEMP0, TEMP1, TEMP2
|
||||
// output: p0, p1, p2
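// Squaring needs only the three square terms and the three cross terms, and
// each cross term appears twice in the result; that is why every TEMP value
// below is added in twice rather than recomputed.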
|
||||
#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \
|
||||
VMSLG r_0, r_0, p0, p0 \
|
||||
VMSLG r_1, r5_2, V0, TEMP0 \
|
||||
VMSLG r_2, r5_2, p1, p1 \
|
||||
VMSLG r_0, r_1, V0, TEMP1 \
|
||||
VMSLG r_1, r_1, p2, p2 \
|
||||
VMSLG r_0, r_2, V0, TEMP2 \
|
||||
VAQ TEMP0, p0, p0 \
|
||||
VAQ TEMP1, p1, p1 \
|
||||
VAQ TEMP2, p2, p2 \
|
||||
VAQ TEMP0, p0, p0 \
|
||||
VAQ TEMP1, p1, p1 \
|
||||
VAQ TEMP2, p2, p2 \
|
||||
|
||||
// carry h0->h1->h2->h0 || h3->h4->h5->h3
|
||||
// uses T_2, T_4, T_5, T_7, T_8, T_9
|
||||
// t6, t7, t8, t9, t10, t11
|
||||
// input: h0, h1, h2, h3, h4, h5
|
||||
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11
|
||||
// output: h0, h1, h2, h3, h4, h5
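// Brings the six limbs (two interleaved accumulators in 44/44/42-bit form)
// back into range: carries are extracted with byte plus bit shifts, the
// carry out of each top limb is multiplied by 5 (VESLG $2 then VAQ) before
// wrapping to the low limb, and the halves are then recombined into the
// [h0/1/2, h3/4/5] layout for a final carry pass.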
|
||||
#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \
|
||||
VLM (R12), t6, t7 \ // 44 and 42 bit clear mask
|
||||
VLEIB $7, $0x28, t10 \ // 5 byte shift mask
|
||||
VREPIB $4, t8 \ // 4 bit shift mask
|
||||
VREPIB $2, t11 \ // 2 bit shift mask
|
||||
VSRLB t10, h0, t0 \ // h0 byte shift
|
||||
VSRLB t10, h1, t1 \ // h1 byte shift
|
||||
VSRLB t10, h2, t2 \ // h2 byte shift
|
||||
VSRLB t10, h3, t3 \ // h3 byte shift
|
||||
VSRLB t10, h4, t4 \ // h4 byte shift
|
||||
VSRLB t10, h5, t5 \ // h5 byte shift
|
||||
VSRL t8, t0, t0 \ // h0 bit shift
|
||||
VSRL t8, t1, t1 \ // h1 bit shift
|
||||
VSRL t11, t2, t2 \ // h2 bit shift
|
||||
VSRL t8, t3, t3 \ // h3 bit shift
|
||||
VSRL t8, t4, t4 \ // h4 bit shift
|
||||
VESLG $2, t2, t9 \ // h2 carry x5
|
||||
VSRL t11, t5, t5 \ // h5 bit shift
|
||||
VN t6, h0, h0 \ // h0 clear carry
|
||||
VAQ t2, t9, t2 \ // h2 carry x5
|
||||
VESLG $2, t5, t9 \ // h5 carry x5
|
||||
VN t6, h1, h1 \ // h1 clear carry
|
||||
VN t7, h2, h2 \ // h2 clear carry
|
||||
VAQ t5, t9, t5 \ // h5 carry x5
|
||||
VN t6, h3, h3 \ // h3 clear carry
|
||||
VN t6, h4, h4 \ // h4 clear carry
|
||||
VN t7, h5, h5 \ // h5 clear carry
|
||||
VAQ t0, h1, h1 \ // h0->h1
|
||||
VAQ t3, h4, h4 \ // h3->h4
|
||||
VAQ t1, h2, h2 \ // h1->h2
|
||||
VAQ t4, h5, h5 \ // h4->h5
|
||||
VAQ t2, h0, h0 \ // h2->h0
|
||||
VAQ t5, h3, h3 \ // h5->h3
|
||||
VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves
|
||||
VREPG $1, t7, t7 \
|
||||
VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5]
|
||||
VSLDB $8, h1, h1, h1 \
|
||||
VSLDB $8, h2, h2, h2 \
|
||||
VO h0, h3, h3 \
|
||||
VO h1, h4, h4 \
|
||||
VO h2, h5, h5 \
|
||||
VESRLG $44, h3, t0 \ // 44 bit shift right
|
||||
VESRLG $44, h4, t1 \
|
||||
VESRLG $42, h5, t2 \
|
||||
VN t6, h3, h3 \ // clear carry bits
|
||||
VN t6, h4, h4 \
|
||||
VN t7, h5, h5 \
|
||||
VESLG $2, t2, t9 \ // multiply carry by 5
|
||||
VAQ t9, t2, t2 \
|
||||
VAQ t0, h4, h4 \
|
||||
VAQ t1, h5, h5 \
|
||||
VAQ t2, h3, h3 \
|
||||
|
||||
// carry h0->h1->h2->h0
|
||||
// input: h0, h1, h2
|
||||
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8
|
||||
// output: h0, h1, h2
|
||||
#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \
|
||||
VLEIB $7, $0x28, t3 \ // 5 byte shift mask
|
||||
VREPIB $4, t4 \ // 4 bit shift mask
|
||||
VREPIB $2, t7 \ // 2 bit shift mask
|
||||
VGBM $0x003F, t5 \ // mask to clear carry bits
|
||||
VSRLB t3, h0, t0 \
|
||||
VSRLB t3, h1, t1 \
|
||||
VSRLB t3, h2, t2 \
|
||||
VESRLG $4, t5, t5 \ // 44 bit clear mask
|
||||
VSRL t4, t0, t0 \
|
||||
VSRL t4, t1, t1 \
|
||||
VSRL t7, t2, t2 \
|
||||
VESRLG $2, t5, t6 \ // 42 bit clear mask
|
||||
VESLG $2, t2, t8 \
|
||||
VAQ t8, t2, t2 \
|
||||
VN t5, h0, h0 \
|
||||
VN t5, h1, h1 \
|
||||
VN t6, h2, h2 \
|
||||
VAQ t0, h1, h1 \
|
||||
VAQ t1, h2, h2 \
|
||||
VAQ t2, h0, h0 \
|
||||
VSRLB t3, h0, t0 \
|
||||
VSRLB t3, h1, t1 \
|
||||
VSRLB t3, h2, t2 \
|
||||
VSRL t4, t0, t0 \
|
||||
VSRL t4, t1, t1 \
|
||||
VSRL t7, t2, t2 \
|
||||
VN t5, h0, h0 \
|
||||
VN t5, h1, h1 \
|
||||
VESLG $2, t2, t8 \
|
||||
VN t6, h2, h2 \
|
||||
VAQ t0, h1, h1 \
|
||||
VAQ t8, t2, t2 \
|
||||
VAQ t1, h2, h2 \
|
||||
VAQ t2, h0, h0 \
|
||||
|
||||
// expands two message blocks into the lower halves of the d registers
|
||||
// moves the contents of the d registers into the upper halves
|
||||
// input: in1, in2, d0, d1, d2, d3, d4, d5
|
||||
// temp: TEMP0, TEMP1, TEMP2, TEMP3
|
||||
// output: d0, d1, d2, d3, d4, d5
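// Roughly: each destination register ends up with a limb of the new block in
// its low doubleword and the limb that was previously there in its high
// doubleword, pairing consecutive blocks for the VMSLG multiply-sums.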
|
||||
#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, TEMP3) \
|
||||
VGBM $0xff3f, TEMP0 \
|
||||
VGBM $0xff1f, TEMP1 \
|
||||
VESLG $4, d1, TEMP2 \
|
||||
VESLG $4, d4, TEMP3 \
|
||||
VESRLG $4, TEMP0, TEMP0 \
|
||||
VPERM in1, d0, EX0, d0 \
|
||||
VPERM in2, d3, EX0, d3 \
|
||||
VPERM in1, d2, EX2, d2 \
|
||||
VPERM in2, d5, EX2, d5 \
|
||||
VPERM in1, TEMP2, EX1, d1 \
|
||||
VPERM in2, TEMP3, EX1, d4 \
|
||||
VN TEMP0, d0, d0 \
|
||||
VN TEMP0, d3, d3 \
|
||||
VESRLG $4, d1, d1 \
|
||||
VESRLG $4, d4, d4 \
|
||||
VN TEMP1, d2, d2 \
|
||||
VN TEMP1, d5, d5 \
|
||||
VN TEMP0, d1, d1 \
|
||||
VN TEMP0, d4, d4 \
|
||||
|
||||
// expands one message block into the lower halves of the d registers
|
||||
// moves the contents of the d registers into the upper halves
|
||||
// input: in, d0, d1, d2
|
||||
// temp: TEMP0, TEMP1, TEMP2
|
||||
// output: d0, d1, d2
|
||||
#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \
|
||||
VGBM $0xff3f, TEMP0 \
|
||||
VESLG $4, d1, TEMP2 \
|
||||
VGBM $0xff1f, TEMP1 \
|
||||
VPERM in, d0, EX0, d0 \
|
||||
VESRLG $4, TEMP0, TEMP0 \
|
||||
VPERM in, d2, EX2, d2 \
|
||||
VPERM in, TEMP2, EX1, d1 \
|
||||
VN TEMP0, d0, d0 \
|
||||
VN TEMP1, d2, d2 \
|
||||
VESRLG $4, d1, d1 \
|
||||
VN TEMP0, d1, d1 \
|
||||
|
||||
// pack h2:h0 into h1:h0 (no carry)
|
||||
// input: h0, h1, h2
|
||||
// output: h0, h1, h2
|
||||
#define PACK(h0, h1, h2) \
|
||||
VMRLG h1, h2, h2 \ // copy h1 to upper half h2
|
||||
VESLG $44, h1, h1 \ // shift limb 1 44 bits, leaving 20
|
||||
VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1
|
||||
VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1
|
||||
VLEIG $1, $0, h1 \ // clear h2 stuff from lower half of h1
|
||||
VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1)
|
||||
VLEIG $0, $0, h2 \ // clear upper half of h2
|
||||
VESRLG $40, h2, h1 \ // h1 now has upper two bits of result
|
||||
VLEIB $7, $88, h1 \ // for byte shift (11 bytes)
|
||||
VSLB h1, h2, h2 \ // shift h2 11 bytes to the left
|
||||
VO h0, h2, h0 \ // combine h0 with 20 bits from limb 1
|
||||
VLEIG $0, $0, h1 \ // clear upper half of h1
|
||||
|
||||
// if h > 2**130-5 then h -= 2**130-5
|
||||
// input: h0, h1
|
||||
// temp: t0, t1, t2
|
||||
// output: h0
|
||||
#define MOD(h0, h1, t0, t1, t2) \
|
||||
VZERO t0 \
|
||||
VLEIG $1, $5, t0 \
|
||||
VACCQ h0, t0, t1 \
|
||||
VAQ h0, t0, t0 \
|
||||
VONE t2 \
|
||||
VLEIG $1, $-4, t2 \
|
||||
VAQ t2, t1, t1 \
|
||||
VACCQ h1, t1, t1 \
|
||||
VONE t2 \
|
||||
VAQ t2, t1, t1 \
|
||||
VN h0, t1, t2 \
|
||||
VNC t0, t1, t1 \
|
||||
VO t1, t2, h0 \
|
||||
|
||||
// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]key)
|
||||
TEXT ·poly1305vmsl(SB), $0-32
|
||||
// This code processes 6 blocks up front, then up to 4 blocks (64 bytes) per iteration
|
||||
// using the algorithm described in:
|
||||
// NEON crypto, Daniel J. Bernstein & Peter Schwabe
|
||||
// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
|
||||
// And as modified for VMSL as described in
|
||||
// Accelerating Poly1305 Cryptographic Message Authentication on the z14
|
||||
// O'Farrell et al, CASCON 2017, p48-55
|
||||
// https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht
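// Roughly: the main loop consumes four blocks per pass; the accumulator is
// spread over the H*_0/H*_1 register pairs, R_0..R5_2 hold [r**4, r**2] once
// r**4 has been computed, and the remaining lower powers of r are applied in
// the tail paths (b4/b3/b2/b1) before the halves are summed and reduced.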
|
||||
|
||||
LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key
|
||||
VZERO V0 // c
|
||||
|
||||
// load EX0, EX1 and EX2
|
||||
MOVD $·constants<>(SB), R5
|
||||
VLM (R5), EX0, EX2 // c
|
||||
|
||||
// setup r
|
||||
VL (R4), T_0
|
||||
MOVD $·keyMask<>(SB), R6
|
||||
VL (R6), T_1
|
||||
VN T_0, T_1, T_0
|
||||
VZERO T_2 // limbs for r
|
||||
VZERO T_3
|
||||
VZERO T_4
|
||||
EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7)
|
||||
|
||||
// T_2, T_3, T_4: [0, r]
|
||||
|
||||
// setup r*20
|
||||
VLEIG $0, $0, T_0
|
||||
VLEIG $1, $20, T_0 // T_0: [0, 20]
|
||||
VZERO T_5
|
||||
VZERO T_6
|
||||
VMSLG T_0, T_3, T_5, T_5
|
||||
VMSLG T_0, T_4, T_6, T_6
|
||||
|
||||
// store r for final block in GR
|
||||
VLGVG $1, T_2, RSAVE_0 // c
|
||||
VLGVG $1, T_3, RSAVE_1 // c
|
||||
VLGVG $1, T_4, RSAVE_2 // c
|
||||
VLGVG $1, T_5, R5SAVE_1 // c
|
||||
VLGVG $1, T_6, R5SAVE_2 // c
|
||||
|
||||
// initialize h
|
||||
VZERO H0_0
|
||||
VZERO H1_0
|
||||
VZERO H2_0
|
||||
VZERO H0_1
|
||||
VZERO H1_1
|
||||
VZERO H2_1
|
||||
|
||||
// initialize pointer for reduce constants
|
||||
MOVD $·reduce<>(SB), R12
|
||||
|
||||
// calculate r**2 and 20*(r**2)
|
||||
VZERO R_0
|
||||
VZERO R_1
|
||||
VZERO R_2
|
||||
SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7)
|
||||
REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1)
|
||||
VZERO R5_1
|
||||
VZERO R5_2
|
||||
VMSLG T_0, R_1, R5_1, R5_1
|
||||
VMSLG T_0, R_2, R5_2, R5_2
|
||||
|
||||
// skip r**4 calculation if 3 blocks or less
|
||||
CMPBLE R3, $48, b4
|
||||
|
||||
// calculate r**4 and 20*(r**4)
|
||||
VZERO T_8
|
||||
VZERO T_9
|
||||
VZERO T_10
|
||||
SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7)
|
||||
REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1)
|
||||
VZERO T_2
|
||||
VZERO T_3
|
||||
VMSLG T_0, T_9, T_2, T_2
|
||||
VMSLG T_0, T_10, T_3, T_3
|
||||
|
||||
// put r**2 to the right and r**4 to the left of R_0, R_1, R_2
|
||||
VSLDB $8, T_8, T_8, T_8
|
||||
VSLDB $8, T_9, T_9, T_9
|
||||
VSLDB $8, T_10, T_10, T_10
|
||||
VSLDB $8, T_2, T_2, T_2
|
||||
VSLDB $8, T_3, T_3, T_3
|
||||
|
||||
VO T_8, R_0, R_0
|
||||
VO T_9, R_1, R_1
|
||||
VO T_10, R_2, R_2
|
||||
VO T_2, R5_1, R5_1
|
||||
VO T_3, R5_2, R5_2
|
||||
|
||||
CMPBLE R3, $80, load // less than or equal to 5 blocks in message
|
||||
|
||||
// 6(or 5+1) blocks
|
||||
SUB $81, R3
|
||||
VLM (R2), M0, M4
|
||||
VLL R3, 80(R2), M5
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBGE R3, $16, 2(PC)
|
||||
VLVGB R3, R0, M5
|
||||
MOVD $96(R2), R2
|
||||
EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
|
||||
EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
|
||||
VLEIB $2, $1, H2_0
|
||||
VLEIB $2, $1, H2_1
|
||||
VLEIB $10, $1, H2_0
|
||||
VLEIB $10, $1, H2_1
|
||||
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO T_4
|
||||
VZERO T_10
|
||||
EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3)
|
||||
VLR T_4, M4
|
||||
VLEIB $10, $1, M2
|
||||
CMPBLT R3, $16, 2(PC)
|
||||
VLEIB $10, $1, T_10
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
|
||||
VMRHG V0, H0_1, H0_0
|
||||
VMRHG V0, H1_1, H1_0
|
||||
VMRHG V0, H2_1, H2_0
|
||||
VMRLG V0, H0_1, H0_1
|
||||
VMRLG V0, H1_1, H1_1
|
||||
VMRLG V0, H2_1, H2_1
|
||||
|
||||
SUB $16, R3
|
||||
CMPBLE R3, $0, square
|
||||
|
||||
load:
|
||||
// load EX0, EX1 and EX2
|
||||
MOVD $·c<>(SB), R5
|
||||
VLM (R5), EX0, EX2
|
||||
|
||||
loop:
|
||||
CMPBLE R3, $64, add // b4 // last 4 or less blocks left
|
||||
|
||||
// next 4 full blocks
|
||||
VLM (R2), M2, M5
|
||||
SUB $64, R3
|
||||
MOVD $64(R2), R2
|
||||
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, T_2, T_7, T_8, T_9)
|
||||
|
||||
// expacc in-lined to create [m2, m3] limbs
|
||||
VGBM $0x3f3f, T_0 // 44 bit clear mask
|
||||
VGBM $0x1f1f, T_1 // 40 bit clear mask
|
||||
VPERM M2, M3, EX0, T_3
|
||||
VESRLG $4, T_0, T_0 // 44 bit clear mask ready
|
||||
VPERM M2, M3, EX1, T_4
|
||||
VPERM M2, M3, EX2, T_5
|
||||
VN T_0, T_3, T_3
|
||||
VESRLG $4, T_4, T_4
|
||||
VN T_1, T_5, T_5
|
||||
VN T_0, T_4, T_4
|
||||
VMRHG H0_1, T_3, H0_0
|
||||
VMRHG H1_1, T_4, H1_0
|
||||
VMRHG H2_1, T_5, H2_0
|
||||
VMRLG H0_1, T_3, H0_1
|
||||
VMRLG H1_1, T_4, H1_1
|
||||
VMRLG H2_1, T_5, H2_1
|
||||
VLEIB $10, $1, H2_0
|
||||
VLEIB $10, $1, H2_1
|
||||
VPERM M4, M5, EX0, T_3
|
||||
VPERM M4, M5, EX1, T_4
|
||||
VPERM M4, M5, EX2, T_5
|
||||
VN T_0, T_3, T_3
|
||||
VESRLG $4, T_4, T_4
|
||||
VN T_1, T_5, T_5
|
||||
VN T_0, T_4, T_4
|
||||
VMRHG V0, T_3, M0
|
||||
VMRHG V0, T_4, M1
|
||||
VMRHG V0, T_5, M2
|
||||
VMRLG V0, T_3, M3
|
||||
VMRLG V0, T_4, M4
|
||||
VMRLG V0, T_5, M5
|
||||
VLEIB $10, $1, M2
|
||||
VLEIB $10, $1, M5
|
||||
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
CMPBNE R3, $0, loop
|
||||
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
|
||||
VMRHG V0, H0_1, H0_0
|
||||
VMRHG V0, H1_1, H1_0
|
||||
VMRHG V0, H2_1, H2_0
|
||||
VMRLG V0, H0_1, H0_1
|
||||
VMRLG V0, H1_1, H1_1
|
||||
VMRLG V0, H2_1, H2_1
|
||||
|
||||
// load EX0, EX1, EX2
|
||||
MOVD $·constants<>(SB), R5
|
||||
VLM (R5), EX0, EX2
|
||||
|
||||
// sum vectors
|
||||
VAQ H0_0, H0_1, H0_0
|
||||
VAQ H1_0, H1_1, H1_0
|
||||
VAQ H2_0, H2_1, H2_0
|
||||
|
||||
// h may be >= 2*(2**130-5) so we need to reduce it again
|
||||
// M0...M4 are used as temps here
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
|
||||
|
||||
next: // carry h1->h2
|
||||
VLEIB $7, $0x28, T_1
|
||||
VREPIB $4, T_2
|
||||
VGBM $0x003F, T_3
|
||||
VESRLG $4, T_3
|
||||
|
||||
// byte shift
|
||||
VSRLB T_1, H1_0, T_4
|
||||
|
||||
// bit shift
|
||||
VSRL T_2, T_4, T_4
|
||||
|
||||
// clear h1 carry bits
|
||||
VN T_3, H1_0, H1_0
|
||||
|
||||
// add carry
|
||||
VAQ T_4, H2_0, H2_0
|
||||
|
||||
// h is now < 2*(2**130-5)
|
||||
// pack h into h1 (hi) and h0 (lo)
|
||||
PACK(H0_0, H1_0, H2_0)
|
||||
|
||||
// if h > 2**130-5 then h -= 2**130-5
|
||||
MOD(H0_0, H1_0, T_0, T_1, T_2)
|
||||
|
||||
// h += s
|
||||
MOVD $·bswapMask<>(SB), R5
|
||||
VL (R5), T_1
|
||||
VL 16(R4), T_0
|
||||
VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big)
|
||||
VAQ T_0, H0_0, H0_0
|
||||
VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little)
|
||||
VST H0_0, (R1)
|
||||
RET
|
||||
|
||||
add:
|
||||
// load EX0, EX1, EX2
|
||||
MOVD $·constants<>(SB), R5
|
||||
VLM (R5), EX0, EX2
|
||||
|
||||
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
|
||||
VMRHG V0, H0_1, H0_0
|
||||
VMRHG V0, H1_1, H1_0
|
||||
VMRHG V0, H2_1, H2_0
|
||||
VMRLG V0, H0_1, H0_1
|
||||
VMRLG V0, H1_1, H1_1
|
||||
VMRLG V0, H2_1, H2_1
|
||||
CMPBLE R3, $64, b4
|
||||
|
||||
b4:
|
||||
CMPBLE R3, $48, b3 // 3 blocks or less
|
||||
|
||||
// 4(3+1) blocks remaining
|
||||
SUB $49, R3
|
||||
VLM (R2), M0, M2
|
||||
VLL R3, 48(R2), M3
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, M3
|
||||
MOVD $64(R2), R2
|
||||
EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
|
||||
VLEIB $10, $1, H2_0
|
||||
VLEIB $10, $1, H2_1
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
VZERO T_4
|
||||
VZERO T_10
|
||||
EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3)
|
||||
VLR T_4, M2
|
||||
VLEIB $10, $1, M4
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $10, $1, T_10
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
|
||||
VMRHG V0, H0_1, H0_0
|
||||
VMRHG V0, H1_1, H1_0
|
||||
VMRHG V0, H2_1, H2_0
|
||||
VMRLG V0, H0_1, H0_1
|
||||
VMRLG V0, H1_1, H1_1
|
||||
VMRLG V0, H2_1, H2_1
|
||||
SUB $16, R3
|
||||
CMPBLE R3, $0, square // this condition must always hold true!
|
||||
|
||||
b3:
|
||||
CMPBLE R3, $32, b2
|
||||
|
||||
// 3 blocks remaining
|
||||
|
||||
// setup [r²,r]
|
||||
VSLDB $8, R_0, R_0, R_0
|
||||
VSLDB $8, R_1, R_1, R_1
|
||||
VSLDB $8, R_2, R_2, R_2
|
||||
VSLDB $8, R5_1, R5_1, R5_1
|
||||
VSLDB $8, R5_2, R5_2, R5_2
|
||||
|
||||
VLVGG $1, RSAVE_0, R_0
|
||||
VLVGG $1, RSAVE_1, R_1
|
||||
VLVGG $1, RSAVE_2, R_2
|
||||
VLVGG $1, R5SAVE_1, R5_1
|
||||
VLVGG $1, R5SAVE_2, R5_2
|
||||
|
||||
// setup [h0, h1]
|
||||
VSLDB $8, H0_0, H0_0, H0_0
|
||||
VSLDB $8, H1_0, H1_0, H1_0
|
||||
VSLDB $8, H2_0, H2_0, H2_0
|
||||
VO H0_1, H0_0, H0_0
|
||||
VO H1_1, H1_0, H1_0
|
||||
VO H2_1, H2_0, H2_0
|
||||
VZERO H0_1
|
||||
VZERO H1_1
|
||||
VZERO H2_1
|
||||
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
|
||||
// H*[r**2, r]
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5)
|
||||
|
||||
SUB $33, R3
|
||||
VLM (R2), M0, M1
|
||||
VLL R3, 32(R2), M2
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, M2
|
||||
|
||||
// H += m0
|
||||
VZERO T_1
|
||||
VZERO T_2
|
||||
VZERO T_3
|
||||
EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)
|
||||
VLEIB $10, $1, T_3
|
||||
VAG H0_0, T_1, H0_0
|
||||
VAG H1_0, T_2, H1_0
|
||||
VAG H2_0, T_3, H2_0
|
||||
|
||||
VZERO M0
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
VZERO T_10
|
||||
|
||||
// (H+m0)*r
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9)
|
||||
|
||||
// H += m1
|
||||
VZERO V0
|
||||
VZERO T_1
|
||||
VZERO T_2
|
||||
VZERO T_3
|
||||
EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6)
|
||||
VLEIB $10, $1, T_3
|
||||
VAQ H0_0, T_1, H0_0
|
||||
VAQ H1_0, T_2, H1_0
|
||||
VAQ H2_0, T_3, H2_0
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
|
||||
|
||||
// [H, m2] * [r**2, r]
|
||||
EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3)
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $10, $1, H2_0
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10)
|
||||
SUB $16, R3
|
||||
CMPBLE R3, $0, next // this condition must always hold true!
|
||||
|
||||
b2:
|
||||
CMPBLE R3, $16, b1
|
||||
|
||||
// 2 blocks remaining
|
||||
|
||||
// setup [r²,r]
|
||||
VSLDB $8, R_0, R_0, R_0
|
||||
VSLDB $8, R_1, R_1, R_1
|
||||
VSLDB $8, R_2, R_2, R_2
|
||||
VSLDB $8, R5_1, R5_1, R5_1
|
||||
VSLDB $8, R5_2, R5_2, R5_2
|
||||
|
||||
VLVGG $1, RSAVE_0, R_0
|
||||
VLVGG $1, RSAVE_1, R_1
|
||||
VLVGG $1, RSAVE_2, R_2
|
||||
VLVGG $1, R5SAVE_1, R5_1
|
||||
VLVGG $1, R5SAVE_2, R5_2
|
||||
|
||||
// setup [h0, h1]
|
||||
VSLDB $8, H0_0, H0_0, H0_0
|
||||
VSLDB $8, H1_0, H1_0, H1_0
|
||||
VSLDB $8, H2_0, H2_0, H2_0
|
||||
VO H0_1, H0_0, H0_0
|
||||
VO H1_1, H1_0, H1_0
|
||||
VO H2_1, H2_0, H2_0
|
||||
VZERO H0_1
|
||||
VZERO H1_1
|
||||
VZERO H2_1
|
||||
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
|
||||
// H*[r**2, r]
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
|
||||
VMRHG V0, H0_1, H0_0
|
||||
VMRHG V0, H1_1, H1_0
|
||||
VMRHG V0, H2_1, H2_0
|
||||
VMRLG V0, H0_1, H0_1
|
||||
VMRLG V0, H1_1, H1_1
|
||||
VMRLG V0, H2_1, H2_1
|
||||
|
||||
// move h to the left and 0s at the right
|
||||
VSLDB $8, H0_0, H0_0, H0_0
|
||||
VSLDB $8, H1_0, H1_0, H1_0
|
||||
VSLDB $8, H2_0, H2_0, H2_0
|
||||
|
||||
// get message blocks and append 1 to start
|
||||
SUB $17, R3
|
||||
VL (R2), M0
|
||||
VLL R3, 16(R2), M1
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, M1
|
||||
VZERO T_6
|
||||
VZERO T_7
|
||||
VZERO T_8
|
||||
EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3)
|
||||
EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3)
|
||||
VLEIB $2, $1, T_8
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $10, $1, T_8
|
||||
|
||||
// add [m0, m1] to h
|
||||
VAG H0_0, T_6, H0_0
|
||||
VAG H1_0, T_7, H1_0
|
||||
VAG H2_0, T_8, H2_0
|
||||
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
VZERO T_10
|
||||
VZERO M0
|
||||
|
||||
// at this point R_0 .. R5_2 look like [r**2, r]
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
|
||||
SUB $16, R3, R3
|
||||
CMPBLE R3, $0, next
|
||||
|
||||
b1:
|
||||
CMPBLE R3, $0, next
|
||||
|
||||
// 1 block remaining
|
||||
|
||||
// setup [r²,r]
|
||||
VSLDB $8, R_0, R_0, R_0
|
||||
VSLDB $8, R_1, R_1, R_1
|
||||
VSLDB $8, R_2, R_2, R_2
|
||||
VSLDB $8, R5_1, R5_1, R5_1
|
||||
VSLDB $8, R5_2, R5_2, R5_2
|
||||
|
||||
VLVGG $1, RSAVE_0, R_0
|
||||
VLVGG $1, RSAVE_1, R_1
|
||||
VLVGG $1, RSAVE_2, R_2
|
||||
VLVGG $1, R5SAVE_1, R5_1
|
||||
VLVGG $1, R5SAVE_2, R5_2
|
||||
|
||||
// setup [h0, h1]
|
||||
VSLDB $8, H0_0, H0_0, H0_0
|
||||
VSLDB $8, H1_0, H1_0, H1_0
|
||||
VSLDB $8, H2_0, H2_0, H2_0
|
||||
VO H0_1, H0_0, H0_0
|
||||
VO H1_1, H1_0, H1_0
|
||||
VO H2_1, H2_0, H2_0
|
||||
VZERO H0_1
|
||||
VZERO H1_1
|
||||
VZERO H2_1
|
||||
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
|
||||
// H*[r**2, r]
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
|
||||
|
||||
// set up [0, m0] limbs
|
||||
SUB $1, R3
|
||||
VLL R3, (R2), M0
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, M0
|
||||
VZERO T_1
|
||||
VZERO T_2
|
||||
VZERO T_3
|
||||
EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)// limbs: [0, m]
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $10, $1, T_3
|
||||
|
||||
// h+m0
|
||||
VAQ H0_0, T_1, H0_0
|
||||
VAQ H1_0, T_2, H1_0
|
||||
VAQ H2_0, T_3, H2_0
|
||||
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
|
||||
|
||||
BR next
|
||||
|
||||
square:
|
||||
// setup [r²,r]
|
||||
VSLDB $8, R_0, R_0, R_0
|
||||
VSLDB $8, R_1, R_1, R_1
|
||||
VSLDB $8, R_2, R_2, R_2
|
||||
VSLDB $8, R5_1, R5_1, R5_1
|
||||
VSLDB $8, R5_2, R5_2, R5_2
|
||||
|
||||
VLVGG $1, RSAVE_0, R_0
|
||||
VLVGG $1, RSAVE_1, R_1
|
||||
VLVGG $1, RSAVE_2, R_2
|
||||
VLVGG $1, R5SAVE_1, R5_1
|
||||
VLVGG $1, R5SAVE_2, R5_2
|
||||
|
||||
// setup [h0, h1]
|
||||
VSLDB $8, H0_0, H0_0, H0_0
|
||||
VSLDB $8, H1_0, H1_0, H1_0
|
||||
VSLDB $8, H2_0, H2_0, H2_0
|
||||
VO H0_1, H0_0, H0_0
|
||||
VO H1_1, H1_0, H1_0
|
||||
VO H2_1, H2_0, H2_0
|
||||
VZERO H0_1
|
||||
VZERO H1_1
|
||||
VZERO H2_1
|
||||
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
|
||||
// (h0*r**2) + (h1*r)
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
|
||||
BR next
|
||||
|
||||
TEXT ·hasVMSLFacility(SB), NOSPLIT, $24-1
|
||||
MOVD $x-24(SP), R1
|
||||
XC $24, 0(R1), 0(R1) // clear the storage
|
||||
MOVD $2, R0 // R0 is the number of double words stored -1
|
||||
WORD $0xB2B01000 // STFLE 0(R1)
|
||||
XOR R0, R0 // reset the value of R0
|
||||
MOVBZ z-8(SP), R1
|
||||
AND $0x01, R1
|
||||
BEQ novmsl
|
||||
|
||||
vectorinstalled:
|
||||
// check if the vector instruction has been enabled
|
||||
VLEIB $0, $0xF, V16
|
||||
VLGVB $0, V16, R1
|
||||
CMPBNE R1, $0xF, novmsl
|
||||
MOVB $1, ret+0(FP) // have vx
|
||||
RET
|
||||
|
||||
novmsl:
|
||||
MOVB $0, ret+0(FP) // no vx
|
||||
RET
|
Some files were not shown because too many files have changed in this diff