Merge branch 'master' into fix/Use-filepath-methods

João "Pisco" Fernandes 2025-04-01 17:59:00 +01:00 committed by GitHub
commit 16b365f268
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
186 changed files with 19619 additions and 6026 deletions

View File

@ -22,7 +22,7 @@ RUN .teamcity/install-cloudflare-go.sh
RUN PATH="/tmp/go/bin:$PATH" make cloudflared
# use a distroless base image with glibc
FROM gcr.io/distroless/base-debian11:nonroot
FROM gcr.io/distroless/base-debian12:nonroot
LABEL org.opencontainers.image.source="https://github.com/cloudflare/cloudflared"

View File

@ -17,7 +17,7 @@ RUN .teamcity/install-cloudflare-go.sh
RUN GOOS=linux GOARCH=amd64 PATH="/tmp/go/bin:$PATH" make cloudflared
# use a distroless base image with glibc
FROM gcr.io/distroless/base-debian11:nonroot
FROM gcr.io/distroless/base-debian12:nonroot
LABEL org.opencontainers.image.source="https://github.com/cloudflare/cloudflared"

View File

@ -17,7 +17,7 @@ RUN .teamcity/install-cloudflare-go.sh
RUN GOOS=linux GOARCH=arm64 PATH="/tmp/go/bin:$PATH" make cloudflared
# use a distroless base image with glibc
FROM gcr.io/distroless/base-debian11:nonroot-arm64
FROM gcr.io/distroless/base-debian12:nonroot-arm64
LABEL org.opencontainers.image.source="https://github.com/cloudflare/cloudflared"

View File

@ -24,7 +24,7 @@ else
DEB_PACKAGE_NAME := $(BINARY_NAME)
endif
DATE := $(shell date -u '+%Y-%m-%d-%H%M UTC')
DATE := $(shell date -u -r RELEASE_NOTES '+%Y-%m-%d-%H%M UTC')
VERSION_FLAGS := -X "main.Version=$(VERSION)" -X "main.BuildTime=$(DATE)"
ifdef PACKAGE_MANAGER
VERSION_FLAGS := $(VERSION_FLAGS) -X "github.com/cloudflare/cloudflared/cmd/cloudflared/updater.BuiltForPackageManager=$(PACKAGE_MANAGER)"

View File

@ -40,7 +40,7 @@ User documentation for Cloudflare Tunnel can be found at https://developers.clou
Once installed, you can authenticate `cloudflared` into your Cloudflare account and begin creating Tunnels to serve traffic to your origins.
* Create a Tunnel with [these instructions](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/create-tunnel)
* Create a Tunnel with [these instructions](https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/get-started/)
* Route traffic to that Tunnel:
* Via public [DNS records in Cloudflare](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/routing-to-tunnel/dns)
* Or via a public hostname guided by a [Cloudflare Load Balancer](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/routing-to-tunnel/lb)

View File

@ -1,3 +1,14 @@
2025.2.1
- 2025-02-26 TUN-9016: update base-debian to v12
- 2025-02-25 TUN-8960: Connect to FED API GW based on the OriginCert's endpoint
- 2025-02-25 TUN-9007: modify logic to resolve region when the tunnel token has an endpoint field
- 2025-02-13 SDLC-3762: Remove backstage.io/source-location from catalog-info.yaml
- 2025-02-06 TUN-8914: Create a flags module to group all cloudflared cli flags
2025.2.0
- 2025-02-03 TUN-8914: Add a new configuration to locally override the max-active-flows
- 2025-02-03 Bump x/crypto to 0.31.0
2025.1.1
- 2025-01-30 TUN-8858: update go to 1.22.10 and include quic-go FIPS changes
- 2025-01-30 TUN-8855: fix lint issues

View File

@ -4,7 +4,6 @@ metadata:
name: cloudflared
description: Client for Cloudflare Tunnels
annotations:
backstage.io/source-location: url:https://bitbucket.cfdata.org/projects/TUN/repos/cloudflared/browse
cloudflare.com/software-excellence-opt-in: "true"
cloudflare.com/jira-project-key: "TUN"
cloudflare.com/jira-project-component: "Cloudflare Tunnel"

View File

@ -16,7 +16,7 @@ bullseye: &bullseye
- golangci-lint
pre-cache: &build_pre_cache
- export GOCACHE=/cfsetup_build/.cache/go-build
- go install golang.org/x/tools/cmd/goimports@latest
- go install golang.org/x/tools/cmd/goimports@v0.30.0
post-cache:
# Linting
- make lint

View File

@ -104,7 +104,7 @@ func ssh(c *cli.Context) error {
case 3:
options.OriginURL = fmt.Sprintf("https://%s:%s", parts[2], parts[1])
options.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
InsecureSkipVerify: true, // #nosec G402
ServerName: parts[0],
}
log.Warn().Msgf("Using insecure SSL connection because SNI overridden to %s", parts[0])
@ -141,6 +141,5 @@ func ssh(c *cli.Context) error {
logger := log.With().Str("host", url.Host).Logger()
s = stream.NewDebugStream(s, &logger, maxMessages)
}
carrier.StartClient(wsConn, s, options)
return nil
return carrier.StartClient(wsConn, s, options)
}

View File

@ -19,6 +19,7 @@ import (
"github.com/cloudflare/cloudflared/carrier"
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/logger"
"github.com/cloudflare/cloudflared/sshgen"
"github.com/cloudflare/cloudflared/token"
@ -172,15 +173,15 @@ func Commands() []*cli.Command {
EnvVars: []string{"TUNNEL_SERVICE_TOKEN_SECRET"},
},
&cli.StringFlag{
Name: logger.LogFileFlag,
Name: cfdflags.LogFile,
Usage: "Save application log to this file for reporting issues.",
},
&cli.StringFlag{
Name: logger.LogSSHDirectoryFlag,
Name: cfdflags.LogDirectory,
Usage: "Save application log to this directory for reporting issues.",
},
&cli.StringFlag{
Name: logger.LogSSHLevelFlag,
Name: cfdflags.LogLevelSSH,
Aliases: []string{"loglevel"}, //added to match the tunnel side
Usage: "Application logging level {debug, info, warn, error, fatal}. ",
},
@ -342,7 +343,7 @@ func run(cmd string, args ...string) error {
return err
}
go func() {
io.Copy(os.Stderr, stderr)
_, _ = io.Copy(os.Stderr, stderr)
}()
stdout, err := c.StdoutPipe()
@ -350,7 +351,7 @@ func run(cmd string, args ...string) error {
return err
}
go func() {
io.Copy(os.Stdout, stdout)
_, _ = io.Copy(os.Stdout, stdout)
}()
return c.Run()
}
@ -531,7 +532,7 @@ func isFileThere(candidate string) bool {
}
// verifyTokenAtEdge checks for a token on disk, or generates a new one.
// Then makes a request to to the origin with the token to ensure it is valid.
// Then makes a request to the origin with the token to ensure it is valid.
// Returns nil if token is valid.
func verifyTokenAtEdge(appUrl *url.URL, appInfo *token.AppInfo, c *cli.Context, log *zerolog.Logger) error {
headers := parseRequestHeaders(c.StringSlice(sshHeaderFlag))

View File

@ -4,7 +4,7 @@ import (
"github.com/urfave/cli/v2"
"github.com/urfave/cli/v2/altsrc"
"github.com/cloudflare/cloudflared/logger"
cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
)
var (
@ -15,14 +15,14 @@ var (
func ConfigureLoggingFlags(shouldHide bool) []cli.Flag {
return []cli.Flag{
altsrc.NewStringFlag(&cli.StringFlag{
Name: logger.LogLevelFlag,
Name: cfdflags.LogLevel,
Value: "info",
Usage: "Application logging level {debug, info, warn, error, fatal}. " + debugLevelWarning,
EnvVars: []string{"TUNNEL_LOGLEVEL"},
Hidden: shouldHide,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: logger.LogTransportLevelFlag,
Name: cfdflags.TransportLogLevel,
Aliases: []string{"proto-loglevel"}, // This flag used to be called proto-loglevel
Value: "info",
Usage: "Transport logging level(previously called protocol logging level) {debug, info, warn, error, fatal}",
@ -30,19 +30,19 @@ func ConfigureLoggingFlags(shouldHide bool) []cli.Flag {
Hidden: shouldHide,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: logger.LogFileFlag,
Name: cfdflags.LogFile,
Usage: "Save application log to this file for reporting issues.",
EnvVars: []string{"TUNNEL_LOGFILE"},
Hidden: shouldHide,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: logger.LogDirectoryFlag,
Name: cfdflags.LogDirectory,
Usage: "Save application log to this directory for reporting issues.",
EnvVars: []string{"TUNNEL_LOGDIRECTORY"},
Hidden: shouldHide,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: "trace-output",
Name: cfdflags.TraceOutput,
Usage: "Name of trace output file, generated when cloudflared stops.",
EnvVars: []string{"TUNNEL_TRACE_OUTPUT"},
Hidden: shouldHide,

View File

@ -0,0 +1,155 @@
package flags
const (
// HaConnections specifies how many connections to make to the edge
HaConnections = "ha-connections"
// SshPort is the port on localhost the cloudflared ssh server will run on
SshPort = "local-ssh-port"
// SshIdleTimeout defines the duration a SSH session can remain idle before being closed
SshIdleTimeout = "ssh-idle-timeout"
// SshMaxTimeout defines the max duration a SSH session can remain open for
SshMaxTimeout = "ssh-max-timeout"
// SshLogUploaderBucketName is the bucket name to use for the SSH log uploader
SshLogUploaderBucketName = "bucket-name"
// SshLogUploaderRegionName is the AWS region name to use for the SSH log uploader
SshLogUploaderRegionName = "region-name"
// SshLogUploaderSecretID is the Secret id of SSH log uploader
SshLogUploaderSecretID = "secret-id"
// SshLogUploaderAccessKeyID is the Access key id of SSH log uploader
SshLogUploaderAccessKeyID = "access-key-id"
// SshLogUploaderSessionTokenID is the Session token of SSH log uploader
SshLogUploaderSessionTokenID = "session-token"
// SshLogUploaderS3URL is the S3 URL of SSH log uploader (e.g. don't use AWS s3 and use google storage bucket instead)
SshLogUploaderS3URL = "s3-url-host"
// HostKeyPath is the path of the dir to save SSH host keys too
HostKeyPath = "host-key-path"
// RpcTimeout is how long to wait for a Capnp RPC request to the edge
RpcTimeout = "rpc-timeout"
// WriteStreamTimeout sets if we should have a timeout when writing data to a stream towards the destination (edge/origin).
WriteStreamTimeout = "write-stream-timeout"
// QuicDisablePathMTUDiscovery sets if QUIC should not perform PTMU discovery and use a smaller (safe) packet size.
// Packets will then be at most 1252 (IPv4) / 1232 (IPv6) bytes in size.
// Note that this may result in packet drops for UDP proxying, since we expect being able to send at least 1280 bytes of inner packets.
QuicDisablePathMTUDiscovery = "quic-disable-pmtu-discovery"
// QuicConnLevelFlowControlLimit controls the max flow control limit allocated for a QUIC connection. This controls how much data is the
// receiver willing to buffer. Once the limit is reached, the sender will send a DATA_BLOCKED frame to indicate it has more data to write,
// but it's blocked by flow control
QuicConnLevelFlowControlLimit = "quic-connection-level-flow-control-limit"
// QuicStreamLevelFlowControlLimit is similar to quicConnLevelFlowControlLimit but for each QUIC stream. When the sender is blocked,
// it will send a STREAM_DATA_BLOCKED frame
QuicStreamLevelFlowControlLimit = "quic-stream-level-flow-control-limit"
// Ui is to enable launching cloudflared in interactive UI mode
Ui = "ui"
// ConnectorLabel is the command line flag to give a meaningful label to a specific connector
ConnectorLabel = "label"
// MaxActiveFlows is the command line flag to set the maximum number of flows that cloudflared can be processing at the same time
MaxActiveFlows = "max-active-flows"
// Tag is the command line flag to set custom tags used to identify this tunnel via added HTTP request headers to the origin
Tag = "tag"
// Protocol is the command line flag to set the protocol to use to connect to the Cloudflare Edge
Protocol = "protocol"
// PostQuantum is the command line flag to force the connection to Cloudflare Edge to use Post Quantum cryptography
PostQuantum = "post-quantum"
// Features is the command line flag to opt into various features that are still being developed or tested
Features = "features"
// EdgeIpVersion is the command line flag to set the Cloudflare Edge IP address version to connect with
EdgeIpVersion = "edge-ip-version"
// EdgeBindAddress is the command line flag to bind to IP address for outgoing connections to Cloudflare Edge
EdgeBindAddress = "edge-bind-address"
// Force is the command line flag to specify if you wish to force an action
Force = "force"
// Edge is the command line flag to set the address of the Cloudflare tunnel server. Only works in Cloudflare's internal testing environment
Edge = "edge"
// Region is the command line flag to set the Cloudflare Edge region to connect to
Region = "region"
// IsAutoUpdated is the command line flag to signal the new process that cloudflared has been autoupdated
IsAutoUpdated = "is-autoupdated"
// LBPool is the command line flag to set the name of the load balancing pool to add this origin to
LBPool = "lb-pool"
// Retries is the command line flag to set the maximum number of retries for connection/protocol errors
Retries = "retries"
// MaxEdgeAddrRetries is the command line flag to set the maximum number of times to retry on edge addrs before falling back to a lower protocol
MaxEdgeAddrRetries = "max-edge-addr-retries"
// GracePeriod is the command line flag to set the maximum amount of time that cloudflared waits to shut down if it is still serving requests
GracePeriod = "grace-period"
// ICMPV4Src is the command line flag to set the source address and the interface name to send/receive ICMPv4 messages
ICMPV4Src = "icmpv4-src"
// ICMPV6Src is the command line flag to set the source address and the interface name to send/receive ICMPv6 messages
ICMPV6Src = "icmpv6-src"
// ProxyDns is the command line flag to run DNS server over HTTPS
ProxyDns = "proxy-dns"
// Name is the command line to set the name of the tunnel
Name = "name"
// AutoUpdateFreq is the command line for setting the frequency that cloudflared checks for updates
AutoUpdateFreq = "autoupdate-freq"
// NoAutoUpdate is the command line flag to disable cloudflared from checking for updates
NoAutoUpdate = "no-autoupdate"
// LogLevel is the command line flag for the cloudflared logging level
LogLevel = "loglevel"
// LogLevelSSH is the command line flag for the cloudflared ssh logging level
LogLevelSSH = "log-level"
// TransportLogLevel is the command line flag for the transport logging level
TransportLogLevel = "transport-loglevel"
// LogFile is the command line flag to define the file where application logs will be stored
LogFile = "logfile"
// LogDirectory is the command line flag to define the directory where application logs will be stored.
LogDirectory = "log-directory"
// TraceOutput is the command line flag to set the name of trace output file
TraceOutput = "trace-output"
// OriginCert is the command line flag to define the path for the origin certificate used by cloudflared
OriginCert = "origincert"
// Metrics is the command line flag to define the address of the metrics server
Metrics = "metrics"
// MetricsUpdateFreq is the command line flag to define how frequently tunnel metrics are updated
MetricsUpdateFreq = "metrics-update-freq"
// ApiURL is the command line flag used to define the base URL of the API
ApiURL = "api-url"
)
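
For context, a minimal sketch (not part of the commit) of how these constants are meant to be consumed with the urfave/cli v2 API, mirroring the call-site changes further down. Only the string literals at call sites change; the flag names themselves stay the same, so cfdflags.LogLevel still resolves to "loglevel":

package main

import (
	"fmt"

	"github.com/urfave/cli/v2"

	cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			// The flag keeps its historical name ("loglevel"); only call sites change.
			&cli.StringFlag{Name: cfdflags.LogLevel, Value: "info"},
		},
		Action: func(c *cli.Context) error {
			// Reading the value through the constant instead of the literal "loglevel".
			fmt.Println("log level:", c.String(cfdflags.LogLevel))
			return nil
		},
	}
	_ = app.Run([]string{"cloudflared", "--loglevel", "debug"})
}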

View File

@ -3,11 +3,36 @@
package main
import (
"fmt"
"os"
cli "github.com/urfave/cli/v2"
)
func runApp(app *cli.App, graceShutdownC chan struct{}) {
app.Commands = append(app.Commands, &cli.Command{
Name: "service",
Usage: "Manages the cloudflared system service (not supported on this operating system)",
Subcommands: []*cli.Command{
{
Name: "install",
Usage: "Install cloudflared as a system service (not supported on this operating system)",
Action: cliutil.ConfiguredAction(installGenericService),
},
{
Name: "uninstall",
Usage: "Uninstall the cloudflared service (not supported on this operating system)",
Action: cliutil.ConfiguredAction(uninstallGenericService),
},
},
})
app.Run(os.Args)
}
func installGenericService(c *cli.Context) error {
return fmt.Errorf("service installation is not supported on this operating system")
}
func uninstallGenericService(c *cli.Context) error {
return fmt.Errorf("service uninstallation is not supported on this operating system")
}

View File

@ -120,7 +120,7 @@ func installLaunchd(c *cli.Context) error {
log.Info().Msg("Installing cloudflared client as an user launch agent. " +
"Note that cloudflared client will only run when the user is logged in. " +
"If you want to run cloudflared client at boot, install with root permission. " +
"For more information, visit https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/run-tunnel/run-as-service")
"For more information, visit https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/configure-tunnels/local-management/as-a-service/macos/")
}
etPath, err := os.Executable()
if err != nil {

View File

@ -12,6 +12,7 @@ import (
"github.com/cloudflare/cloudflared/cmd/cloudflared/access"
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/cmd/cloudflared/proxydns"
"github.com/cloudflare/cloudflared/cmd/cloudflared/tail"
"github.com/cloudflare/cloudflared/cmd/cloudflared/tunnel"
@ -105,7 +106,7 @@ func commands(version func(c *cli.Context)) []*cli.Command {
Usage: "specify if you wish to update to the latest beta version",
},
&cli.BoolFlag{
Name: "force",
Name: cfdflags.Force,
Usage: "specify if you wish to force an upgrade to the latest version regardless of the current version",
Hidden: true,
},

View File

@ -18,14 +18,12 @@ import (
"nhooyr.io/websocket"
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/credentials"
"github.com/cloudflare/cloudflared/logger"
"github.com/cloudflare/cloudflared/management"
)
var (
buildInfo *cliutil.BuildInfo
)
var buildInfo *cliutil.BuildInfo
func Init(bi *cliutil.BuildInfo) {
buildInfo = bi
@ -56,7 +54,7 @@ func managementTokenCommand(c *cli.Context) error {
if err != nil {
return err
}
var tokenResponse = struct {
tokenResponse := struct {
Token string `json:"token"`
}{Token: token}
@ -119,13 +117,13 @@ func buildTailCommand(subcommands []*cli.Command) *cli.Command {
Value: "",
},
&cli.StringFlag{
Name: logger.LogLevelFlag,
Name: cfdflags.LogLevel,
Value: "info",
Usage: "Application logging level {debug, info, warn, error, fatal}",
EnvVars: []string{"TUNNEL_LOGLEVEL"},
},
&cli.StringFlag{
Name: credentials.OriginCertFlag,
Name: cfdflags.OriginCert,
Usage: "Path to the certificate generated for your origin when you run cloudflared login.",
EnvVars: []string{"TUNNEL_ORIGIN_CERT"},
Value: credentials.FindDefaultOriginCertPath(),
@ -169,7 +167,7 @@ func handleValidationError(resp *http.Response, log *zerolog.Logger) {
// logger will be created to emit only against the os.Stderr as to not obstruct with normal output from
// management requests
func createLogger(c *cli.Context) *zerolog.Logger {
level, levelErr := zerolog.ParseLevel(c.String(logger.LogLevelFlag))
level, levelErr := zerolog.ParseLevel(c.String(cfdflags.LogLevel))
if levelErr != nil {
level = zerolog.InfoLevel
}
@ -183,9 +181,10 @@ func createLogger(c *cli.Context) *zerolog.Logger {
// parseFilters will attempt to parse provided filters to send to with the EventStartStreaming
func parseFilters(c *cli.Context) (*management.StreamingFilters, error) {
var level *management.LogLevel
var events []management.LogEventType
var sample float64
events := make([]management.LogEventType, 0)
argLevel := c.String("level")
argEvents := c.StringSlice("event")
argSample := c.Float64("sample")
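
A hedged aside on the change from a nil slice declaration to make([]management.LogEventType, 0): if these filters end up JSON-encoded (as the EventStartStreaming comment above suggests), a nil slice and an empty slice serialize differently. A minimal, self-contained illustration of that general Go behaviour, not necessarily the motivation for this change:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilEvents []string           // declared but nil
	emptyEvents := make([]string, 0) // allocated, length zero

	a, _ := json.Marshal(nilEvents)
	b, _ := json.Marshal(emptyEvents)
	fmt.Println(string(a)) // null
	fmt.Println(string(b)) // []
}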
@ -225,12 +224,12 @@ func parseFilters(c *cli.Context) (*management.StreamingFilters, error) {
// getManagementToken will make a call to the Cloudflare API to acquire a management token for the requested tunnel.
func getManagementToken(c *cli.Context, log *zerolog.Logger) (string, error) {
userCreds, err := credentials.Read(c.String(credentials.OriginCertFlag), log)
userCreds, err := credentials.Read(c.String(cfdflags.OriginCert), log)
if err != nil {
return "", err
}
client, err := userCreds.Client(c.String("api-url"), buildInfo.UserAgent(), log)
client, err := userCreds.Client(c.String(cfdflags.ApiURL), buildInfo.UserAgent(), log)
if err != nil {
return "", err
}
@ -331,6 +330,7 @@ func Run(c *cli.Context) error {
header["cf-trace-id"] = []string{trace}
}
ctx := c.Context
// nolint: bodyclose
conn, resp, err := websocket.Dial(ctx, u.String(), &websocket.DialOptions{
HTTPHeader: header,
})

View File

@ -16,7 +16,7 @@ import (
"github.com/facebookgo/grace/gracenet"
"github.com/getsentry/sentry-go"
"github.com/google/uuid"
homedir "github.com/mitchellh/go-homedir"
"github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/urfave/cli/v2"
@ -24,6 +24,7 @@ import (
"github.com/cloudflare/cloudflared/cfapi"
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/cmd/cloudflared/proxydns"
"github.com/cloudflare/cloudflared/cmd/cloudflared/updater"
"github.com/cloudflare/cloudflared/config"
@ -47,61 +48,6 @@ import (
const (
sentryDSN = "https://56a9c9fa5c364ab28f34b14f35ea0f1b:3e8827f6f9f740738eb11138f7bebb68@sentry.io/189878"
// ha-Connections specifies how many connections to make to the edge
haConnectionsFlag = "ha-connections"
// sshPortFlag is the port on localhost the cloudflared ssh server will run on
sshPortFlag = "local-ssh-port"
// sshIdleTimeoutFlag defines the duration a SSH session can remain idle before being closed
sshIdleTimeoutFlag = "ssh-idle-timeout"
// sshMaxTimeoutFlag defines the max duration a SSH session can remain open for
sshMaxTimeoutFlag = "ssh-max-timeout"
// bucketNameFlag is the bucket name to use for the SSH log uploader
bucketNameFlag = "bucket-name"
// regionNameFlag is the AWS region name to use for the SSH log uploader
regionNameFlag = "region-name"
// secretIDFlag is the Secret id of SSH log uploader
secretIDFlag = "secret-id"
// accessKeyIDFlag is the Access key id of SSH log uploader
accessKeyIDFlag = "access-key-id"
// sessionTokenIDFlag is the Session token of SSH log uploader
sessionTokenIDFlag = "session-token"
// s3URLFlag is the S3 URL of SSH log uploader (e.g. don't use AWS s3 and use google storage bucket instead)
s3URLFlag = "s3-url-host"
// hostKeyPath is the path of the dir to save SSH host keys too
hostKeyPath = "host-key-path"
// rpcTimeout is how long to wait for a Capnp RPC request to the edge
rpcTimeout = "rpc-timeout"
// writeStreamTimeout sets if we should have a timeout when writing data to a stream towards the destination (edge/origin).
writeStreamTimeout = "write-stream-timeout"
// quicDisablePathMTUDiscovery sets if QUIC should not perform PTMU discovery and use a smaller (safe) packet size.
// Packets will then be at most 1252 (IPv4) / 1232 (IPv6) bytes in size.
// Note that this may result in packet drops for UDP proxying, since we expect being able to send at least 1280 bytes of inner packets.
quicDisablePathMTUDiscovery = "quic-disable-pmtu-discovery"
// quicConnLevelFlowControlLimit controls the max flow control limit allocated for a QUIC connection. This controls how much data is the
// receiver willing to buffer. Once the limit is reached, the sender will send a DATA_BLOCKED frame to indicate it has more data to write,
// but it's blocked by flow control
quicConnLevelFlowControlLimit = "quic-connection-level-flow-control-limit"
// quicStreamLevelFlowControlLimit is similar to quicConnLevelFlowControlLimit but for each QUIC stream. When the sender is blocked,
// it will send a STREAM_DATA_BLOCKED frame
quicStreamLevelFlowControlLimit = "quic-stream-level-flow-control-limit"
// uiFlag is to enable launching cloudflared in interactive UI mode
uiFlag = "ui"
LogFieldCommand = "command"
LogFieldExpandedPath = "expandedPath"
LogFieldPIDPathname = "pidPathname"
@ -116,7 +62,6 @@ Eg. cloudflared tunnel --url localhost:8080/.
Please note that Quick Tunnels are meant to be ephemeral and should only be used for testing purposes.
For production usage, we recommend creating Named Tunnels. (https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/)
`
connectorLabelFlag = "label"
)
var (
@ -126,14 +71,14 @@ var (
routeFailMsg = fmt.Sprintf("failed to provision routing, please create it manually via Cloudflare dashboard or UI; "+
"most likely you already have a conflicting record there. You can also rerun this command with --%s to overwrite "+
"any existing DNS records for this hostname.", overwriteDNSFlag)
errDeprecatedClassicTunnel = fmt.Errorf("Classic tunnels have been deprecated, please use Named Tunnels. (https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/)")
errDeprecatedClassicTunnel = errors.New("Classic tunnels have been deprecated, please use Named Tunnels. (https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/)")
// TODO: TUN-8756 the list below denotes the flags that do not possess any kind of sensitive information
// however this approach is not maintainble in the long-term.
nonSecretFlagsList = []string{
"config",
"autoupdate-freq",
"no-autoupdate",
"metrics",
cfdflags.AutoUpdateFreq,
cfdflags.NoAutoUpdate,
cfdflags.Metrics,
"pidfile",
"url",
"hello-world",
@ -166,54 +111,55 @@ var (
"bastion",
"proxy-address",
"proxy-port",
"loglevel",
"transport-loglevel",
"logfile",
"log-directory",
"trace-output",
"proxy-dns",
cfdflags.LogLevel,
cfdflags.TransportLogLevel,
cfdflags.LogFile,
cfdflags.LogDirectory,
cfdflags.TraceOutput,
cfdflags.ProxyDns,
"proxy-dns-port",
"proxy-dns-address",
"proxy-dns-upstream",
"proxy-dns-max-upstream-conns",
"proxy-dns-bootstrap",
"is-autoupdated",
"edge",
"region",
"edge-ip-version",
"edge-bind-address",
cfdflags.IsAutoUpdated,
cfdflags.Edge,
cfdflags.Region,
cfdflags.EdgeIpVersion,
cfdflags.EdgeBindAddress,
"cacert",
"hostname",
"id",
"lb-pool",
"api-url",
"metrics-update-freq",
"tag",
cfdflags.LBPool,
cfdflags.ApiURL,
cfdflags.MetricsUpdateFreq,
cfdflags.Tag,
"heartbeat-interval",
"heartbeat-count",
"max-edge-addr-retries",
"retries",
cfdflags.MaxEdgeAddrRetries,
cfdflags.Retries,
"ha-connections",
"rpc-timeout",
"write-stream-timeout",
"quic-disable-pmtu-discovery",
"quic-connection-level-flow-control-limit",
"quic-stream-level-flow-control-limit",
"label",
"grace-period",
cfdflags.ConnectorLabel,
cfdflags.GracePeriod,
"compression-quality",
"use-reconnect-token",
"dial-edge-timeout",
"stdin-control",
"name",
"ui",
cfdflags.Name,
cfdflags.Ui,
"quick-service",
"max-fetch-size",
"post-quantum",
cfdflags.PostQuantum,
"management-diagnostics",
"protocol",
cfdflags.Protocol,
"overwrite-dns",
"help",
cfdflags.MaxActiveFlows,
}
)
@ -262,7 +208,7 @@ then protect with Cloudflare Access).
B) Locally reachable TCP/UDP-based private services to Cloudflare connected private users in the same account, e.g.,
those enrolled to a Zero Trust WARP Client.
You can manage your Tunnels via dash.teams.cloudflare.com. This approach will only require you to run a single command
You can manage your Tunnels via one.dash.cloudflare.com. This approach will only require you to run a single command
later in each machine where you wish to run a Tunnel.
Alternatively, you can manage your Tunnels via the command line. Begin by obtaining a certificate to be able to do so:
@ -298,7 +244,7 @@ func TunnelCommand(c *cli.Context) error {
// --name required
// --url or --hello-world required
// --hostname optional
if name := c.String("name"); name != "" {
if name := c.String(cfdflags.Name); name != "" {
hostname, err := validation.ValidateHostname(c.String("hostname"))
if err != nil {
return errors.Wrap(err, "Invalid hostname provided")
@ -315,7 +261,7 @@ func TunnelCommand(c *cli.Context) error {
// A unauthenticated named tunnel hosted on <random>.<quick-tunnels-service>.com
// We don't support running proxy-dns and a quick tunnel at the same time as the same process
shouldRunQuickTunnel := c.IsSet("url") || c.IsSet(ingress.HelloWorldFlag)
if !c.IsSet("proxy-dns") && c.String("quick-service") != "" && shouldRunQuickTunnel {
if !c.IsSet(cfdflags.ProxyDns) && c.String("quick-service") != "" && shouldRunQuickTunnel {
return RunQuickTunnel(sc)
}
@ -329,7 +275,7 @@ func TunnelCommand(c *cli.Context) error {
return errDeprecatedClassicTunnel
}
if c.IsSet("proxy-dns") {
if c.IsSet(cfdflags.ProxyDns) {
if shouldRunQuickTunnel {
return fmt.Errorf("running a quick tunnel with `proxy-dns` is not supported")
}
@ -376,7 +322,7 @@ func runAdhocNamedTunnel(sc *subcommandContext, name, credentialsOutputPath stri
func routeFromFlag(c *cli.Context) (route cfapi.HostnameRoute, ok bool) {
if hostname := c.String("hostname"); hostname != "" {
if lbPool := c.String("lb-pool"); lbPool != "" {
if lbPool := c.String(cfdflags.LBPool); lbPool != "" {
return cfapi.NewLBRoute(hostname, lbPool), true
}
return cfapi.NewDNSRoute(hostname, c.Bool(overwriteDNSFlagName)), true
@ -406,7 +352,7 @@ func StartServer(
log.Info().Msg(config.ErrNoConfigFile.Error())
}
if c.IsSet("trace-output") {
if c.IsSet(cfdflags.TraceOutput) {
tmpTraceFile, err := os.CreateTemp("", "trace")
if err != nil {
log.Err(err).Msg("Failed to create new temporary file to save trace output")
@ -418,7 +364,7 @@ func StartServer(
if err := tmpTraceFile.Close(); err != nil {
traceLog.Err(err).Msg("Failed to close temporary trace output file")
}
traceOutputFilepath := c.String("trace-output")
traceOutputFilepath := c.String(cfdflags.TraceOutput)
if err := os.Rename(tmpTraceFile.Name(), traceOutputFilepath); err != nil {
traceLog.
Err(err).
@ -448,7 +394,7 @@ func StartServer(
go waitForSignal(graceShutdownC, log)
if c.IsSet("proxy-dns") {
if c.IsSet(cfdflags.ProxyDns) {
dnsReadySignal := make(chan struct{})
wg.Add(1)
go func() {
@ -470,7 +416,7 @@ func StartServer(
go func() {
defer wg.Done()
autoupdater := updater.NewAutoUpdater(
c.Bool("no-autoupdate"), c.Duration("autoupdate-freq"), &listeners, log,
c.Bool(cfdflags.NoAutoUpdate), c.Duration(cfdflags.AutoUpdateFreq), &listeners, log,
)
errC <- autoupdater.Run(ctx)
}()
@ -526,7 +472,7 @@ func StartServer(
c.Bool("management-diagnostics"),
serviceIP,
clientID,
c.String(connectorLabelFlag),
c.String(cfdflags.ConnectorLabel),
logger.ManagementLogger.Log,
logger.ManagementLogger,
)
@ -578,7 +524,7 @@ func StartServer(
errC <- metrics.ServeMetrics(metricsListener, ctx, metricsConfig, log)
}()
reconnectCh := make(chan supervisor.ReconnectSignal, c.Int(haConnectionsFlag))
reconnectCh := make(chan supervisor.ReconnectSignal, c.Int(cfdflags.HaConnections))
if c.IsSet("stdin-control") {
log.Info().Msg("Enabling control through stdin")
go stdinControl(reconnectCh, log)
@ -698,31 +644,31 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
flags = append(flags, []cli.Flag{
credentialsFileFlag,
altsrc.NewBoolFlag(&cli.BoolFlag{
Name: "is-autoupdated",
Name: cfdflags.IsAutoUpdated,
Usage: "Signal the new process that Cloudflare Tunnel connector has been autoupdated",
Value: false,
Hidden: true,
}),
altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
Name: "edge",
Name: cfdflags.Edge,
Usage: "Address of the Cloudflare tunnel server. Only works in Cloudflare's internal testing environment.",
EnvVars: []string{"TUNNEL_EDGE"},
Hidden: true,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: "region",
Name: cfdflags.Region,
Usage: "Cloudflare Edge region to connect to. Omit or set to empty to connect to the global region.",
EnvVars: []string{"TUNNEL_REGION"},
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: "edge-ip-version",
Name: cfdflags.EdgeIpVersion,
Usage: "Cloudflare Edge IP address version to connect with. {4, 6, auto}",
EnvVars: []string{"TUNNEL_EDGE_IP_VERSION"},
Value: "4",
Hidden: false,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: "edge-bind-address",
Name: cfdflags.EdgeBindAddress,
Usage: "Bind to IP address for outgoing connections to Cloudflare Edge.",
EnvVars: []string{"TUNNEL_EDGE_BIND_ADDRESS"},
Hidden: false,
@ -746,7 +692,7 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
Hidden: true,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: "lb-pool",
Name: cfdflags.LBPool,
Usage: "The name of a (new/existing) load balancing pool to add this origin to.",
EnvVars: []string{"TUNNEL_LB_POOL"},
Hidden: shouldHide,
@ -770,21 +716,21 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
Hidden: true,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: "api-url",
Name: cfdflags.ApiURL,
Usage: "Base URL for Cloudflare API v4",
EnvVars: []string{"TUNNEL_API_URL"},
Value: "https://api.cloudflare.com/client/v4",
Hidden: true,
}),
altsrc.NewDurationFlag(&cli.DurationFlag{
Name: "metrics-update-freq",
Name: cfdflags.MetricsUpdateFreq,
Usage: "Frequency to update tunnel metrics",
Value: time.Second * 5,
EnvVars: []string{"TUNNEL_METRICS_UPDATE_FREQ"},
Hidden: shouldHide,
}),
altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
Name: "tag",
Name: cfdflags.Tag,
Usage: "Custom tags used to identify this tunnel via added HTTP request headers to the origin, in format `KEY=VALUE`. Multiple tags may be specified.",
EnvVars: []string{"TUNNEL_TAG"},
Hidden: true,
@ -803,64 +749,64 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
Hidden: true,
}),
altsrc.NewIntFlag(&cli.IntFlag{
Name: "max-edge-addr-retries",
Name: cfdflags.MaxEdgeAddrRetries,
Usage: "Maximum number of times to retry on edge addrs before falling back to a lower protocol",
Value: 8,
Hidden: true,
}),
// Note TUN-3758 , we use Int because UInt is not supported with altsrc
altsrc.NewIntFlag(&cli.IntFlag{
Name: "retries",
Name: cfdflags.Retries,
Value: 5,
Usage: "Maximum number of retries for connection/protocol errors.",
EnvVars: []string{"TUNNEL_RETRIES"},
Hidden: shouldHide,
}),
altsrc.NewIntFlag(&cli.IntFlag{
Name: haConnectionsFlag,
Name: cfdflags.HaConnections,
Value: 4,
Hidden: true,
}),
altsrc.NewDurationFlag(&cli.DurationFlag{
Name: rpcTimeout,
Name: cfdflags.RpcTimeout,
Value: 5 * time.Second,
Hidden: true,
}),
altsrc.NewDurationFlag(&cli.DurationFlag{
Name: writeStreamTimeout,
Name: cfdflags.WriteStreamTimeout,
EnvVars: []string{"TUNNEL_STREAM_WRITE_TIMEOUT"},
Usage: "Use this option to add a stream write timeout for connections when writing towards the origin or edge. Default is 0 which disables the write timeout.",
Value: 0 * time.Second,
Hidden: true,
}),
altsrc.NewBoolFlag(&cli.BoolFlag{
Name: quicDisablePathMTUDiscovery,
Name: cfdflags.QuicDisablePathMTUDiscovery,
EnvVars: []string{"TUNNEL_DISABLE_QUIC_PMTU"},
Usage: "Use this option to disable PTMU discovery for QUIC connections. This will result in lower packet sizes. Not however, that this may cause instability for UDP proxying.",
Value: false,
Hidden: true,
}),
altsrc.NewIntFlag(&cli.IntFlag{
Name: quicConnLevelFlowControlLimit,
Name: cfdflags.QuicConnLevelFlowControlLimit,
EnvVars: []string{"TUNNEL_QUIC_CONN_LEVEL_FLOW_CONTROL_LIMIT"},
Usage: "Use this option to change the connection-level flow control limit for QUIC transport.",
Value: 30 * (1 << 20), // 30 MB
Hidden: true,
}),
altsrc.NewIntFlag(&cli.IntFlag{
Name: quicStreamLevelFlowControlLimit,
Name: cfdflags.QuicStreamLevelFlowControlLimit,
EnvVars: []string{"TUNNEL_QUIC_STREAM_LEVEL_FLOW_CONTROL_LIMIT"},
Usage: "Use this option to change the connection-level flow control limit for QUIC transport.",
Value: 6 * (1 << 20), // 6 MB
Hidden: true,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: connectorLabelFlag,
Name: cfdflags.ConnectorLabel,
Usage: "Use this option to give a meaningful label to a specific connector. When a tunnel starts up, a connector id unique to the tunnel is generated. This is a uuid. To make it easier to identify a connector, we will use the hostname of the machine the tunnel is running on along with the connector ID. This option exists if one wants to have more control over what their individual connectors are called.",
Value: "",
}),
altsrc.NewDurationFlag(&cli.DurationFlag{
Name: "grace-period",
Name: cfdflags.GracePeriod,
Usage: "When cloudflared receives SIGINT/SIGTERM it will stop accepting new requests, wait for in-progress requests to terminate, then shutdown. Waiting for in-progress requests will timeout after this grace period, or when a second SIGTERM/SIGINT is received.",
Value: time.Second * 30,
EnvVars: []string{"TUNNEL_GRACE_PERIOD"},
@ -896,14 +842,14 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
Value: false,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: "name",
Name: cfdflags.Name,
Aliases: []string{"n"},
EnvVars: []string{"TUNNEL_NAME"},
Usage: "Stable name to identify the tunnel. Using this flag will create, route and run a tunnel. For production usage, execute each command separately",
Hidden: shouldHide,
}),
altsrc.NewBoolFlag(&cli.BoolFlag{
Name: uiFlag,
Name: cfdflags.Ui,
Usage: "(depreciated) Launch tunnel UI. Tunnel logs are scrollable via 'j', 'k', or arrow keys.",
Value: false,
Hidden: true,
@ -921,7 +867,7 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
Hidden: true,
}),
altsrc.NewBoolFlag(&cli.BoolFlag{
Name: "post-quantum",
Name: cfdflags.PostQuantum,
Usage: "When given creates an experimental post-quantum secure tunnel",
Aliases: []string{"pq"},
EnvVars: []string{"TUNNEL_POST_QUANTUM"},
@ -949,27 +895,27 @@ func configureCloudflaredFlags(shouldHide bool) []cli.Flag {
Hidden: shouldHide,
},
altsrc.NewStringFlag(&cli.StringFlag{
Name: credentials.OriginCertFlag,
Name: cfdflags.OriginCert,
Usage: "Path to the certificate generated for your origin when you run cloudflared login.",
EnvVars: []string{"TUNNEL_ORIGIN_CERT"},
Value: credentials.FindDefaultOriginCertPath(),
Hidden: shouldHide,
}),
altsrc.NewDurationFlag(&cli.DurationFlag{
Name: "autoupdate-freq",
Name: cfdflags.AutoUpdateFreq,
Usage: fmt.Sprintf("Autoupdate frequency. Default is %v.", updater.DefaultCheckUpdateFreq),
Value: updater.DefaultCheckUpdateFreq,
Hidden: shouldHide,
}),
altsrc.NewBoolFlag(&cli.BoolFlag{
Name: "no-autoupdate",
Name: cfdflags.NoAutoUpdate,
Usage: "Disable periodic check for updates, restarting the server with the new version.",
EnvVars: []string{"NO_AUTOUPDATE"},
Value: false,
Hidden: shouldHide,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: "metrics",
Name: cfdflags.Metrics,
Value: metrics.GetMetricsDefaultAddress(metrics.Runtime),
Usage: fmt.Sprintf(
`Listen address for metrics reporting. If no address is passed cloudflared will try to bind to %v.
@ -1133,62 +1079,62 @@ func legacyTunnelFlag(msg string) string {
func sshFlags(shouldHide bool) []cli.Flag {
return []cli.Flag{
altsrc.NewStringFlag(&cli.StringFlag{
Name: sshPortFlag,
Name: cfdflags.SshPort,
Usage: "Localhost port that cloudflared SSH server will run on",
Value: "2222",
EnvVars: []string{"LOCAL_SSH_PORT"},
Hidden: true,
}),
altsrc.NewDurationFlag(&cli.DurationFlag{
Name: sshIdleTimeoutFlag,
Name: cfdflags.SshIdleTimeout,
Usage: "Connection timeout after no activity",
EnvVars: []string{"SSH_IDLE_TIMEOUT"},
Hidden: true,
}),
altsrc.NewDurationFlag(&cli.DurationFlag{
Name: sshMaxTimeoutFlag,
Name: cfdflags.SshMaxTimeout,
Usage: "Absolute connection timeout",
EnvVars: []string{"SSH_MAX_TIMEOUT"},
Hidden: true,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: bucketNameFlag,
Name: cfdflags.SshLogUploaderBucketName,
Usage: "Bucket name of where to upload SSH logs",
EnvVars: []string{"BUCKET_ID"},
Hidden: true,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: regionNameFlag,
Name: cfdflags.SshLogUploaderRegionName,
Usage: "Region name of where to upload SSH logs",
EnvVars: []string{"REGION_ID"},
Hidden: true,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: secretIDFlag,
Name: cfdflags.SshLogUploaderSecretID,
Usage: "Secret ID of where to upload SSH logs",
EnvVars: []string{"SECRET_ID"},
Hidden: true,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: accessKeyIDFlag,
Name: cfdflags.SshLogUploaderAccessKeyID,
Usage: "Access Key ID of where to upload SSH logs",
EnvVars: []string{"ACCESS_CLIENT_ID"},
Hidden: true,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: sessionTokenIDFlag,
Name: cfdflags.SshLogUploaderSessionTokenID,
Usage: "Session Token to use in the configuration of SSH logs uploading",
EnvVars: []string{"SESSION_TOKEN_ID"},
Hidden: true,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: s3URLFlag,
Name: cfdflags.SshLogUploaderS3URL,
Usage: "S3 url of where to upload SSH logs",
EnvVars: []string{"S3_URL"},
Hidden: true,
}),
altsrc.NewPathFlag(&cli.PathFlag{
Name: hostKeyPath,
Name: cfdflags.HostKeyPath,
Usage: "Absolute path of directory to save SSH host keys in",
EnvVars: []string{"HOST_KEY_PATH"},
Hidden: true,
@ -1228,7 +1174,7 @@ func sshFlags(shouldHide bool) []cli.Flag {
func configureProxyDNSFlags(shouldHide bool) []cli.Flag {
return []cli.Flag{
altsrc.NewBoolFlag(&cli.BoolFlag{
Name: "proxy-dns",
Name: cfdflags.ProxyDns,
Usage: "Run a DNS over HTTPS proxy server.",
EnvVars: []string{"TUNNEL_DNS"},
Hidden: shouldHide,
@ -1326,7 +1272,7 @@ func nonSecretCliFlags(log *zerolog.Logger, cli *cli.Context, flagInclusionList
}
switch flag {
case logger.LogDirectoryFlag, logger.LogFileFlag:
case cfdflags.LogDirectory, cfdflags.LogFile:
{
absolute, err := filepath.Abs(value)
if err != nil {

View File

@ -18,6 +18,7 @@ import (
"golang.org/x/term"
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
"github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/config"
"github.com/cloudflare/cloudflared/connection"
"github.com/cloudflare/cloudflared/edgediscovery"
@ -33,12 +34,26 @@ import (
const (
secretValue = "*****"
icmpFunnelTimeout = time.Second * 10
fedRampRegion = "fed" // const string denoting the region used to connect to FEDRamp servers
)
var (
secretFlags = [2]*altsrc.StringFlag{credentialsContentsFlag, tunnelTokenFlag}
configFlags = []string{"autoupdate-freq", "no-autoupdate", "retries", "protocol", "loglevel", "transport-loglevel", "origincert", "metrics", "metrics-update-freq", "edge-ip-version", "edge-bind-address"}
configFlags = []string{
flags.AutoUpdateFreq,
flags.NoAutoUpdate,
flags.Retries,
flags.Protocol,
flags.LogLevel,
flags.TransportLogLevel,
flags.OriginCert,
flags.Metrics,
flags.MetricsUpdateFreq,
flags.EdgeIpVersion,
flags.EdgeBindAddress,
flags.MaxActiveFlows,
}
)
func logClientOptions(c *cli.Context, log *zerolog.Logger) {
@ -96,8 +111,8 @@ func isSecretEnvVar(key string) bool {
}
func dnsProxyStandAlone(c *cli.Context, namedTunnel *connection.TunnelProperties) bool {
return c.IsSet("proxy-dns") &&
!(c.IsSet("name") || // adhoc-named tunnel
return c.IsSet(flags.ProxyDns) &&
!(c.IsSet(flags.Name) || // adhoc-named tunnel
c.IsSet(ingress.HelloWorldFlag) || // quick or named tunnel
namedTunnel != nil) // named tunnel
}
@ -115,14 +130,15 @@ func prepareTunnelConfig(
return nil, nil, errors.Wrap(err, "can't generate connector UUID")
}
log.Info().Msgf("Generated Connector ID: %s", clientID)
tags, err := NewTagSliceFromCLI(c.StringSlice("tag"))
tags, err := NewTagSliceFromCLI(c.StringSlice(flags.Tag))
if err != nil {
log.Err(err).Msg("Tag parse failure")
return nil, nil, errors.Wrap(err, "Tag parse failure")
}
tags = append(tags, pogs.Tag{Name: "ID", Value: clientID.String()})
transportProtocol := c.String("protocol")
transportProtocol := c.String(flags.Protocol)
isPostQuantumEnforced := c.Bool(flags.PostQuantum)
featureSelector, err := features.NewFeatureSelector(ctx, namedTunnel.Credentials.AccountTag, c.StringSlice("features"), c.Bool("post-quantum"), log)
if err != nil {
@ -150,7 +166,7 @@ func prepareTunnelConfig(
return nil, nil, err
}
protocolSelector, err := connection.NewProtocolSelector(transportProtocol, namedTunnel.Credentials.AccountTag, c.IsSet(TunnelTokenFlag), c.Bool("post-quantum"), edgediscovery.ProtocolPercentage, connection.ResolveTTL, log)
protocolSelector, err := connection.NewProtocolSelector(transportProtocol, namedTunnel.Credentials.AccountTag, c.IsSet(TunnelTokenFlag), isPostQuantumEnforced, edgediscovery.ProtocolPercentage, connection.ResolveTTL, log)
if err != nil {
return nil, nil, err
}
@ -176,11 +192,11 @@ func prepareTunnelConfig(
if err != nil {
return nil, nil, err
}
edgeIPVersion, err := parseConfigIPVersion(c.String("edge-ip-version"))
edgeIPVersion, err := parseConfigIPVersion(c.String(flags.EdgeIpVersion))
if err != nil {
return nil, nil, err
}
edgeBindAddr, err := parseConfigBindAddress(c.String("edge-bind-address"))
edgeBindAddr, err := parseConfigBindAddress(c.String(flags.EdgeBindAddress))
if err != nil {
return nil, nil, err
}
@ -193,36 +209,50 @@ func prepareTunnelConfig(
log.Warn().Str("edgeIPVersion", edgeIPVersion.String()).Err(err).Msg("Overriding edge-ip-version")
}
region := c.String(flags.Region)
endpoint := namedTunnel.Credentials.Endpoint
var resolvedRegion string
// set resolvedRegion to either the region passed as argument
// or to the endpoint in the credentials.
// Region and endpoint are interchangeable
if region != "" && endpoint != "" {
return nil, nil, fmt.Errorf("region provided with a token that has an endpoint")
} else if region != "" {
resolvedRegion = region
} else if endpoint != "" {
resolvedRegion = endpoint
}
tunnelConfig := &supervisor.TunnelConfig{
GracePeriod: gracePeriod,
ReplaceExisting: c.Bool("force"),
ReplaceExisting: c.Bool(flags.Force),
OSArch: info.OSArch(),
ClientID: clientID.String(),
EdgeAddrs: c.StringSlice("edge"),
Region: c.String("region"),
EdgeAddrs: c.StringSlice(flags.Edge),
Region: resolvedRegion,
EdgeIPVersion: edgeIPVersion,
EdgeBindAddr: edgeBindAddr,
HAConnections: c.Int(haConnectionsFlag),
IsAutoupdated: c.Bool("is-autoupdated"),
LBPool: c.String("lb-pool"),
HAConnections: c.Int(flags.HaConnections),
IsAutoupdated: c.Bool(flags.IsAutoUpdated),
LBPool: c.String(flags.LBPool),
Tags: tags,
Log: log,
LogTransport: logTransport,
Observer: observer,
ReportedVersion: info.Version(),
// Note TUN-3758 , we use Int because UInt is not supported with altsrc
Retries: uint(c.Int("retries")), // nolint: gosec
Retries: uint(c.Int(flags.Retries)), // nolint: gosec
RunFromTerminal: isRunningFromTerminal(),
NamedTunnel: namedTunnel,
ProtocolSelector: protocolSelector,
EdgeTLSConfigs: edgeTLSConfigs,
FeatureSelector: featureSelector,
MaxEdgeAddrRetries: uint8(c.Int("max-edge-addr-retries")), // nolint: gosec
RPCTimeout: c.Duration(rpcTimeout),
WriteStreamTimeout: c.Duration(writeStreamTimeout),
DisableQUICPathMTUDiscovery: c.Bool(quicDisablePathMTUDiscovery),
QUICConnectionLevelFlowControlLimit: c.Uint64(quicConnLevelFlowControlLimit),
QUICStreamLevelFlowControlLimit: c.Uint64(quicStreamLevelFlowControlLimit),
MaxEdgeAddrRetries: uint8(c.Int(flags.MaxEdgeAddrRetries)), // nolint: gosec
RPCTimeout: c.Duration(flags.RpcTimeout),
WriteStreamTimeout: c.Duration(flags.WriteStreamTimeout),
DisableQUICPathMTUDiscovery: c.Bool(flags.QuicDisablePathMTUDiscovery),
QUICConnectionLevelFlowControlLimit: c.Uint64(flags.QuicConnLevelFlowControlLimit),
QUICStreamLevelFlowControlLimit: c.Uint64(flags.QuicStreamLevelFlowControlLimit),
}
icmpRouter, err := newICMPRouter(c, log)
if err != nil {
@ -234,7 +264,7 @@ func prepareTunnelConfig(
Ingress: &ingressRules,
WarpRouting: ingress.NewWarpRoutingConfig(&cfg.WarpRouting),
ConfigurationFlags: parseConfigFlags(c),
WriteTimeout: c.Duration(writeStreamTimeout),
WriteTimeout: tunnelConfig.WriteStreamTimeout,
}
return tunnelConfig, orchestratorConfig, nil
}
@ -252,9 +282,9 @@ func parseConfigFlags(c *cli.Context) map[string]string {
}
func gracePeriod(c *cli.Context) (time.Duration, error) {
period := c.Duration("grace-period")
period := c.Duration(flags.GracePeriod)
if period > connection.MaxGracePeriod {
return time.Duration(0), fmt.Errorf("grace-period must be equal or less than %v", connection.MaxGracePeriod)
return time.Duration(0), fmt.Errorf("%s must be equal or less than %v", flags.GracePeriod, connection.MaxGracePeriod)
}
return period, nil
}
@ -337,14 +367,14 @@ func newICMPRouter(c *cli.Context, logger *zerolog.Logger) (ingress.ICMPRouterSe
}
func determineICMPSources(c *cli.Context, logger *zerolog.Logger) (netip.Addr, netip.Addr, error) {
ipv4Src, err := determineICMPv4Src(c.String("icmpv4-src"), logger)
ipv4Src, err := determineICMPv4Src(c.String(flags.ICMPV4Src), logger)
if err != nil {
return netip.Addr{}, netip.Addr{}, errors.Wrap(err, "failed to determine IPv4 source address for ICMP proxy")
}
logger.Info().Msgf("ICMP proxy will use %s as source for IPv4", ipv4Src)
ipv6Src, zone, err := determineICMPv6Src(c.String("icmpv6-src"), logger, ipv4Src)
ipv6Src, zone, err := determineICMPv6Src(c.String(flags.ICMPV6Src), logger, ipv4Src)
if err != nil {
return netip.Addr{}, netip.Addr{}, errors.Wrap(err, "failed to determine IPv6 source address for ICMP proxy")
}

View File

@ -4,6 +4,7 @@ import (
"fmt"
"path/filepath"
cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/config"
"github.com/cloudflare/cloudflared/credentials"
@ -57,7 +58,7 @@ func newSearchByID(id uuid.UUID, c *cli.Context, log *zerolog.Logger, fs fileSys
}
func (s searchByID) Path() (string, error) {
originCertPath := s.c.String(credentials.OriginCertFlag)
originCertPath := s.c.String(cfdflags.OriginCert)
originCertLog := s.log.With().
Str("originCertPath", originCertPath).
Logger()

View File

@ -67,7 +67,7 @@ func login(c *cli.Context) error {
path, ok, err := checkForExistingCert()
if ok {
fmt.Fprintf(os.Stdout, "You have an existing certificate at %s which login would overwrite.\nIf this is intentional, please move or delete that file then run this command again.\n", path)
log.Error().Err(err).Msgf("You have an existing certificate at %s which login would overwrite.\nIf this is intentional, please move or delete that file then run this command again.\n", path)
return nil
} else if err != nil {
return err
@ -78,7 +78,8 @@ func login(c *cli.Context) error {
callbackStoreURL = c.String(callbackURLParamName)
)
if c.Bool(fedRAMPParamName) {
isFEDRamp := c.Bool(fedRAMPParamName)
if isFEDRamp {
baseloginURL = fedBaseLoginURL
callbackStoreURL = fedCallbackStoreURL
}
@ -99,7 +100,23 @@ func login(c *cli.Context) error {
log,
)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write the certificate due to the following error:\n%v\n\nYour browser will download the certificate instead. You will have to manually\ncopy it to the following path:\n\n%s\n", err, path)
log.Error().Err(err).Msgf("Failed to write the certificate.\n\nYour browser will download the certificate instead. You will have to manually\ncopy it to the following path:\n\n%s\n", path)
return err
}
cert, err := credentials.DecodeOriginCert(resourceData)
if err != nil {
log.Error().Err(err).Msg("failed to decode origin certificate")
return err
}
if isFEDRamp {
cert.Endpoint = credentials.FedEndpoint
}
resourceData, err = cert.EncodeOriginCert()
if err != nil {
log.Error().Err(err).Msg("failed to encode origin certificate")
return err
}
@ -107,7 +124,7 @@ func login(c *cli.Context) error {
return errors.Wrap(err, fmt.Sprintf("error writing cert to %s", path))
}
fmt.Fprintf(os.Stdout, "You have successfully logged in.\nIf you wish to copy your credentials to a server, they have been saved to:\n%s\n", path)
log.Info().Msgf("You have successfully logged in.\nIf you wish to copy your credentials to a server, they have been saved to:\n%s\n", path)
return nil
}

View File

@ -11,6 +11,7 @@ import (
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/connection"
)
@ -82,13 +83,13 @@ func RunQuickTunnel(sc *subcommandContext) error {
sc.log.Info().Msg(line)
}
if !sc.c.IsSet("protocol") {
sc.c.Set("protocol", "quic")
if !sc.c.IsSet(flags.Protocol) {
_ = sc.c.Set(flags.Protocol, "quic")
}
// Override the number of connections used. Quick tunnels shouldn't be used for production usage,
// so, use a single connection instead.
sc.c.Set(haConnectionsFlag, "1")
_ = sc.c.Set(flags.HaConnections, "1")
return StartServer(
sc.c,
buildInfo,

View File

@ -9,22 +9,26 @@ import (
"strings"
"github.com/google/uuid"
"github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/urfave/cli/v2"
"github.com/cloudflare/cloudflared/cfapi"
cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/connection"
"github.com/cloudflare/cloudflared/credentials"
"github.com/cloudflare/cloudflared/logger"
)
type errInvalidJSONCredential struct {
const fedRampBaseApiURL = "https://api.fed.cloudflare.com/client/v4"
type invalidJSONCredentialError struct {
err error
path string
}
func (e errInvalidJSONCredential) Error() string {
func (e invalidJSONCredentialError) Error() string {
return "Invalid JSON when parsing tunnel credentials file"
}
@ -51,7 +55,12 @@ func newSubcommandContext(c *cli.Context) (*subcommandContext, error) {
// Returns something that can find the given tunnel's credentials file.
func (sc *subcommandContext) credentialFinder(tunnelID uuid.UUID) CredFinder {
if path := sc.c.String(CredFileFlag); path != "" {
return newStaticPath(path, sc.fs)
// Expand path if CredFileFlag contains `~`
absPath, err := homedir.Expand(path)
if err != nil {
return newStaticPath(path, sc.fs)
}
return newStaticPath(absPath, sc.fs)
}
return newSearchByID(tunnelID, sc.c, sc.log, sc.fs)
}
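
As a side note, a small sketch (assuming the mitchellh/go-homedir package imported above) of the expansion behaviour this fallback relies on; the example path is hypothetical:

package main

import (
	"fmt"

	"github.com/mitchellh/go-homedir"
)

func main() {
	// Expand replaces a leading "~" with the current user's home directory;
	// a path without "~" comes back unchanged.
	if p, err := homedir.Expand("~/.cloudflared/cred.json"); err == nil {
		fmt.Println(p) // e.g. /home/user/.cloudflared/cred.json
	}
}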
@ -64,7 +73,16 @@ func (sc *subcommandContext) client() (cfapi.Client, error) {
if err != nil {
return nil, err
}
sc.tunnelstoreClient, err = cred.Client(sc.c.String("api-url"), buildInfo.UserAgent(), sc.log)
var apiURL string
if cred.IsFEDEndpoint() {
sc.log.Info().Str("api-url", fedRampBaseApiURL).Msg("using fedramp base api")
apiURL = fedRampBaseApiURL
} else {
apiURL = sc.c.String(cfdflags.ApiURL)
}
sc.tunnelstoreClient, err = cred.Client(apiURL, buildInfo.UserAgent(), sc.log)
if err != nil {
return nil, err
}
@ -73,7 +91,7 @@ func (sc *subcommandContext) client() (cfapi.Client, error) {
func (sc *subcommandContext) credential() (*credentials.User, error) {
if sc.userCredential == nil {
uc, err := credentials.Read(sc.c.String(credentials.OriginCertFlag), sc.log)
uc, err := credentials.Read(sc.c.String(cfdflags.OriginCert), sc.log)
if err != nil {
return nil, err
}
@ -100,7 +118,7 @@ func (sc *subcommandContext) readTunnelCredentials(credFinder CredFinder) (conne
"You may have accidentally used the filepath to cert.pem, which is generated by `cloudflared tunnel " +
"login`.")
}
return connection.Credentials{}, errInvalidJSONCredential{path: filePath, err: err}
return connection.Credentials{}, invalidJSONCredentialError{path: filePath, err: err}
}
return credentials, nil
}
@ -122,7 +140,7 @@ func (sc *subcommandContext) create(name string, credentialsFilePath string, sec
if err != nil {
return nil, errors.Wrap(err, "Couldn't decode tunnel secret from base64")
}
tunnelSecret = []byte(decodedSecret)
tunnelSecret = decodedSecret
if len(tunnelSecret) < 32 {
return nil, errors.New("Decoded tunnel secret must be at least 32 bytes long")
}
@ -160,7 +178,7 @@ func (sc *subcommandContext) create(name string, credentialsFilePath string, sec
errorLines = append(errorLines, fmt.Sprintf("Cloudflared tried to delete the tunnel for you, but encountered an error. You should use `cloudflared tunnel delete %v` to delete the tunnel yourself, because the tunnel can't be run without the tunnelfile.", tunnel.ID))
errorLines = append(errorLines, fmt.Sprintf("The delete tunnel error is: %v", deleteErr))
} else {
errorLines = append(errorLines, fmt.Sprintf("The tunnel was deleted, because the tunnel can't be run without the credentials file"))
errorLines = append(errorLines, "The tunnel was deleted, because the tunnel can't be run without the credentials file")
}
errorMsg := strings.Join(errorLines, "\n")
return nil, errors.New(errorMsg)
@ -189,7 +207,7 @@ func (sc *subcommandContext) list(filter *cfapi.TunnelFilter) ([]*cfapi.Tunnel,
}
func (sc *subcommandContext) delete(tunnelIDs []uuid.UUID) error {
forceFlagSet := sc.c.Bool("force")
forceFlagSet := sc.c.Bool(cfdflags.Force)
client, err := sc.client()
if err != nil {
@ -229,7 +247,7 @@ func (sc *subcommandContext) findCredentials(tunnelID uuid.UUID) (connection.Cre
var err error
if credentialsContents := sc.c.String(CredContentsFlag); credentialsContents != "" {
if err = json.Unmarshal([]byte(credentialsContents), &credentials); err != nil {
err = errInvalidJSONCredential{path: "TUNNEL_CRED_CONTENTS", err: err}
err = invalidJSONCredentialError{path: "TUNNEL_CRED_CONTENTS", err: err}
}
} else {
credFinder := sc.credentialFinder(tunnelID)
@ -245,7 +263,7 @@ func (sc *subcommandContext) findCredentials(tunnelID uuid.UUID) (connection.Cre
func (sc *subcommandContext) run(tunnelID uuid.UUID) error {
credentials, err := sc.findCredentials(tunnelID)
if err != nil {
if e, ok := err.(errInvalidJSONCredential); ok {
if e, ok := err.(invalidJSONCredentialError); ok {
sc.log.Error().Msgf("The credentials file at %s contained invalid JSON. This is probably caused by passing the wrong filepath. Reminder: the credentials file is a .json file created via `cloudflared tunnel create`.", e.path)
sc.log.Error().Msgf("Invalid JSON when parsing credentials file: %s", e.err.Error())
}

View File

@ -16,15 +16,16 @@ import (
"time"
"github.com/google/uuid"
homedir "github.com/mitchellh/go-homedir"
"github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
"github.com/urfave/cli/v2/altsrc"
"golang.org/x/net/idna"
yaml "gopkg.in/yaml.v3"
"gopkg.in/yaml.v3"
"github.com/cloudflare/cloudflared/cfapi"
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
"github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/cmd/cloudflared/updater"
"github.com/cloudflare/cloudflared/config"
"github.com/cloudflare/cloudflared/connection"
@ -48,7 +49,6 @@ const (
noDiagNetworkFlagName = "no-diag-network"
diagContainerIDFlagName = "diag-container-id"
diagPodFlagName = "diag-pod-id"
metricsFlagName = "metrics"
LogFieldTunnelID = "tunnelID"
)
@ -60,7 +60,7 @@ var (
Usage: "Include deleted tunnels in the list",
}
listNameFlag = &cli.StringFlag{
Name: "name",
Name: flags.Name,
Aliases: []string{"n"},
Usage: "List tunnels with the given `NAME`",
}
@ -108,7 +108,7 @@ var (
EnvVars: []string{"TUNNEL_LIST_INVERT_SORT"},
}
featuresFlag = altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
Name: "features",
Name: flags.Features,
Aliases: []string{"F"},
Usage: "Opt into various features that are still being developed or tested.",
})
@ -130,14 +130,14 @@ var (
EnvVars: []string{"TUNNEL_TOKEN"},
})
forceDeleteFlag = &cli.BoolFlag{
Name: "force",
Name: flags.Force,
Aliases: []string{"f"},
Usage: "Deletes a tunnel even if tunnel is connected and it has dependencies associated to it. (eg. IP routes)." +
" It is not possible to delete tunnels that have connections or non-deleted dependencies, without this flag.",
EnvVars: []string{"TUNNEL_RUN_FORCE_OVERWRITE"},
}
selectProtocolFlag = altsrc.NewStringFlag(&cli.StringFlag{
Name: "protocol",
Name: flags.Protocol,
Value: connection.AutoSelectFlag,
Aliases: []string{"p"},
Usage: fmt.Sprintf("Protocol implementation to connect with Cloudflare's edge network. %s", connection.AvailableProtocolFlagMessage),
@ -145,7 +145,7 @@ var (
Hidden: true,
})
postQuantumFlag = altsrc.NewBoolFlag(&cli.BoolFlag{
Name: "post-quantum",
Name: flags.PostQuantum,
Usage: "When given creates an experimental post-quantum secure tunnel",
Aliases: []string{"pq"},
EnvVars: []string{"TUNNEL_POST_QUANTUM"},
@ -181,17 +181,17 @@ var (
EnvVars: []string{"TUNNEL_CREATE_SECRET"},
}
icmpv4SrcFlag = &cli.StringFlag{
Name: "icmpv4-src",
Name: flags.ICMPV4Src,
Usage: "Source address to send/receive ICMPv4 messages. If not provided cloudflared will dial a local address to determine the source IP or fallback to 0.0.0.0.",
EnvVars: []string{"TUNNEL_ICMPV4_SRC"},
}
icmpv6SrcFlag = &cli.StringFlag{
Name: "icmpv6-src",
Name: flags.ICMPV6Src,
Usage: "Source address and the interface name to send/receive ICMPv6 messages. If not provided cloudflared will dial a local address to determine the source IP or fallback to ::.",
EnvVars: []string{"TUNNEL_ICMPV6_SRC"},
}
metricsFlag = &cli.StringFlag{
Name: metricsFlagName,
Name: flags.Metrics,
Usage: "The metrics server address i.e.: 127.0.0.1:12345. If your instance is running in a Docker/Kubernetes environment you need to setup port forwarding for your application.",
Value: "",
}
@ -230,6 +230,11 @@ var (
Usage: "Network diagnostics won't be performed",
Value: false,
}
maxActiveFlowsFlag = &cli.Uint64Flag{
Name: flags.MaxActiveFlows,
Usage: "Overrides the remote configuration for max active private network flows (TCP/UDP) that this cloudflared instance supports",
EnvVars: []string{"TUNNEL_MAX_ACTIVE_FLOWS"},
}
)
func buildCreateCommand() *cli.Command {
@ -332,7 +337,7 @@ func listCommand(c *cli.Context) error {
if !c.Bool("show-deleted") {
filter.NoDeleted()
}
if name := c.String("name"); name != "" {
if name := c.String(flags.Name); name != "" {
filter.ByName(name)
}
if namePrefix := c.String("name-prefix"); namePrefix != "" {
@ -462,9 +467,9 @@ func buildReadyCommand() *cli.Command {
}
func readyCommand(c *cli.Context) error {
metricsOpts := c.String("metrics")
if !c.IsSet("metrics") {
return fmt.Errorf("--metrics has to be provided")
metricsOpts := c.String(flags.Metrics)
if !c.IsSet(flags.Metrics) {
return errors.New("--metrics has to be provided")
}
requestURL := fmt.Sprintf("http://%s/ready", metricsOpts)
@ -705,6 +710,7 @@ func buildRunCommand() *cli.Command {
tunnelTokenFlag,
icmpv4SrcFlag,
icmpv6SrcFlag,
maxActiveFlowsFlag,
}
flags = append(flags, configureProxyFlags(false)...)
return &cli.Command{
@ -1073,7 +1079,7 @@ func diagCommand(ctx *cli.Context) error {
log := sctx.log
options := diagnostic.Options{
KnownAddresses: metrics.GetMetricsKnownAddresses(metrics.Runtime),
Address: sctx.c.String(metricsFlagName),
Address: sctx.c.String(flags.Metrics),
ContainerID: sctx.c.String(diagContainerIDFlagName),
PodID: sctx.c.String(diagPodFlagName),
Toggles: diagnostic.Toggles{


@ -32,7 +32,7 @@ func buildRouteIPSubcommand() *cli.Command {
UsageText: "cloudflared tunnel [--config FILEPATH] route COMMAND [arguments...]",
Description: `cloudflared can provision routes for any IP space in your corporate network. Users enrolled in
your Cloudflare for Teams organization can reach those IPs through the Cloudflare WARP
client. You can then configure L7/L4 filtering on https://dash.teams.cloudflare.com to
client. You can then configure L7/L4 filtering on https://one.dash.cloudflare.com to
determine who can reach certain routes.
By default IP routes all exist within a single virtual network. If you use the same IP
space(s) in different physical private networks, all meant to be reachable via IP routes,


@ -15,13 +15,14 @@ import (
"golang.org/x/term"
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/config"
"github.com/cloudflare/cloudflared/logger"
)
const (
DefaultCheckUpdateFreq = time.Hour * 24
noUpdateInShellMessage = "cloudflared will not automatically update when run from the shell. To enable auto-updates, run cloudflared as a service: https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/run-tunnel/as-a-service/"
noUpdateInShellMessage = "cloudflared will not automatically update when run from the shell. To enable auto-updates, run cloudflared as a service: https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/configure-tunnels/local-management/as-a-service/"
noUpdateOnWindowsMessage = "cloudflared will not automatically update on Windows systems."
noUpdateManagedPackageMessage = "cloudflared will not automatically update if installed by a package manager."
isManagedInstallFile = ".installedFromPackageManager"
@ -38,6 +39,7 @@ var (
// BinaryUpdated implements ExitCoder interface, the app will exit with status code 11
// https://pkg.go.dev/github.com/urfave/cli/v2?tab=doc#ExitCoder
// nolint: errname
type statusSuccess struct {
newVersion string
}
@ -50,16 +52,16 @@ func (u *statusSuccess) ExitCode() int {
return 11
}
// UpdateErr implements ExitCoder interface, the app will exit with status code 10
type statusErr struct {
// statusError implements ExitCoder interface, the app will exit with status code 10
type statusError struct {
err error
}
func (e *statusErr) Error() string {
func (e *statusError) Error() string {
return fmt.Sprintf("failed to update cloudflared: %v", e.err)
}
func (e *statusErr) ExitCode() int {
func (e *statusError) ExitCode() int {
return 10
}
@ -79,7 +81,7 @@ type UpdateOutcome struct {
}
func (uo *UpdateOutcome) noUpdate() bool {
return uo.Error == nil && uo.Updated == false
return uo.Error == nil && !uo.Updated
}
func Init(info *cliutil.BuildInfo) {
@ -153,7 +155,7 @@ func Update(c *cli.Context) error {
log.Info().Msg("cloudflared is set to update from staging")
}
isForced := c.Bool("force")
isForced := c.Bool(cfdflags.Force)
if isForced {
log.Info().Msg("cloudflared is set to upgrade to the latest publish version regardless of the current version")
}
@ -166,7 +168,7 @@ func Update(c *cli.Context) error {
intendedVersion: c.String("version"),
})
if updateOutcome.Error != nil {
return &statusErr{updateOutcome.Error}
return &statusError{updateOutcome.Error}
}
if updateOutcome.noUpdate() {
@ -252,7 +254,7 @@ func (a *AutoUpdater) Run(ctx context.Context) error {
pid, err := a.listeners.StartProcess()
if err != nil {
a.log.Err(err).Msg("Unable to restart server automatically")
return &statusErr{err: err}
return &statusError{err: err}
}
// stop old process after autoupdate. Otherwise we create a new process
// after each update


@ -26,7 +26,7 @@ import (
const (
windowsServiceName = "Cloudflared"
windowsServiceDescription = "Cloudflared agent"
windowsServiceUrl = "https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/run-tunnel/as-a-service/windows/"
windowsServiceUrl = "https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/configure-tunnels/local-management/as-a-service/windows/"
recoverActionDelay = time.Second * 20
failureCountResetPeriod = time.Hour * 24


@ -60,6 +60,7 @@ type Credentials struct {
AccountTag string
TunnelSecret []byte
TunnelID uuid.UUID
Endpoint string
}
func (c *Credentials) Auth() pogs.TunnelAuth {
@ -74,13 +75,16 @@ type TunnelToken struct {
AccountTag string `json:"a"`
TunnelSecret []byte `json:"s"`
TunnelID uuid.UUID `json:"t"`
Endpoint string `json:"e,omitempty"`
}
func (t TunnelToken) Credentials() Credentials {
// nolint: gosimple
return Credentials{
AccountTag: t.AccountTag,
TunnelSecret: t.TunnelSecret,
TunnelID: t.TunnelID,
Endpoint: t.Endpoint,
}
}
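The Endpoint field above travels with the token through the compact JSON tags a/s/t/e. A dependency-free sketch of decoding such a token, assuming standard base64 over the JSON payload; the real cloudflared token type uses uuid.UUID for the tunnel ID and its exact encoding may differ, and all values below are made up.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"
)

// tunnelToken mirrors the JSON tags shown above, including the optional "e" endpoint field.
// TunnelID is a plain string here only to keep the sketch dependency-free.
type tunnelToken struct {
	AccountTag   string `json:"a"`
	TunnelSecret []byte `json:"s"`
	TunnelID     string `json:"t"`
	Endpoint     string `json:"e,omitempty"`
}

func main() {
	// Build and encode a token the same way it is decoded below.
	tok := tunnelToken{
		AccountTag:   "0123456789abcdef", // hypothetical account tag
		TunnelSecret: []byte("at-least-32-bytes-of-secret-data"),
		TunnelID:     "7ec1e4a3-7b3d-4bd9-8d58-3b4f4d2f7c11",
		Endpoint:     "fed", // only present for tokens that target a dedicated endpoint
	}
	raw, _ := json.Marshal(tok)
	encoded := base64.StdEncoding.EncodeToString(raw)

	// Decoding: base64 first, then JSON into the struct above.
	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		log.Fatal(err)
	}
	var parsed tunnelToken
	if err := json.Unmarshal(decoded, &parsed); err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.TunnelID, parsed.Endpoint)
}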


@ -9,6 +9,7 @@ import (
const (
logFieldOriginCertPath = "originCertPath"
FedEndpoint = "fed"
)
type User struct {
@ -32,6 +33,10 @@ func (c User) CertPath() string {
return c.certPath
}
func (c User) IsFEDEndpoint() bool {
return c.cert.Endpoint == FedEndpoint
}
// Client uses the user credentials to create a Cloudflare API client
func (c *User) Client(apiURL string, userAgent string, log *zerolog.Logger) (cfapi.Client, error) {
if apiURL == "" {
@ -45,7 +50,6 @@ func (c *User) Client(apiURL string, userAgent string, log *zerolog.Logger) (cfa
userAgent,
log,
)
if err != nil {
return nil, err
}
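IsFEDEndpoint() lets callers steer which API gateway a credential talks to. A rough sketch of that branching; the URLs and helper below are hypothetical placeholders, not Cloudflare's actual gateway addresses.

package main

import "fmt"

const fedEndpoint = "fed"

type user struct {
	endpoint string
}

func (u user) isFEDEndpoint() bool { return u.endpoint == fedEndpoint }

// apiBaseURL picks a base URL depending on the credential's endpoint.
// Both URLs are placeholders used only to illustrate the branch.
func apiBaseURL(u user) string {
	if u.isFEDEndpoint() {
		return "https://api.fed.example.invalid" // hypothetical FED API gateway
	}
	return "https://api.example.invalid" // hypothetical default API gateway
}

func main() {
	fmt.Println(apiBaseURL(user{endpoint: "fed"}))
	fmt.Println(apiBaseURL(user{endpoint: ""}))
}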


@ -1,11 +1,13 @@
package credentials
import (
"bytes"
"encoding/json"
"encoding/pem"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/mitchellh/go-homedir"
"github.com/rs/zerolog"
@ -15,19 +17,30 @@ import (
const (
DefaultCredentialFile = "cert.pem"
OriginCertFlag = "origincert"
)
type namedTunnelToken struct {
type OriginCert struct {
ZoneID string `json:"zoneID"`
AccountID string `json:"accountID"`
APIToken string `json:"apiToken"`
Endpoint string `json:"endpoint,omitempty"`
}
type OriginCert struct {
ZoneID string
APIToken string
AccountID string
func (oc *OriginCert) UnmarshalJSON(data []byte) error {
var aux struct {
ZoneID string `json:"zoneID"`
AccountID string `json:"accountID"`
APIToken string `json:"apiToken"`
Endpoint string `json:"endpoint,omitempty"`
}
if err := json.Unmarshal(data, &aux); err != nil {
return fmt.Errorf("error parsing OriginCert: %v", err)
}
oc.ZoneID = aux.ZoneID
oc.AccountID = aux.AccountID
oc.APIToken = aux.APIToken
oc.Endpoint = strings.ToLower(aux.Endpoint)
return nil
}
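The strings.ToLower call is what lets a certificate that stores the endpoint as, say, "FED" still compare equal to the lowercase FedEndpoint constant. A standalone sketch of that normalization, using a trimmed copy of the type:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type originCert struct {
	Endpoint string `json:"endpoint,omitempty"`
}

// UnmarshalJSON lowercases the endpoint so later comparisons are case-insensitive.
func (oc *originCert) UnmarshalJSON(data []byte) error {
	var aux struct {
		Endpoint string `json:"endpoint,omitempty"`
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	oc.Endpoint = strings.ToLower(aux.Endpoint)
	return nil
}

func main() {
	var oc originCert
	_ = json.Unmarshal([]byte(`{"endpoint":"FED"}`), &oc)
	fmt.Println(oc.Endpoint == "fed") // true
}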
// FindDefaultOriginCertPath returns the first path that contains a cert.pem file. If none of the
@ -42,40 +55,56 @@ func FindDefaultOriginCertPath() string {
return ""
}
func DecodeOriginCert(blocks []byte) (*OriginCert, error) {
return decodeOriginCert(blocks)
}
func (cert *OriginCert) EncodeOriginCert() ([]byte, error) {
if cert == nil {
return nil, fmt.Errorf("originCert cannot be nil")
}
buffer, err := json.Marshal(cert)
if err != nil {
return nil, fmt.Errorf("originCert marshal failed: %v", err)
}
block := pem.Block{
Type: "ARGO TUNNEL TOKEN",
Headers: map[string]string{},
Bytes: buffer,
}
var out bytes.Buffer
err = pem.Encode(&out, &block)
if err != nil {
return nil, fmt.Errorf("pem encoding failed: %v", err)
}
return out.Bytes(), nil
}
func decodeOriginCert(blocks []byte) (*OriginCert, error) {
if len(blocks) == 0 {
return nil, fmt.Errorf("Cannot decode empty certificate")
return nil, fmt.Errorf("cannot decode empty certificate")
}
originCert := OriginCert{}
block, rest := pem.Decode(blocks)
for {
if block == nil {
break
}
for block != nil {
switch block.Type {
case "PRIVATE KEY", "CERTIFICATE":
// this is for legacy purposes.
break
case "ARGO TUNNEL TOKEN":
if originCert.ZoneID != "" || originCert.APIToken != "" {
return nil, fmt.Errorf("Found multiple tokens in the certificate")
return nil, fmt.Errorf("found multiple tokens in the certificate")
}
// The token is a string,
// Try the newer JSON format
ntt := namedTunnelToken{}
if err := json.Unmarshal(block.Bytes, &ntt); err == nil {
originCert.ZoneID = ntt.ZoneID
originCert.APIToken = ntt.APIToken
originCert.AccountID = ntt.AccountID
}
_ = json.Unmarshal(block.Bytes, &originCert)
default:
return nil, fmt.Errorf("Unknown block %s in the certificate", block.Type)
return nil, fmt.Errorf("unknown block %s in the certificate", block.Type)
}
block, rest = pem.Decode(rest)
}
if originCert.ZoneID == "" || originCert.APIToken == "" {
return nil, fmt.Errorf("Missing token in the certificate")
return nil, fmt.Errorf("missing token in the certificate")
}
return &originCert, nil


@ -16,27 +16,25 @@ const (
originCertFile = "cert.pem"
)
var (
nopLog = zerolog.Nop().With().Logger()
)
var nopLog = zerolog.Nop().With().Logger()
func TestLoadOriginCert(t *testing.T) {
cert, err := decodeOriginCert([]byte{})
assert.Equal(t, fmt.Errorf("Cannot decode empty certificate"), err)
assert.Equal(t, fmt.Errorf("cannot decode empty certificate"), err)
assert.Nil(t, cert)
blocks, err := os.ReadFile("test-cert-unknown-block.pem")
assert.NoError(t, err)
require.NoError(t, err)
cert, err = decodeOriginCert(blocks)
assert.Equal(t, fmt.Errorf("Unknown block RSA PRIVATE KEY in the certificate"), err)
assert.Equal(t, fmt.Errorf("unknown block RSA PRIVATE KEY in the certificate"), err)
assert.Nil(t, cert)
}
func TestJSONArgoTunnelTokenEmpty(t *testing.T) {
blocks, err := os.ReadFile("test-cert-no-token.pem")
assert.NoError(t, err)
require.NoError(t, err)
cert, err := decodeOriginCert(blocks)
assert.Equal(t, fmt.Errorf("Missing token in the certificate"), err)
assert.Equal(t, fmt.Errorf("missing token in the certificate"), err)
assert.Nil(t, cert)
}
@ -52,51 +50,21 @@ func TestJSONArgoTunnelToken(t *testing.T) {
func CloudflareTunnelTokenTest(t *testing.T, path string) {
blocks, err := os.ReadFile(path)
assert.NoError(t, err)
require.NoError(t, err)
cert, err := decodeOriginCert(blocks)
assert.NoError(t, err)
require.NoError(t, err)
assert.NotNil(t, cert)
assert.Equal(t, "7b0a4d77dfb881c1a3b7d61ea9443e19", cert.ZoneID)
key := "test-service-key"
assert.Equal(t, key, cert.APIToken)
}
type mockFile struct {
path string
data []byte
err error
}
type mockFileSystem struct {
files map[string]mockFile
}
func newMockFileSystem(files ...mockFile) *mockFileSystem {
fs := mockFileSystem{map[string]mockFile{}}
for _, f := range files {
fs.files[f.path] = f
}
return &fs
}
func (fs *mockFileSystem) ReadFile(path string) ([]byte, error) {
if f, ok := fs.files[path]; ok {
return f.data, f.err
}
return nil, os.ErrNotExist
}
func (fs *mockFileSystem) ValidFilePath(path string) bool {
_, exists := fs.files[path]
return exists
}
func TestFindOriginCert_Valid(t *testing.T) {
file, err := os.ReadFile("test-cloudflare-tunnel-cert-json.pem")
require.NoError(t, err)
dir := t.TempDir()
certPath := filepath.Join(dir, originCertFile)
os.WriteFile(certPath, file, fs.ModePerm)
_ = os.WriteFile(certPath, file, fs.ModePerm)
path, err := FindOriginCert(certPath, &nopLog)
require.NoError(t, err)
require.Equal(t, certPath, path)
@ -108,3 +76,28 @@ func TestFindOriginCert_Missing(t *testing.T) {
_, err := FindOriginCert(certPath, &nopLog)
require.Error(t, err)
}
func TestEncodeDecodeOriginCert(t *testing.T) {
cert := OriginCert{
ZoneID: "zone",
AccountID: "account",
APIToken: "token",
Endpoint: "FED",
}
blocks, err := cert.EncodeOriginCert()
require.NoError(t, err)
decodedCert, err := DecodeOriginCert(blocks)
require.NoError(t, err)
assert.NotNil(t, cert)
assert.Equal(t, "zone", decodedCert.ZoneID)
assert.Equal(t, "account", decodedCert.AccountID)
assert.Equal(t, "token", decodedCert.APIToken)
assert.Equal(t, FedEndpoint, decodedCert.Endpoint)
}
func TestEncodeDecodeNilOriginCert(t *testing.T) {
var cert *OriginCert
blocks, err := cert.EncodeOriginCert()
assert.Equal(t, fmt.Errorf("originCert cannot be nil"), err)
require.Nil(t, blocks)
}


@ -87,3 +87,4 @@ M2i4QoOFcSKIG+v4SuvgEJHgG8vGvxh2qlSxnMWuPV+7/1P5ATLqDj1PlKms+BNR
y7sc5AT9PclkL3Y9MNzOu0LXyBkGYcl8M0EQfLv9VPbWT+NXiMg/O2CHiT02pAAz
uQicoQq3yzeQh20wtrtaXzTNmA==
-----END RSA PRIVATE KEY-----


@ -9,7 +9,7 @@ import (
"net/url"
"strconv"
"github.com/cloudflare/cloudflared/logger"
cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
)
type httpClient struct {
@ -86,12 +86,12 @@ func (client *httpClient) GetLogConfiguration(ctx context.Context) (*LogConfigur
return nil, fmt.Errorf("error convertin pid to int: %w", err)
}
logFile, exists := data[logger.LogFileFlag]
logFile, exists := data[cfdflags.LogFile]
if exists {
return &LogConfiguration{logFile, "", uid}, nil
}
logDirectory, exists := data[logger.LogDirectoryFlag]
logDirectory, exists := data[cfdflags.LogDirectory]
if exists {
return &LogConfiguration{"", logDirectory, uid}, nil
}

go.mod

@ -36,11 +36,11 @@ require (
go.opentelemetry.io/proto/otlp v1.2.0
go.uber.org/automaxprocs v1.4.0
go.uber.org/mock v0.5.0
golang.org/x/crypto v0.24.0
golang.org/x/crypto v0.31.0
golang.org/x/net v0.26.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.21.0
golang.org/x/term v0.21.0
golang.org/x/sync v0.10.0
golang.org/x/sys v0.28.0
golang.org/x/term v0.27.0
google.golang.org/protobuf v1.34.1
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v3 v3.0.1
@ -87,7 +87,7 @@ require (
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/oauth2 v0.18.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/tools v0.22.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect

go.sum

@ -222,8 +222,8 @@ go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
@ -242,8 +242,8 @@ golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -254,19 +254,19 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=


@ -15,6 +15,7 @@ import (
"golang.org/x/term"
"gopkg.in/natefinch/lumberjack.v2"
cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/management"
)
@ -22,14 +23,6 @@ const (
EnableTerminalLog = false
DisableTerminalLog = true
LogLevelFlag = "loglevel"
LogFileFlag = "logfile"
LogDirectoryFlag = "log-directory"
LogTransportLevelFlag = "transport-loglevel"
LogSSHDirectoryFlag = "log-directory"
LogSSHLevelFlag = "log-level"
dirPermMode = 0744 // rwxr--r--
filePermMode = 0644 // rw-r--r--
@ -136,15 +129,15 @@ func newZerolog(loggerConfig *Config) *zerolog.Logger {
}
func CreateTransportLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
return createFromContext(c, LogTransportLevelFlag, LogDirectoryFlag, disableTerminal)
return createFromContext(c, cfdflags.TransportLogLevel, cfdflags.LogDirectory, disableTerminal)
}
func CreateLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
return createFromContext(c, LogLevelFlag, LogDirectoryFlag, disableTerminal)
return createFromContext(c, cfdflags.LogLevel, cfdflags.LogDirectory, disableTerminal)
}
func CreateSSHLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
return createFromContext(c, LogSSHLevelFlag, LogSSHDirectoryFlag, disableTerminal)
return createFromContext(c, cfdflags.LogLevelSSH, cfdflags.LogDirectory, disableTerminal)
}
func createFromContext(
@ -154,7 +147,7 @@ func createFromContext(
disableTerminal bool,
) *zerolog.Logger {
logLevel := c.String(logLevelFlagName)
logFile := c.String(LogFileFlag)
logFile := c.String(cfdflags.LogFile)
logDirectory := c.String(logDirectoryFlagName)
loggerConfig := CreateConfig(
@ -166,7 +159,7 @@ func createFromContext(
log := newZerolog(loggerConfig)
if incompatibleFlagsSet := logFile != "" && logDirectory != ""; incompatibleFlagsSet {
log.Error().Msgf("Your config includes values for both %s (%s) and %s (%s), but they are incompatible. %s takes precedence.", LogFileFlag, logFile, logDirectoryFlagName, logDirectory, LogFileFlag)
log.Error().Msgf("Your config includes values for both %s (%s) and %s (%s), but they are incompatible. %s takes precedence.", cfdflags.LogFile, logFile, logDirectoryFlagName, logDirectory, cfdflags.LogFile)
}
return log
}
@ -205,7 +198,6 @@ var (
func createFileWriter(config FileConfig) (io.Writer, error) {
singleFileInit.once.Do(func() {
var logFile io.Writer
fullpath := config.Fullpath()


@ -129,7 +129,7 @@ func (e *LogEventType) UnmarshalJSON(data []byte) error {
// LogLevel corresponds to the zerolog logging levels
// "panic", "fatal", and "trace" are exempt from this list as they are rarely used and, at least
// the the first two are limited to failure conditions that lead to cloudflared shutting down.
// the first two are limited to failure conditions that lead to cloudflared shutting down.
type LogLevel int8
const (


@ -4,16 +4,17 @@ import (
"context"
"encoding/json"
"fmt"
"strconv"
"sync"
"sync/atomic"
"github.com/pkg/errors"
pkgerrors "github.com/pkg/errors"
"github.com/rs/zerolog"
cfdflow "github.com/cloudflare/cloudflared/flow"
"github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/config"
"github.com/cloudflare/cloudflared/connection"
cfdflow "github.com/cloudflare/cloudflared/flow"
"github.com/cloudflare/cloudflared/ingress"
"github.com/cloudflare/cloudflared/proxy"
"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
@ -117,6 +118,30 @@ func (o *Orchestrator) UpdateConfig(version int32, config []byte) *pogs.UpdateCo
}
}
// overrideRemoteWarpRoutingWithLocalValues overrides the ingress.WarpRoutingConfig that comes from the remote with
// the local values if there is any.
func (o *Orchestrator) overrideRemoteWarpRoutingWithLocalValues(remoteWarpRouting *ingress.WarpRoutingConfig) error {
return o.overrideMaxActiveFlows(o.config.ConfigurationFlags[flags.MaxActiveFlows], remoteWarpRouting)
}
// overrideMaxActiveFlows checks the local configuration flags, and if a value is found for the flags.MaxActiveFlows
// overrides the value that comes on the remote ingress.WarpRoutingConfig with the local value.
func (o *Orchestrator) overrideMaxActiveFlows(maxActiveFlowsLocalConfig string, remoteWarpRouting *ingress.WarpRoutingConfig) error {
// If max active flows isn't defined locally just use the remote value
if maxActiveFlowsLocalConfig == "" {
return nil
}
maxActiveFlowsLocalOverride, err := strconv.ParseUint(maxActiveFlowsLocalConfig, 10, 64)
if err != nil {
return pkgerrors.Wrapf(err, "failed to parse %s", flags.MaxActiveFlows)
}
// Override the value that comes from the remote with the local value
remoteWarpRouting.MaxActiveFlows = maxActiveFlowsLocalOverride
return nil
}
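Stripped of the Orchestrator plumbing, the override rule above reduces to: an empty local flag keeps the remote setting, otherwise the parsed local value wins. A minimal, dependency-free sketch of that rule:

package main

import (
	"fmt"
	"strconv"
)

// overrideMaxActiveFlows applies the same rule as the method above, on plain values:
// an empty local flag keeps the remote setting, otherwise the parsed local value wins.
func overrideMaxActiveFlows(local string, remote uint64) (uint64, error) {
	if local == "" {
		return remote, nil
	}
	parsed, err := strconv.ParseUint(local, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to parse max-active-flows %q: %w", local, err)
	}
	return parsed, nil
}

func main() {
	v, _ := overrideMaxActiveFlows("", 100)
	fmt.Println(v) // 100: no local override, remote value kept
	v, _ = overrideMaxActiveFlows("500", 100)
	fmt.Println(v) // 500: local flag overrides the remote configuration
}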
// The caller is responsible to make sure there is no concurrent access
func (o *Orchestrator) updateIngress(ingressRules ingress.Ingress, warpRouting ingress.WarpRoutingConfig) error {
select {
@ -125,6 +150,11 @@ func (o *Orchestrator) updateIngress(ingressRules ingress.Ingress, warpRouting i
default:
}
// Override the remote warp routing configuration values with any local overrides
if err := o.overrideRemoteWarpRoutingWithLocalValues(&warpRouting); err != nil {
return pkgerrors.Wrap(err, "failed to merge local overrides into warp routing configuration")
}
// Assign the internal ingress rules to the parsed ingress
ingressRules.InternalRules = o.internalRules
@ -139,7 +169,7 @@ func (o *Orchestrator) updateIngress(ingressRules ingress.Ingress, warpRouting i
// The downside is minimized because none of the ingress.OriginService implementation have that requirement
proxyShutdownC := make(chan struct{})
if err := ingressRules.StartOrigins(o.log, proxyShutdownC); err != nil {
return errors.Wrap(err, "failed to start origin")
return pkgerrors.Wrap(err, "failed to start origin")
}
// Update the flow limit since the configuration might have changed


@ -16,8 +16,11 @@ import (
"github.com/google/uuid"
gows "github.com/gorilla/websocket"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/config"
"github.com/cloudflare/cloudflared/connection"
"github.com/cloudflare/cloudflared/ingress"
@ -106,25 +109,25 @@ func TestUpdateConfiguration(t *testing.T) {
require.Len(t, configV2.Ingress.Rules, 3)
// originRequest of this ingress rule overrides global default
require.Equal(t, config.CustomDuration{Duration: time.Second * 10}, configV2.Ingress.Rules[0].Config.ConnectTimeout)
require.Equal(t, true, configV2.Ingress.Rules[0].Config.NoTLSVerify)
require.True(t, configV2.Ingress.Rules[0].Config.NoTLSVerify)
// Inherited from global default
require.Equal(t, true, configV2.Ingress.Rules[0].Config.NoHappyEyeballs)
require.True(t, configV2.Ingress.Rules[0].Config.NoHappyEyeballs)
// Validate ingress rule 1
require.Equal(t, "jira.tunnel.org", configV2.Ingress.Rules[1].Hostname)
require.True(t, configV2.Ingress.Rules[1].Matches("jira.tunnel.org", "/users"))
require.Equal(t, "http://172.32.20.6:80", configV2.Ingress.Rules[1].Service.String())
// originRequest of this ingress rule overrides global default
require.Equal(t, config.CustomDuration{Duration: time.Second * 30}, configV2.Ingress.Rules[1].Config.ConnectTimeout)
require.Equal(t, true, configV2.Ingress.Rules[1].Config.NoTLSVerify)
require.True(t, configV2.Ingress.Rules[1].Config.NoTLSVerify)
// Inherited from global default
require.Equal(t, true, configV2.Ingress.Rules[1].Config.NoHappyEyeballs)
require.True(t, configV2.Ingress.Rules[1].Config.NoHappyEyeballs)
// Validate ingress rule 2, it's the catch-all rule
require.True(t, configV2.Ingress.Rules[2].Matches("blogs.tunnel.io", "/2022/02/10"))
// Inherited from global default
require.Equal(t, config.CustomDuration{Duration: time.Second * 90}, configV2.Ingress.Rules[2].Config.ConnectTimeout)
require.Equal(t, false, configV2.Ingress.Rules[2].Config.NoTLSVerify)
require.Equal(t, true, configV2.Ingress.Rules[2].Config.NoHappyEyeballs)
require.Equal(t, configV2.WarpRouting.ConnectTimeout.Duration, 10*time.Second)
require.False(t, configV2.Ingress.Rules[2].Config.NoTLSVerify)
require.True(t, configV2.Ingress.Rules[2].Config.NoHappyEyeballs)
require.Equal(t, 10*time.Second, configV2.WarpRouting.ConnectTimeout.Duration)
originProxyV2, err := orchestrator.GetOriginProxy()
require.NoError(t, err)
@ -317,7 +320,7 @@ func TestConcurrentUpdateAndRead(t *testing.T) {
go func(i int, originProxy connection.OriginProxy) {
defer wg.Done()
resp, err := proxyHTTP(originProxy, hostname)
require.NoError(t, err, "proxyHTTP %d failed %v", i, err)
assert.NoError(t, err, "proxyHTTP %d failed %v", i, err)
defer resp.Body.Close()
var warpRoutingDisabled bool
@ -326,16 +329,16 @@ func TestConcurrentUpdateAndRead(t *testing.T) {
// v1 proxy, warp enabled
case 200:
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, t.Name(), string(body))
assert.NoError(t, err)
assert.Equal(t, t.Name(), string(body))
warpRoutingDisabled = false
// v2 proxy, warp disabled
case 204:
require.Greater(t, i, concurrentRequests/4)
assert.Greater(t, i, concurrentRequests/4)
warpRoutingDisabled = true
// v3 proxy, warp enabled
case 418:
require.Greater(t, i, concurrentRequests/2)
assert.Greater(t, i, concurrentRequests/2)
warpRoutingDisabled = false
}
@ -358,11 +361,10 @@ func TestConcurrentUpdateAndRead(t *testing.T) {
err = proxyTCP(ctx, originProxy, tcpOrigin.Addr().String(), w, pr)
if warpRoutingDisabled {
require.Error(t, err, "expect proxyTCP %d to return error", i)
assert.Error(t, err, "expect proxyTCP %d to return error", i)
} else {
require.NoError(t, err, "proxyTCP %d failed %v", i, err)
assert.NoError(t, err, "proxyTCP %d failed %v", i, err)
}
}(i, originProxy)
if i == concurrentRequests/4 {
@ -388,6 +390,57 @@ func TestConcurrentUpdateAndRead(t *testing.T) {
wg.Wait()
}
// TestOverrideWarpRoutingConfigWithLocalValues tests that if a value is defined in the Config.ConfigurationFlags,
// it will override the value that comes from the remote result.
func TestOverrideWarpRoutingConfigWithLocalValues(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
assertMaxActiveFlows := func(orchestrator *Orchestrator, expectedValue uint64) {
configJson, err := orchestrator.GetConfigJSON()
require.NoError(t, err)
var result map[string]interface{}
err = json.Unmarshal(configJson, &result)
require.NoError(t, err)
warpRouting := result["warp-routing"].(map[string]interface{})
require.EqualValues(t, expectedValue, warpRouting["maxActiveFlows"])
}
remoteValue := uint64(100)
remoteIngress := ingress.Ingress{}
remoteWarpConfig := ingress.WarpRoutingConfig{
MaxActiveFlows: remoteValue,
}
remoteConfig := &Config{
Ingress: &remoteIngress,
WarpRouting: remoteWarpConfig,
ConfigurationFlags: map[string]string{},
}
orchestrator, err := NewOrchestrator(ctx, remoteConfig, testTags, []ingress.Rule{}, &testLogger)
require.NoError(t, err)
assertMaxActiveFlows(orchestrator, remoteValue)
// Add a local override for the maxActiveFlows
localValue := uint64(500)
remoteConfig.ConfigurationFlags[flags.MaxActiveFlows] = fmt.Sprintf("%d", localValue)
// Force a configuration refresh
err = orchestrator.updateIngress(remoteIngress, remoteWarpConfig)
require.NoError(t, err)
// Check the value being used is the local one
assertMaxActiveFlows(orchestrator, localValue)
// Remove local override for the maxActiveFlows
delete(remoteConfig.ConfigurationFlags, flags.MaxActiveFlows)
// Force a configuration refresh
err = orchestrator.updateIngress(remoteIngress, remoteWarpConfig)
require.NoError(t, err)
// Check the value being used is now the remote again
assertMaxActiveFlows(orchestrator, remoteValue)
}
func proxyHTTP(originProxy connection.OriginProxy, hostname string) (*http.Response, error) {
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s", hostname), nil)
if err != nil {
@ -409,15 +462,16 @@ func proxyHTTP(originProxy connection.OriginProxy, hostname string) (*http.Respo
return w.Result(), nil
}
// nolint: testifylint // this is used inside go routines so it can't use `require.`
func tcpEyeball(t *testing.T, reqWriter io.WriteCloser, body string, respReadWriter *respReadWriteFlusher) {
writeN, err := reqWriter.Write([]byte(body))
require.NoError(t, err)
assert.NoError(t, err)
readBuffer := make([]byte, writeN)
n, err := respReadWriter.Read(readBuffer)
require.NoError(t, err)
require.Equal(t, body, string(readBuffer[:n]))
require.Equal(t, writeN, n)
assert.NoError(t, err)
assert.Equal(t, body, string(readBuffer[:n]))
assert.Equal(t, writeN, n)
}
func proxyTCP(ctx context.Context, originProxy connection.OriginProxy, originAddr string, w http.ResponseWriter, reqBody io.ReadCloser) error {
@ -458,14 +512,15 @@ func serveTCPOrigin(t *testing.T, tcpOrigin net.Listener, wg *sync.WaitGroup) {
}
}
// nolint: testifylint // this is used inside go routines so it can't use `require.`
func echoTCP(t *testing.T, conn net.Conn) {
readBuf := make([]byte, 1000)
readN, err := conn.Read(readBuf)
require.NoError(t, err)
assert.NoError(t, err)
writeN, err := conn.Write(readBuf[:readN])
require.NoError(t, err)
require.Equal(t, readN, writeN)
assert.NoError(t, err)
assert.Equal(t, readN, writeN)
}
type validateHostHandler struct {
@ -479,16 +534,17 @@ func (vhh *validateHostHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
return
}
w.WriteHeader(http.StatusOK)
w.Write([]byte(vhh.body))
_, _ = w.Write([]byte(vhh.body))
}
// nolint: testifylint // this is used inside go routines so it can't use `require.`
func updateWithValidation(t *testing.T, orchestrator *Orchestrator, version int32, config []byte) {
resp := orchestrator.UpdateConfig(version, config)
require.NoError(t, resp.Err)
require.Equal(t, version, resp.LastAppliedVersion)
assert.NoError(t, resp.Err)
assert.Equal(t, version, resp.LastAppliedVersion)
}
// TestClosePreviousProxies makes sure proxies started in the pervious configuration version are shutdown
// TestClosePreviousProxies makes sure proxies started in the previous configuration version are shutdown
func TestClosePreviousProxies(t *testing.T) {
var (
hostname = "hello.tunnel1.org"
@ -532,6 +588,7 @@ func TestClosePreviousProxies(t *testing.T) {
originProxyV1, err := orchestrator.GetOriginProxy()
require.NoError(t, err)
// nolint: bodyclose
resp, err := proxyHTTP(originProxyV1, hostname)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
@ -540,12 +597,14 @@ func TestClosePreviousProxies(t *testing.T) {
originProxyV2, err := orchestrator.GetOriginProxy()
require.NoError(t, err)
// nolint: bodyclose
resp, err = proxyHTTP(originProxyV2, hostname)
require.NoError(t, err)
require.Equal(t, http.StatusTeapot, resp.StatusCode)
// The hello-world server in config v1 should have been stopped. We wait a bit since it's closed asynchronously.
time.Sleep(time.Millisecond * 10)
// nolint: bodyclose
resp, err = proxyHTTP(originProxyV1, hostname)
require.Error(t, err)
require.Nil(t, resp)
@ -557,6 +616,7 @@ func TestClosePreviousProxies(t *testing.T) {
require.NoError(t, err)
require.NotEqual(t, originProxyV1, originProxyV3)
// nolint: bodyclose
resp, err = proxyHTTP(originProxyV3, hostname)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
@ -566,6 +626,7 @@ func TestClosePreviousProxies(t *testing.T) {
// Wait for proxies to shutdown
time.Sleep(time.Millisecond * 10)
// nolint: bodyclose
resp, err = proxyHTTP(originProxyV3, hostname)
require.Error(t, err)
require.Nil(t, resp)
@ -622,7 +683,7 @@ func TestPersistentConnection(t *testing.T) {
go func() {
defer wg.Done()
conn, err := tcpOrigin.Accept()
require.NoError(t, err)
assert.NoError(t, err)
defer conn.Close()
// Expect 3 TCP messages
@ -630,26 +691,26 @@ func TestPersistentConnection(t *testing.T) {
echoTCP(t, conn)
}
}()
// Simulate cloudflared recieving a TCP connection
// Simulate cloudflared receiving a TCP connection
go func() {
defer wg.Done()
require.NoError(t, proxyTCP(ctx, originProxy, tcpOrigin.Addr().String(), tcpRespReadWriter, tcpReqReader))
assert.NoError(t, proxyTCP(ctx, originProxy, tcpOrigin.Addr().String(), tcpRespReadWriter, tcpReqReader))
}()
// Simulate cloudflared recieving a WS connection
// Simulate cloudflared receiving a WS connection
go func() {
defer wg.Done()
req, err := http.NewRequest(http.MethodGet, hostname, wsReqReader)
require.NoError(t, err)
assert.NoError(t, err)
// ProxyHTTP will add Connection, Upgrade and Sec-Websocket-Version headers
req.Header.Add("Sec-WebSocket-Key", "dGhlIHNhbXBsZSBub25jZQ==")
log := zerolog.Nop()
respWriter, err := connection.NewHTTP2RespWriter(req, wsRespReadWriter, connection.TypeWebsocket, &log)
require.NoError(t, err)
assert.NoError(t, err)
err = originProxy.ProxyHTTP(respWriter, tracing.NewTracedHTTPRequest(req, 0, &log), true)
require.NoError(t, err)
assert.NoError(t, err)
}()
// Simulate eyeball WS and TCP connections


@ -99,7 +99,7 @@ func (b *BackoffHandler) Backoff(ctx context.Context) bool {
}
}
// Sets a grace period within which the the backoff timer is maintained. After the grace
// Sets a grace period within which the backoff timer is maintained. After the grace
// period expires, the number of retries & backoff duration is reset.
func (b *BackoffHandler) SetGracePeriod() time.Duration {
maxTimeToWait := b.GetBaseTime() * 2 << (b.retries + 1)
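As a quick sanity check on the expression above (Go evaluates * and << left to right at equal precedence), a base time of one second after two prior retries gives (1s * 2) << 3 = 16s. A small sketch of how the cap grows with retries:

package main

import (
	"fmt"
	"time"
)

func main() {
	base := time.Second
	for retries := uint(0); retries < 4; retries++ {
		// Same expression shape as SetGracePeriod above: (base * 2) << (retries + 1).
		maxTimeToWait := base * 2 << (retries + 1)
		fmt.Println(retries, maxTimeToWait) // 0 4s, 1 8s, 2 16s, 3 32s
	}
}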


@ -247,9 +247,7 @@ func (s *Supervisor) startFirstTunnel(
ctx context.Context,
connectedSignal *signal.Signal,
) {
var (
err error
)
var err error
const firstConnIndex = 0
isStaticEdge := len(s.config.EdgeAddrs) > 0
defer func() {
@ -300,9 +298,7 @@ func (s *Supervisor) startTunnel(
index int,
connectedSignal *signal.Signal,
) {
var (
err error
)
var err error
defer func() {
s.tunnelErrors <- tunnelError{index: index, err: err}
}()


@ -70,7 +70,6 @@ func RunTransfer(transferURL *url.URL, appAUD, resourceName, key, value string,
}
return resourceData, nil
}
// BuildRequestURL creates a request suitable for a resource transfer.

vendor/golang.org/x/crypto/LICENSE

@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -11,7 +11,7 @@
// Deprecated: any new system should use AES (from crypto/aes, if necessary in
// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
// golang.org/x/crypto/chacha20poly1305).
package blowfish // import "golang.org/x/crypto/blowfish"
package blowfish
// The code is a port of Bruce Schneier's C implementation.
// See https://www.schneier.com/blowfish.html.


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego
//go:build (!arm64 && !s390x && !ppc64 && !ppc64le) || !gc || purego
package chacha20


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
//go:build gc && !purego && (ppc64 || ppc64le)
package chacha20


@ -19,7 +19,7 @@
// The differences in this and the original implementation are
// due to the calling conventions and initialization of constants.
//go:build gc && !purego
//go:build gc && !purego && (ppc64 || ppc64le)
#include "textflag.h"
@ -36,32 +36,68 @@
// for VPERMXOR
#define MASK R18
DATA consts<>+0x00(SB)/8, $0x3320646e61707865
DATA consts<>+0x08(SB)/8, $0x6b20657479622d32
DATA consts<>+0x10(SB)/8, $0x0000000000000001
DATA consts<>+0x18(SB)/8, $0x0000000000000000
DATA consts<>+0x20(SB)/8, $0x0000000000000004
DATA consts<>+0x28(SB)/8, $0x0000000000000000
DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d
DATA consts<>+0x38(SB)/8, $0x0203000106070405
DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c
DATA consts<>+0x48(SB)/8, $0x0102030005060704
DATA consts<>+0x50(SB)/8, $0x6170786561707865
DATA consts<>+0x58(SB)/8, $0x6170786561707865
DATA consts<>+0x60(SB)/8, $0x3320646e3320646e
DATA consts<>+0x68(SB)/8, $0x3320646e3320646e
DATA consts<>+0x70(SB)/8, $0x79622d3279622d32
DATA consts<>+0x78(SB)/8, $0x79622d3279622d32
DATA consts<>+0x80(SB)/8, $0x6b2065746b206574
DATA consts<>+0x88(SB)/8, $0x6b2065746b206574
DATA consts<>+0x90(SB)/8, $0x0000000100000000
DATA consts<>+0x98(SB)/8, $0x0000000300000002
DATA consts<>+0xa0(SB)/8, $0x5566774411223300
DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88
DATA consts<>+0xb0(SB)/8, $0x6677445522330011
DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899
DATA consts<>+0x00(SB)/4, $0x61707865
DATA consts<>+0x04(SB)/4, $0x3320646e
DATA consts<>+0x08(SB)/4, $0x79622d32
DATA consts<>+0x0c(SB)/4, $0x6b206574
DATA consts<>+0x10(SB)/4, $0x00000001
DATA consts<>+0x14(SB)/4, $0x00000000
DATA consts<>+0x18(SB)/4, $0x00000000
DATA consts<>+0x1c(SB)/4, $0x00000000
DATA consts<>+0x20(SB)/4, $0x00000004
DATA consts<>+0x24(SB)/4, $0x00000000
DATA consts<>+0x28(SB)/4, $0x00000000
DATA consts<>+0x2c(SB)/4, $0x00000000
DATA consts<>+0x30(SB)/4, $0x0e0f0c0d
DATA consts<>+0x34(SB)/4, $0x0a0b0809
DATA consts<>+0x38(SB)/4, $0x06070405
DATA consts<>+0x3c(SB)/4, $0x02030001
DATA consts<>+0x40(SB)/4, $0x0d0e0f0c
DATA consts<>+0x44(SB)/4, $0x090a0b08
DATA consts<>+0x48(SB)/4, $0x05060704
DATA consts<>+0x4c(SB)/4, $0x01020300
DATA consts<>+0x50(SB)/4, $0x61707865
DATA consts<>+0x54(SB)/4, $0x61707865
DATA consts<>+0x58(SB)/4, $0x61707865
DATA consts<>+0x5c(SB)/4, $0x61707865
DATA consts<>+0x60(SB)/4, $0x3320646e
DATA consts<>+0x64(SB)/4, $0x3320646e
DATA consts<>+0x68(SB)/4, $0x3320646e
DATA consts<>+0x6c(SB)/4, $0x3320646e
DATA consts<>+0x70(SB)/4, $0x79622d32
DATA consts<>+0x74(SB)/4, $0x79622d32
DATA consts<>+0x78(SB)/4, $0x79622d32
DATA consts<>+0x7c(SB)/4, $0x79622d32
DATA consts<>+0x80(SB)/4, $0x6b206574
DATA consts<>+0x84(SB)/4, $0x6b206574
DATA consts<>+0x88(SB)/4, $0x6b206574
DATA consts<>+0x8c(SB)/4, $0x6b206574
DATA consts<>+0x90(SB)/4, $0x00000000
DATA consts<>+0x94(SB)/4, $0x00000001
DATA consts<>+0x98(SB)/4, $0x00000002
DATA consts<>+0x9c(SB)/4, $0x00000003
DATA consts<>+0xa0(SB)/4, $0x11223300
DATA consts<>+0xa4(SB)/4, $0x55667744
DATA consts<>+0xa8(SB)/4, $0x99aabb88
DATA consts<>+0xac(SB)/4, $0xddeeffcc
DATA consts<>+0xb0(SB)/4, $0x22330011
DATA consts<>+0xb4(SB)/4, $0x66774455
DATA consts<>+0xb8(SB)/4, $0xaabb8899
DATA consts<>+0xbc(SB)/4, $0xeeffccdd
GLOBL consts<>(SB), RODATA, $0xc0
#ifdef GOARCH_ppc64
#define BE_XXBRW_INIT() \
LVSL (R0)(R0), V24 \
VSPLTISB $3, V25 \
VXOR V24, V25, V24 \
#define BE_XXBRW(vr) VPERM vr, vr, V24, vr
#else
#define BE_XXBRW_INIT()
#define BE_XXBRW(vr)
#endif
//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
MOVD out+0(FP), OUT
@ -94,6 +130,8 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
// Clear V27
VXOR V27, V27, V27
BE_XXBRW_INIT()
// V28
LXVW4X (CONSTBASE)(R11), VS60
@ -299,6 +337,11 @@ loop_vsx:
VADDUWM V8, V18, V8
VADDUWM V12, V19, V12
BE_XXBRW(V0)
BE_XXBRW(V4)
BE_XXBRW(V8)
BE_XXBRW(V12)
CMPU LEN, $64
BLT tail_vsx
@ -327,6 +370,11 @@ loop_vsx:
VADDUWM V9, V18, V8
VADDUWM V13, V19, V12
BE_XXBRW(V0)
BE_XXBRW(V4)
BE_XXBRW(V8)
BE_XXBRW(V12)
CMPU LEN, $64
BLT tail_vsx
@ -334,8 +382,8 @@ loop_vsx:
LXVW4X (INP)(R8), VS60
LXVW4X (INP)(R9), VS61
LXVW4X (INP)(R10), VS62
VXOR V27, V0, V27
VXOR V27, V0, V27
VXOR V28, V4, V28
VXOR V29, V8, V29
VXOR V30, V12, V30
@ -354,6 +402,11 @@ loop_vsx:
VADDUWM V10, V18, V8
VADDUWM V14, V19, V12
BE_XXBRW(V0)
BE_XXBRW(V4)
BE_XXBRW(V8)
BE_XXBRW(V12)
CMPU LEN, $64
BLT tail_vsx
@ -381,6 +434,11 @@ loop_vsx:
VADDUWM V11, V18, V8
VADDUWM V15, V19, V12
BE_XXBRW(V0)
BE_XXBRW(V4)
BE_XXBRW(V8)
BE_XXBRW(V12)
CMPU LEN, $64
BLT tail_vsx
@ -408,9 +466,9 @@ loop_vsx:
done_vsx:
// Increment counter by number of 64 byte blocks
MOVD (CNT), R14
MOVWZ (CNT), R14
ADD BLOCKS, R14
MOVD R14, (CNT)
MOVWZ R14, (CNT)
RET
tail_vsx:


@ -5,7 +5,7 @@
// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its
// extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and
// draft-irtf-cfrg-xchacha-01.
package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305"
package chacha20poly1305
import (
"crypto/cipher"

File diff suppressed because it is too large


@ -6,9 +6,11 @@
// performs scalar multiplication on the elliptic curve known as Curve25519.
// See RFC 7748.
//
// Starting in Go 1.20, this package is a wrapper for the X25519 implementation
// This package is a wrapper for the X25519 implementation
// in the crypto/ecdh package.
package curve25519 // import "golang.org/x/crypto/curve25519"
package curve25519
import "crypto/ecdh"
// ScalarMult sets dst to the product scalar * point.
//
@ -16,7 +18,13 @@ package curve25519 // import "golang.org/x/crypto/curve25519"
// zeroes, irrespective of the scalar. Instead, use the X25519 function, which
// will return an error.
func ScalarMult(dst, scalar, point *[32]byte) {
scalarMult(dst, scalar, point)
if _, err := x25519(dst, scalar[:], point[:]); err != nil {
// The only error condition for x25519 when the inputs are 32 bytes long
// is if the output would have been the all-zero value.
for i := range dst {
dst[i] = 0
}
}
}
// ScalarBaseMult sets dst to the product scalar * base where base is the
@ -25,7 +33,12 @@ func ScalarMult(dst, scalar, point *[32]byte) {
// It is recommended to use the X25519 function with Basepoint instead, as
// copying into fixed size arrays can lead to unexpected bugs.
func ScalarBaseMult(dst, scalar *[32]byte) {
scalarBaseMult(dst, scalar)
curve := ecdh.X25519()
priv, err := curve.NewPrivateKey(scalar[:])
if err != nil {
panic("curve25519: internal error: scalarBaseMult was not 32 bytes")
}
copy(dst[:], priv.PublicKey().Bytes())
}
const (
@ -57,3 +70,21 @@ func X25519(scalar, point []byte) ([]byte, error) {
var dst [32]byte
return x25519(&dst, scalar, point)
}
func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
curve := ecdh.X25519()
pub, err := curve.NewPublicKey(point)
if err != nil {
return nil, err
}
priv, err := curve.NewPrivateKey(scalar)
if err != nil {
return nil, err
}
out, err := priv.ECDH(pub)
if err != nil {
return nil, err
}
copy(dst[:], out)
return dst[:], nil
}
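For context, a short usage sketch of this package's public X25519 API, which is unchanged by the refactor above: a two-party key agreement where both sides derive the same shared secret. Variable names are illustrative.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Each side generates a 32-byte private scalar and derives a public key
	// by multiplying the base point.
	alicePriv := make([]byte, curve25519.ScalarSize)
	bobPriv := make([]byte, curve25519.ScalarSize)
	if _, err := rand.Read(alicePriv); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bobPriv); err != nil {
		panic(err)
	}

	alicePub, err := curve25519.X25519(alicePriv, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}
	bobPub, err := curve25519.X25519(bobPriv, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}

	// Both sides arrive at the same shared secret.
	aliceShared, err := curve25519.X25519(alicePriv, bobPub)
	if err != nil {
		panic(err)
	}
	bobShared, err := curve25519.X25519(bobPriv, alicePub)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(aliceShared) == string(bobShared)) // true
}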


@ -1,105 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.20
package curve25519
import (
"crypto/subtle"
"errors"
"strconv"
"golang.org/x/crypto/curve25519/internal/field"
)
func scalarMult(dst, scalar, point *[32]byte) {
var e [32]byte
copy(e[:], scalar[:])
e[0] &= 248
e[31] &= 127
e[31] |= 64
var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element
x1.SetBytes(point[:])
x2.One()
x3.Set(&x1)
z3.One()
swap := 0
for pos := 254; pos >= 0; pos-- {
b := e[pos/8] >> uint(pos&7)
b &= 1
swap ^= int(b)
x2.Swap(&x3, swap)
z2.Swap(&z3, swap)
swap = int(b)
tmp0.Subtract(&x3, &z3)
tmp1.Subtract(&x2, &z2)
x2.Add(&x2, &z2)
z2.Add(&x3, &z3)
z3.Multiply(&tmp0, &x2)
z2.Multiply(&z2, &tmp1)
tmp0.Square(&tmp1)
tmp1.Square(&x2)
x3.Add(&z3, &z2)
z2.Subtract(&z3, &z2)
x2.Multiply(&tmp1, &tmp0)
tmp1.Subtract(&tmp1, &tmp0)
z2.Square(&z2)
z3.Mult32(&tmp1, 121666)
x3.Square(&x3)
tmp0.Add(&tmp0, &z3)
z3.Multiply(&x1, &z2)
z2.Multiply(&tmp1, &tmp0)
}
x2.Swap(&x3, swap)
z2.Swap(&z3, swap)
z2.Invert(&z2)
x2.Multiply(&x2, &z2)
copy(dst[:], x2.Bytes())
}
func scalarBaseMult(dst, scalar *[32]byte) {
checkBasepoint()
scalarMult(dst, scalar, &basePoint)
}
func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
var in [32]byte
if l := len(scalar); l != 32 {
return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32")
}
if l := len(point); l != 32 {
return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32")
}
copy(in[:], scalar)
if &point[0] == &Basepoint[0] {
scalarBaseMult(dst, &in)
} else {
var base, zero [32]byte
copy(base[:], point)
scalarMult(dst, &in, &base)
if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 {
return nil, errors.New("bad input point: low order point")
}
}
return dst[:], nil
}
func checkBasepoint() {
if subtle.ConstantTimeCompare(Basepoint, []byte{
0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}) != 1 {
panic("curve25519: global Basepoint value was modified")
}
}


@ -1,46 +0,0 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.20
package curve25519
import "crypto/ecdh"
func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
curve := ecdh.X25519()
pub, err := curve.NewPublicKey(point)
if err != nil {
return nil, err
}
priv, err := curve.NewPrivateKey(scalar)
if err != nil {
return nil, err
}
out, err := priv.ECDH(pub)
if err != nil {
return nil, err
}
copy(dst[:], out)
return dst[:], nil
}
func scalarMult(dst, scalar, point *[32]byte) {
if _, err := x25519(dst, scalar[:], point[:]); err != nil {
// The only error condition for x25519 when the inputs are 32 bytes long
// is if the output would have been the all-zero value.
for i := range dst {
dst[i] = 0
}
}
}
func scalarBaseMult(dst, scalar *[32]byte) {
curve := ecdh.X25519()
priv, err := curve.NewPrivateKey(scalar[:])
if err != nil {
panic("curve25519: internal error: scalarBaseMult was not 32 bytes")
}
copy(dst[:], priv.PublicKey().Bytes())
}


@ -1,7 +0,0 @@
This package is kept in sync with crypto/ed25519/internal/edwards25519/field in
the standard library.
If there are any changes in the standard library that need to be synced to this
package, run sync.sh. It will not overwrite any local changes made since the
previous sync, so it's ok to land changes in this package first, and then sync
to the standard library later.


@ -1,416 +0,0 @@
// Copyright (c) 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package field implements fast arithmetic modulo 2^255-19.
package field
import (
"crypto/subtle"
"encoding/binary"
"math/bits"
)
// Element represents an element of the field GF(2^255-19). Note that this
// is not a cryptographically secure group, and should only be used to interact
// with edwards25519.Point coordinates.
//
// This type works similarly to math/big.Int, and all arguments and receivers
// are allowed to alias.
//
// The zero value is a valid zero element.
type Element struct {
// An element t represents the integer
// t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
//
// Between operations, all limbs are expected to be lower than 2^52.
l0 uint64
l1 uint64
l2 uint64
l3 uint64
l4 uint64
}
const maskLow51Bits uint64 = (1 << 51) - 1
var feZero = &Element{0, 0, 0, 0, 0}
// Zero sets v = 0, and returns v.
func (v *Element) Zero() *Element {
*v = *feZero
return v
}
var feOne = &Element{1, 0, 0, 0, 0}
// One sets v = 1, and returns v.
func (v *Element) One() *Element {
*v = *feOne
return v
}
// reduce reduces v modulo 2^255 - 19 and returns it.
func (v *Element) reduce() *Element {
v.carryPropagate()
// After the light reduction we now have a field element representation
// v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
// If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
// generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
c := (v.l0 + 19) >> 51
c = (v.l1 + c) >> 51
c = (v.l2 + c) >> 51
c = (v.l3 + c) >> 51
c = (v.l4 + c) >> 51
// If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
// effectively applying the reduction identity to the carry.
v.l0 += 19 * c
v.l1 += v.l0 >> 51
v.l0 = v.l0 & maskLow51Bits
v.l2 += v.l1 >> 51
v.l1 = v.l1 & maskLow51Bits
v.l3 += v.l2 >> 51
v.l2 = v.l2 & maskLow51Bits
v.l4 += v.l3 >> 51
v.l3 = v.l3 & maskLow51Bits
// no additional carry
v.l4 = v.l4 & maskLow51Bits
return v
}
// Add sets v = a + b, and returns v.
func (v *Element) Add(a, b *Element) *Element {
v.l0 = a.l0 + b.l0
v.l1 = a.l1 + b.l1
v.l2 = a.l2 + b.l2
v.l3 = a.l3 + b.l3
v.l4 = a.l4 + b.l4
// Using the generic implementation here is actually faster than the
// assembly. Probably because the body of this function is so simple that
// the compiler can figure out better optimizations by inlining the carry
// propagation. TODO
return v.carryPropagateGeneric()
}
// Subtract sets v = a - b, and returns v.
func (v *Element) Subtract(a, b *Element) *Element {
// We first add 2 * p, to guarantee the subtraction won't underflow, and
// then subtract b (which can be up to 2^255 + 2^13 * 19).
v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
return v.carryPropagate()
}
// Negate sets v = -a, and returns v.
func (v *Element) Negate(a *Element) *Element {
return v.Subtract(feZero, a)
}
// Invert sets v = 1/z mod p, and returns v.
//
// If z == 0, Invert returns v = 0.
func (v *Element) Invert(z *Element) *Element {
// Inversion is implemented as exponentiation with exponent p - 2. It uses the
// same sequence of 255 squarings and 11 multiplications as [Curve25519].
var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
z2.Square(z) // 2
t.Square(&z2) // 4
t.Square(&t) // 8
z9.Multiply(&t, z) // 9
z11.Multiply(&z9, &z2) // 11
t.Square(&z11) // 22
z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
t.Square(&z2_5_0) // 2^6 - 2^1
for i := 0; i < 4; i++ {
t.Square(&t) // 2^10 - 2^5
}
z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
t.Square(&z2_10_0) // 2^11 - 2^1
for i := 0; i < 9; i++ {
t.Square(&t) // 2^20 - 2^10
}
z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
t.Square(&z2_20_0) // 2^21 - 2^1
for i := 0; i < 19; i++ {
t.Square(&t) // 2^40 - 2^20
}
t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
t.Square(&t) // 2^41 - 2^1
for i := 0; i < 9; i++ {
t.Square(&t) // 2^50 - 2^10
}
z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
t.Square(&z2_50_0) // 2^51 - 2^1
for i := 0; i < 49; i++ {
t.Square(&t) // 2^100 - 2^50
}
z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
t.Square(&z2_100_0) // 2^101 - 2^1
for i := 0; i < 99; i++ {
t.Square(&t) // 2^200 - 2^100
}
t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
t.Square(&t) // 2^201 - 2^1
for i := 0; i < 49; i++ {
t.Square(&t) // 2^250 - 2^50
}
t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
t.Square(&t) // 2^251 - 2^1
t.Square(&t) // 2^252 - 2^2
t.Square(&t) // 2^253 - 2^3
t.Square(&t) // 2^254 - 2^4
t.Square(&t) // 2^255 - 2^5
return v.Multiply(&t, &z11) // 2^255 - 21
}
// Set sets v = a, and returns v.
func (v *Element) Set(a *Element) *Element {
*v = *a
return v
}
// SetBytes sets v to x, which must be a 32-byte little-endian encoding.
//
// Consistent with RFC 7748, the most significant bit (the high bit of the
// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
// are accepted. Note that this is laxer than specified by RFC 8032.
func (v *Element) SetBytes(x []byte) *Element {
if len(x) != 32 {
panic("edwards25519: invalid field element input size")
}
// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
v.l0 = binary.LittleEndian.Uint64(x[0:8])
v.l0 &= maskLow51Bits
// Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
v.l1 &= maskLow51Bits
// Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
v.l2 &= maskLow51Bits
// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
v.l3 &= maskLow51Bits
// Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51).
// Note: not bytes 25:33, shift 4, to avoid overread.
v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
v.l4 &= maskLow51Bits
return v
}
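// A usage sketch (assuming b is a 32-byte little-endian encoding):
//
//	var e Element
//	e.SetBytes(b)      // decode; the top bit of b[31] is ignored
//	canon := e.Bytes() // re-encode reduced mod p, so canon may differ from b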
// Bytes returns the canonical 32-byte little-endian encoding of v.
func (v *Element) Bytes() []byte {
// This function is outlined to make the allocations inline in the caller
// rather than happen on the heap.
var out [32]byte
return v.bytes(&out)
}
func (v *Element) bytes(out *[32]byte) []byte {
t := *v
t.reduce()
var buf [8]byte
for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
bitsOffset := i * 51
binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
for i, bb := range buf {
off := bitsOffset/8 + i
if off >= len(out) {
break
}
out[off] |= bb
}
}
return out[:]
}
// Equal returns 1 if v and u are equal, and 0 otherwise.
func (v *Element) Equal(u *Element) int {
sa, sv := u.Bytes(), v.Bytes()
return subtle.ConstantTimeCompare(sa, sv)
}
// mask64Bits returns 0xffffffffffffffff if cond is 1, and 0 otherwise.
func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
// Select sets v to a if cond == 1, and to b if cond == 0.
func (v *Element) Select(a, b *Element, cond int) *Element {
m := mask64Bits(cond)
v.l0 = (m & a.l0) | (^m & b.l0)
v.l1 = (m & a.l1) | (^m & b.l1)
v.l2 = (m & a.l2) | (^m & b.l2)
v.l3 = (m & a.l3) | (^m & b.l3)
v.l4 = (m & a.l4) | (^m & b.l4)
return v
}
// Swap swaps v and u if cond == 1, or leaves them unchanged if cond == 0.
func (v *Element) Swap(u *Element, cond int) {
m := mask64Bits(cond)
t := m & (v.l0 ^ u.l0)
v.l0 ^= t
u.l0 ^= t
t = m & (v.l1 ^ u.l1)
v.l1 ^= t
u.l1 ^= t
t = m & (v.l2 ^ u.l2)
v.l2 ^= t
u.l2 ^= t
t = m & (v.l3 ^ u.l3)
v.l3 ^= t
u.l3 ^= t
t = m & (v.l4 ^ u.l4)
v.l4 ^= t
u.l4 ^= t
}
// IsNegative returns 1 if v is negative, and 0 otherwise.
func (v *Element) IsNegative() int {
return int(v.Bytes()[0] & 1)
}
// Absolute sets v to |u|, and returns v.
func (v *Element) Absolute(u *Element) *Element {
return v.Select(new(Element).Negate(u), u, u.IsNegative())
}
// Multiply sets v = x * y, and returns v.
func (v *Element) Multiply(x, y *Element) *Element {
feMul(v, x, y)
return v
}
// Square sets v = x * x, and returns v.
func (v *Element) Square(x *Element) *Element {
feSquare(v, x)
return v
}
// Mult32 sets v = x * y, and returns v.
func (v *Element) Mult32(x *Element, y uint32) *Element {
x0lo, x0hi := mul51(x.l0, y)
x1lo, x1hi := mul51(x.l1, y)
x2lo, x2hi := mul51(x.l2, y)
x3lo, x3hi := mul51(x.l3, y)
x4lo, x4hi := mul51(x.l4, y)
v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
v.l1 = x1lo + x0hi
v.l2 = x2lo + x1hi
v.l3 = x3lo + x2hi
v.l4 = x4lo + x3hi
// The hi portions are going to be only 32 bits, plus any previous excess,
// so we can skip the carry propagation.
return v
}
// mul51 returns lo + hi * 2⁵¹ = a * b.
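// Since a*b = mh*2^64 + ml and 2^64 = 2^13 * 2^51, this is
// (mh<<13 | ml>>51) * 2^51 + (ml & maskLow51Bits).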
func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
mh, ml := bits.Mul64(a, uint64(b))
lo = ml & maskLow51Bits
hi = (mh << 13) | (ml >> 51)
return
}
// Pow22523 sets v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
func (v *Element) Pow22523(x *Element) *Element {
var t0, t1, t2 Element
t0.Square(x) // x^2
t1.Square(&t0) // x^4
t1.Square(&t1) // x^8
t1.Multiply(x, &t1) // x^9
t0.Multiply(&t0, &t1) // x^11
t0.Square(&t0) // x^22
t0.Multiply(&t1, &t0) // x^31
t1.Square(&t0) // x^62
for i := 1; i < 5; i++ { // x^992
t1.Square(&t1)
}
t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
t1.Square(&t0) // 2^11 - 2
for i := 1; i < 10; i++ { // 2^20 - 2^10
t1.Square(&t1)
}
t1.Multiply(&t1, &t0) // 2^20 - 1
t2.Square(&t1) // 2^21 - 2
for i := 1; i < 20; i++ { // 2^40 - 2^20
t2.Square(&t2)
}
t1.Multiply(&t2, &t1) // 2^40 - 1
t1.Square(&t1) // 2^41 - 2
for i := 1; i < 10; i++ { // 2^50 - 2^10
t1.Square(&t1)
}
t0.Multiply(&t1, &t0) // 2^50 - 1
t1.Square(&t0) // 2^51 - 2
for i := 1; i < 50; i++ { // 2^100 - 2^50
t1.Square(&t1)
}
t1.Multiply(&t1, &t0) // 2^100 - 1
t2.Square(&t1) // 2^101 - 2
for i := 1; i < 100; i++ { // 2^200 - 2^100
t2.Square(&t2)
}
t1.Multiply(&t2, &t1) // 2^200 - 1
t1.Square(&t1) // 2^201 - 2
for i := 1; i < 50; i++ { // 2^250 - 2^50
t1.Square(&t1)
}
t0.Multiply(&t1, &t0) // 2^250 - 1
t0.Square(&t0) // 2^251 - 2
t0.Square(&t0) // 2^252 - 4
return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
}
// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
var sqrtM1 = &Element{1718705420411056, 234908883556509,
2233514472574048, 2117202627021982, 765476049583133}
// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
//
// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
// and returns r and 0.
func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) {
var a, b Element
// r = (u * v3) * (u * v7)^((p-5)/8)
v2 := a.Square(v)
uv3 := b.Multiply(u, b.Multiply(v2, v))
uv7 := a.Multiply(uv3, a.Square(v2))
r.Multiply(uv3, r.Pow22523(uv7))
check := a.Multiply(v, a.Square(r)) // check = v * r^2
uNeg := b.Negate(u)
correctSignSqrt := check.Equal(u)
flippedSignSqrt := check.Equal(uNeg)
flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1))
rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r
// r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI)
r.Absolute(r) // Choose the nonnegative square root.
return r, correctSignSqrt | flippedSignSqrt
}
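// A usage sketch (u and v are illustrative *Element values):
//
//	var r Element
//	if _, wasSquare := r.SqrtRatio(u, v); wasSquare == 1 {
//		// r now holds the canonical (non-negative) square root of u/v
//	}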

View File

@ -1,15 +0,0 @@
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
//go:build amd64 && gc && !purego
package field
// feMul sets out = a * b. It works like feMulGeneric.
//
//go:noescape
func feMul(out *Element, a *Element, b *Element)
// feSquare sets out = a * a. It works like feSquareGeneric.
//
//go:noescape
func feSquare(out *Element, a *Element)

View File

@ -1,378 +0,0 @@
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
//go:build amd64 && gc && !purego
#include "textflag.h"
// func feMul(out *Element, a *Element, b *Element)
TEXT ·feMul(SB), NOSPLIT, $0-24
MOVQ a+8(FP), CX
MOVQ b+16(FP), BX
// r0 = a0×b0
MOVQ (CX), AX
MULQ (BX)
MOVQ AX, DI
MOVQ DX, SI
// r0 += 19×a1×b4
MOVQ 8(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 32(BX)
ADDQ AX, DI
ADCQ DX, SI
// r0 += 19×a2×b3
MOVQ 16(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 24(BX)
ADDQ AX, DI
ADCQ DX, SI
// r0 += 19×a3×b2
MOVQ 24(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 16(BX)
ADDQ AX, DI
ADCQ DX, SI
// r0 += 19×a4×b1
MOVQ 32(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 8(BX)
ADDQ AX, DI
ADCQ DX, SI
// r1 = a0×b1
MOVQ (CX), AX
MULQ 8(BX)
MOVQ AX, R9
MOVQ DX, R8
// r1 += a1×b0
MOVQ 8(CX), AX
MULQ (BX)
ADDQ AX, R9
ADCQ DX, R8
// r1 += 19×a2×b4
MOVQ 16(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 32(BX)
ADDQ AX, R9
ADCQ DX, R8
// r1 += 19×a3×b3
MOVQ 24(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 24(BX)
ADDQ AX, R9
ADCQ DX, R8
// r1 += 19×a4×b2
MOVQ 32(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 16(BX)
ADDQ AX, R9
ADCQ DX, R8
// r2 = a0×b2
MOVQ (CX), AX
MULQ 16(BX)
MOVQ AX, R11
MOVQ DX, R10
// r2 += a1×b1
MOVQ 8(CX), AX
MULQ 8(BX)
ADDQ AX, R11
ADCQ DX, R10
// r2 += a2×b0
MOVQ 16(CX), AX
MULQ (BX)
ADDQ AX, R11
ADCQ DX, R10
// r2 += 19×a3×b4
MOVQ 24(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 32(BX)
ADDQ AX, R11
ADCQ DX, R10
// r2 += 19×a4×b3
MOVQ 32(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 24(BX)
ADDQ AX, R11
ADCQ DX, R10
// r3 = a0×b3
MOVQ (CX), AX
MULQ 24(BX)
MOVQ AX, R13
MOVQ DX, R12
// r3 += a1×b2
MOVQ 8(CX), AX
MULQ 16(BX)
ADDQ AX, R13
ADCQ DX, R12
// r3 += a2×b1
MOVQ 16(CX), AX
MULQ 8(BX)
ADDQ AX, R13
ADCQ DX, R12
// r3 += a3×b0
MOVQ 24(CX), AX
MULQ (BX)
ADDQ AX, R13
ADCQ DX, R12
// r3 += 19×a4×b4
MOVQ 32(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 32(BX)
ADDQ AX, R13
ADCQ DX, R12
// r4 = a0×b4
MOVQ (CX), AX
MULQ 32(BX)
MOVQ AX, R15
MOVQ DX, R14
// r4 += a1×b3
MOVQ 8(CX), AX
MULQ 24(BX)
ADDQ AX, R15
ADCQ DX, R14
// r4 += a2×b2
MOVQ 16(CX), AX
MULQ 16(BX)
ADDQ AX, R15
ADCQ DX, R14
// r4 += a3×b1
MOVQ 24(CX), AX
MULQ 8(BX)
ADDQ AX, R15
ADCQ DX, R14
// r4 += a4×b0
MOVQ 32(CX), AX
MULQ (BX)
ADDQ AX, R15
ADCQ DX, R14
// First reduction chain
MOVQ $0x0007ffffffffffff, AX
SHLQ $0x0d, DI, SI
SHLQ $0x0d, R9, R8
SHLQ $0x0d, R11, R10
SHLQ $0x0d, R13, R12
SHLQ $0x0d, R15, R14
ANDQ AX, DI
IMUL3Q $0x13, R14, R14
ADDQ R14, DI
ANDQ AX, R9
ADDQ SI, R9
ANDQ AX, R11
ADDQ R8, R11
ANDQ AX, R13
ADDQ R10, R13
ANDQ AX, R15
ADDQ R12, R15
// Second reduction chain (carryPropagate)
MOVQ DI, SI
SHRQ $0x33, SI
MOVQ R9, R8
SHRQ $0x33, R8
MOVQ R11, R10
SHRQ $0x33, R10
MOVQ R13, R12
SHRQ $0x33, R12
MOVQ R15, R14
SHRQ $0x33, R14
ANDQ AX, DI
IMUL3Q $0x13, R14, R14
ADDQ R14, DI
ANDQ AX, R9
ADDQ SI, R9
ANDQ AX, R11
ADDQ R8, R11
ANDQ AX, R13
ADDQ R10, R13
ANDQ AX, R15
ADDQ R12, R15
// Store output
MOVQ out+0(FP), AX
MOVQ DI, (AX)
MOVQ R9, 8(AX)
MOVQ R11, 16(AX)
MOVQ R13, 24(AX)
MOVQ R15, 32(AX)
RET
// func feSquare(out *Element, a *Element)
TEXT ·feSquare(SB), NOSPLIT, $0-16
MOVQ a+8(FP), CX
// r0 = l0×l0
MOVQ (CX), AX
MULQ (CX)
MOVQ AX, SI
MOVQ DX, BX
// r0 += 38×l1×l4
MOVQ 8(CX), AX
IMUL3Q $0x26, AX, AX
MULQ 32(CX)
ADDQ AX, SI
ADCQ DX, BX
// r0 += 38×l2×l3
MOVQ 16(CX), AX
IMUL3Q $0x26, AX, AX
MULQ 24(CX)
ADDQ AX, SI
ADCQ DX, BX
// r1 = 2×l0×l1
MOVQ (CX), AX
SHLQ $0x01, AX
MULQ 8(CX)
MOVQ AX, R8
MOVQ DX, DI
// r1 += 38×l2×l4
MOVQ 16(CX), AX
IMUL3Q $0x26, AX, AX
MULQ 32(CX)
ADDQ AX, R8
ADCQ DX, DI
// r1 += 19×l3×l3
MOVQ 24(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 24(CX)
ADDQ AX, R8
ADCQ DX, DI
// r2 = 2×l0×l2
MOVQ (CX), AX
SHLQ $0x01, AX
MULQ 16(CX)
MOVQ AX, R10
MOVQ DX, R9
// r2 += l1×l1
MOVQ 8(CX), AX
MULQ 8(CX)
ADDQ AX, R10
ADCQ DX, R9
// r2 += 38×l3×l4
MOVQ 24(CX), AX
IMUL3Q $0x26, AX, AX
MULQ 32(CX)
ADDQ AX, R10
ADCQ DX, R9
// r3 = 2×l0×l3
MOVQ (CX), AX
SHLQ $0x01, AX
MULQ 24(CX)
MOVQ AX, R12
MOVQ DX, R11
// r3 += 2×l1×l2
MOVQ 8(CX), AX
IMUL3Q $0x02, AX, AX
MULQ 16(CX)
ADDQ AX, R12
ADCQ DX, R11
// r3 += 19×l4×l4
MOVQ 32(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 32(CX)
ADDQ AX, R12
ADCQ DX, R11
// r4 = 2×l0×l4
MOVQ (CX), AX
SHLQ $0x01, AX
MULQ 32(CX)
MOVQ AX, R14
MOVQ DX, R13
// r4 += 2×l1×l3
MOVQ 8(CX), AX
IMUL3Q $0x02, AX, AX
MULQ 24(CX)
ADDQ AX, R14
ADCQ DX, R13
// r4 += l2×l2
MOVQ 16(CX), AX
MULQ 16(CX)
ADDQ AX, R14
ADCQ DX, R13
// First reduction chain
MOVQ $0x0007ffffffffffff, AX
SHLQ $0x0d, SI, BX
SHLQ $0x0d, R8, DI
SHLQ $0x0d, R10, R9
SHLQ $0x0d, R12, R11
SHLQ $0x0d, R14, R13
ANDQ AX, SI
IMUL3Q $0x13, R13, R13
ADDQ R13, SI
ANDQ AX, R8
ADDQ BX, R8
ANDQ AX, R10
ADDQ DI, R10
ANDQ AX, R12
ADDQ R9, R12
ANDQ AX, R14
ADDQ R11, R14
// Second reduction chain (carryPropagate)
MOVQ SI, BX
SHRQ $0x33, BX
MOVQ R8, DI
SHRQ $0x33, DI
MOVQ R10, R9
SHRQ $0x33, R9
MOVQ R12, R11
SHRQ $0x33, R11
MOVQ R14, R13
SHRQ $0x33, R13
ANDQ AX, SI
IMUL3Q $0x13, R13, R13
ADDQ R13, SI
ANDQ AX, R8
ADDQ BX, R8
ANDQ AX, R10
ADDQ DI, R10
ANDQ AX, R12
ADDQ R9, R12
ANDQ AX, R14
ADDQ R11, R14
// Store output
MOVQ out+0(FP), AX
MOVQ SI, (AX)
MOVQ R8, 8(AX)
MOVQ R10, 16(AX)
MOVQ R12, 24(AX)
MOVQ R14, 32(AX)
RET

View File

@ -1,11 +0,0 @@
// Copyright (c) 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !amd64 || !gc || purego
package field
func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }
func feSquare(v, x *Element) { feSquareGeneric(v, x) }

View File

@ -1,15 +0,0 @@
// Copyright (c) 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build arm64 && gc && !purego
package field
//go:noescape
func carryPropagate(v *Element)
func (v *Element) carryPropagate() *Element {
carryPropagate(v)
return v
}

View File

@ -1,42 +0,0 @@
// Copyright (c) 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build arm64 && gc && !purego
#include "textflag.h"
// carryPropagate works exactly like carryPropagateGeneric and uses the
// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
// avoids loading R0-R4 twice and uses LDP and STP.
//
// See https://golang.org/issues/43145 for the main compiler issue.
//
// func carryPropagate(v *Element)
TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
MOVD v+0(FP), R20
LDP 0(R20), (R0, R1)
LDP 16(R20), (R2, R3)
MOVD 32(R20), R4
AND $0x7ffffffffffff, R0, R10
AND $0x7ffffffffffff, R1, R11
AND $0x7ffffffffffff, R2, R12
AND $0x7ffffffffffff, R3, R13
AND $0x7ffffffffffff, R4, R14
ADD R0>>51, R11, R11
ADD R1>>51, R12, R12
ADD R2>>51, R13, R13
ADD R3>>51, R14, R14
// R4>>51 * 19 + R10 -> R10
LSR $51, R4, R21
MOVD $19, R22
MADD R22, R10, R21, R10
STP (R10, R11), 0(R20)
STP (R12, R13), 16(R20)
MOVD R14, 32(R20)
RET

View File

@ -1,11 +0,0 @@
// Copyright (c) 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !arm64 || !gc || purego
package field
func (v *Element) carryPropagate() *Element {
return v.carryPropagateGeneric()
}

View File

@ -1,264 +0,0 @@
// Copyright (c) 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package field
import "math/bits"
// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
// bits.Mul64 and bits.Add64 intrinsics.
type uint128 struct {
lo, hi uint64
}
// mul64 returns a * b.
func mul64(a, b uint64) uint128 {
hi, lo := bits.Mul64(a, b)
return uint128{lo, hi}
}
// addMul64 returns v + a * b.
func addMul64(v uint128, a, b uint64) uint128 {
hi, lo := bits.Mul64(a, b)
lo, c := bits.Add64(lo, v.lo, 0)
hi, _ = bits.Add64(hi, v.hi, c)
return uint128{lo, hi}
}
// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
func shiftRightBy51(a uint128) uint64 {
return (a.hi << (64 - 51)) | (a.lo >> 51)
}
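// As a sketch of how these helpers compose in the functions below
// (variable names mirror feMulGeneric):
//
//	acc := mul64(a0, b0)           // acc = a0*b0 as a 128-bit value
//	acc = addMul64(acc, a1_19, b4) // acc += 19*a1*b4, still within 128 bits
//	carry := shiftRightBy51(acc)   // bits 51 and up, folded into the next limb
//	limb := acc.lo & maskLow51Bits // the 51 bits that stay in this position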
func feMulGeneric(v, a, b *Element) {
a0 := a.l0
a1 := a.l1
a2 := a.l2
a3 := a.l3
a4 := a.l4
b0 := b.l0
b1 := b.l1
b2 := b.l2
b3 := b.l3
b4 := b.l4
// Limb multiplication works like pen-and-paper columnar multiplication, but
// with 51-bit limbs instead of digits.
//
// a4 a3 a2 a1 a0 x
// b4 b3 b2 b1 b0 =
// ------------------------
// a4b0 a3b0 a2b0 a1b0 a0b0 +
// a4b1 a3b1 a2b1 a1b1 a0b1 +
// a4b2 a3b2 a2b2 a1b2 a0b2 +
// a4b3 a3b3 a2b3 a1b3 a0b3 +
// a4b4 a3b4 a2b4 a1b4 a0b4 =
// ----------------------------------------------
// r8 r7 r6 r5 r4 r3 r2 r1 r0
//
// We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
// reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
// r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
//
// Reduction can be carried out simultaneously to multiplication. For
// example, we do not compute r5: whenever the result of a multiplication
// belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
//
// a4b0 a3b0 a2b0 a1b0 a0b0 +
// a3b1 a2b1 a1b1 a0b1 19×a4b1 +
// a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
// a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
// a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
// --------------------------------------
// r4 r3 r2 r1 r0
//
// Finally we add up the columns into wide, overlapping limbs.
a1_19 := a1 * 19
a2_19 := a2 * 19
a3_19 := a3 * 19
a4_19 := a4 * 19
// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
r0 := mul64(a0, b0)
r0 = addMul64(r0, a1_19, b4)
r0 = addMul64(r0, a2_19, b3)
r0 = addMul64(r0, a3_19, b2)
r0 = addMul64(r0, a4_19, b1)
// r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
r1 := mul64(a0, b1)
r1 = addMul64(r1, a1, b0)
r1 = addMul64(r1, a2_19, b4)
r1 = addMul64(r1, a3_19, b3)
r1 = addMul64(r1, a4_19, b2)
// r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
r2 := mul64(a0, b2)
r2 = addMul64(r2, a1, b1)
r2 = addMul64(r2, a2, b0)
r2 = addMul64(r2, a3_19, b4)
r2 = addMul64(r2, a4_19, b3)
// r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
r3 := mul64(a0, b3)
r3 = addMul64(r3, a1, b2)
r3 = addMul64(r3, a2, b1)
r3 = addMul64(r3, a3, b0)
r3 = addMul64(r3, a4_19, b4)
// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
r4 := mul64(a0, b4)
r4 = addMul64(r4, a1, b3)
r4 = addMul64(r4, a2, b2)
r4 = addMul64(r4, a3, b1)
r4 = addMul64(r4, a4, b0)
// After the multiplication, we need to reduce (carry) the five coefficients
// to obtain a result with limbs that are at most slightly larger than 2⁵¹,
// to respect the Element invariant.
//
// Overall, the reduction works the same as carryPropagate, except with
// wider inputs: we take the carry for each coefficient by shifting it right
// by 51, and add it to the limb above it. The top carry is multiplied by 19
// according to the reduction identity and added to the lowest limb.
//
// The largest coefficient (r0) will be at most 111 bits, which guarantees
// that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
//
// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
// r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
// r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
// r0 < 2⁷ × 2⁵² × 2⁵²
// r0 < 2¹¹¹
//
// Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
// 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
// allows us to easily apply the reduction identity.
//
// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
// r4 < 5 × 2⁵² × 2⁵²
// r4 < 2¹⁰⁷
//
c0 := shiftRightBy51(r0)
c1 := shiftRightBy51(r1)
c2 := shiftRightBy51(r2)
c3 := shiftRightBy51(r3)
c4 := shiftRightBy51(r4)
rr0 := r0.lo&maskLow51Bits + c4*19
rr1 := r1.lo&maskLow51Bits + c0
rr2 := r2.lo&maskLow51Bits + c1
rr3 := r3.lo&maskLow51Bits + c2
rr4 := r4.lo&maskLow51Bits + c3
// Now all coefficients fit into 64-bit registers but are still too large to
// be passed around as an Element. We therefore do one last carry chain,
// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
*v = Element{rr0, rr1, rr2, rr3, rr4}
v.carryPropagate()
}
func feSquareGeneric(v, a *Element) {
l0 := a.l0
l1 := a.l1
l2 := a.l2
l3 := a.l3
l4 := a.l4
// Squaring works precisely like multiplication above, but thanks to its
// symmetry we get to group a few terms together.
//
// l4 l3 l2 l1 l0 x
// l4 l3 l2 l1 l0 =
// ------------------------
// l4l0 l3l0 l2l0 l1l0 l0l0 +
// l4l1 l3l1 l2l1 l1l1 l0l1 +
// l4l2 l3l2 l2l2 l1l2 l0l2 +
// l4l3 l3l3 l2l3 l1l3 l0l3 +
// l4l4 l3l4 l2l4 l1l4 l0l4 =
// ----------------------------------------------
// r8 r7 r6 r5 r4 r3 r2 r1 r0
//
// l4l0 l3l0 l2l0 l1l0 l0l0 +
// l3l1 l2l1 l1l1 l0l1 19×l4l1 +
// l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
// l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
// l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
// --------------------------------------
// r4 r3 r2 r1 r0
//
// With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
// only three Mul64 and four Add64, instead of five and eight.
l0_2 := l0 * 2
l1_2 := l1 * 2
l1_38 := l1 * 38
l2_38 := l2 * 38
l3_38 := l3 * 38
l3_19 := l3 * 19
l4_19 := l4 * 19
// r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
r0 := mul64(l0, l0)
r0 = addMul64(r0, l1_38, l4)
r0 = addMul64(r0, l2_38, l3)
// r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
r1 := mul64(l0_2, l1)
r1 = addMul64(r1, l2_38, l4)
r1 = addMul64(r1, l3_19, l3)
// r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
r2 := mul64(l0_2, l2)
r2 = addMul64(r2, l1, l1)
r2 = addMul64(r2, l3_38, l4)
// r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
r3 := mul64(l0_2, l3)
r3 = addMul64(r3, l1_2, l2)
r3 = addMul64(r3, l4_19, l4)
// r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
r4 := mul64(l0_2, l4)
r4 = addMul64(r4, l1_2, l3)
r4 = addMul64(r4, l2, l2)
c0 := shiftRightBy51(r0)
c1 := shiftRightBy51(r1)
c2 := shiftRightBy51(r2)
c3 := shiftRightBy51(r3)
c4 := shiftRightBy51(r4)
rr0 := r0.lo&maskLow51Bits + c4*19
rr1 := r1.lo&maskLow51Bits + c0
rr2 := r2.lo&maskLow51Bits + c1
rr3 := r3.lo&maskLow51Bits + c2
rr4 := r4.lo&maskLow51Bits + c3
*v = Element{rr0, rr1, rr2, rr3, rr4}
v.carryPropagate()
}
// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline
func (v *Element) carryPropagateGeneric() *Element {
c0 := v.l0 >> 51
c1 := v.l1 >> 51
c2 := v.l2 >> 51
c3 := v.l3 >> 51
c4 := v.l4 >> 51
v.l0 = v.l0&maskLow51Bits + c4*19
v.l1 = v.l1&maskLow51Bits + c0
v.l2 = v.l2&maskLow51Bits + c1
v.l3 = v.l3&maskLow51Bits + c2
v.l4 = v.l4&maskLow51Bits + c3
return v
}

View File

@ -1 +0,0 @@
b0c49ae9f59d233526f8934262c5bbbe14d4358d

View File

@ -1,19 +0,0 @@
#! /bin/bash
set -euo pipefail
cd "$(git rev-parse --show-toplevel)"
STD_PATH=src/crypto/ed25519/internal/edwards25519/field
LOCAL_PATH=curve25519/internal/field
LAST_SYNC_REF=$(cat $LOCAL_PATH/sync.checkpoint)
git fetch https://go.googlesource.com/go master
if git diff --quiet $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH; then
echo "No changes."
else
NEW_REF=$(git rev-parse FETCH_HEAD | tee $LOCAL_PATH/sync.checkpoint)
echo "Applying changes from $LAST_SYNC_REF to $NEW_REF..."
git diff $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH | \
git apply -3 --directory=$LOCAL_PATH
fi

View File

@ -8,7 +8,7 @@
// HKDF is a cryptographic key derivation function (KDF) with the goal of
// expanding limited input keying material into one or more cryptographically
// strong secret keys.
package hkdf // import "golang.org/x/crypto/hkdf"
package hkdf
import (
"crypto/hmac"

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
//go:build (!amd64 && !ppc64le && !ppc64 && !s390x) || !gc || purego
package poly1305

View File

@ -1,108 +1,93 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by command: go run sum_amd64_asm.go -out ../sum_amd64.s -pkg poly1305. DO NOT EDIT.
//go:build gc && !purego
#include "textflag.h"
#define POLY1305_ADD(msg, h0, h1, h2) \
ADDQ 0(msg), h0; \
ADCQ 8(msg), h1; \
ADCQ $1, h2; \
LEAQ 16(msg), msg
#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \
MOVQ r0, AX; \
MULQ h0; \
MOVQ AX, t0; \
MOVQ DX, t1; \
MOVQ r0, AX; \
MULQ h1; \
ADDQ AX, t1; \
ADCQ $0, DX; \
MOVQ r0, t2; \
IMULQ h2, t2; \
ADDQ DX, t2; \
\
MOVQ r1, AX; \
MULQ h0; \
ADDQ AX, t1; \
ADCQ $0, DX; \
MOVQ DX, h0; \
MOVQ r1, t3; \
IMULQ h2, t3; \
MOVQ r1, AX; \
MULQ h1; \
ADDQ AX, t2; \
ADCQ DX, t3; \
ADDQ h0, t2; \
ADCQ $0, t3; \
\
MOVQ t0, h0; \
MOVQ t1, h1; \
MOVQ t2, h2; \
ANDQ $3, h2; \
MOVQ t2, t0; \
ANDQ $0xFFFFFFFFFFFFFFFC, t0; \
ADDQ t0, h0; \
ADCQ t3, h1; \
ADCQ $0, h2; \
SHRQ $2, t3, t2; \
SHRQ $2, t3; \
ADDQ t2, h0; \
ADCQ t3, h1; \
ADCQ $0, h2
// func update(state *[7]uint64, msg []byte)
// func update(state *macState, msg []byte)
TEXT ·update(SB), $0-32
MOVQ state+0(FP), DI
MOVQ msg_base+8(FP), SI
MOVQ msg_len+16(FP), R15
MOVQ 0(DI), R8 // h0
MOVQ 8(DI), R9 // h1
MOVQ 16(DI), R10 // h2
MOVQ 24(DI), R11 // r0
MOVQ 32(DI), R12 // r1
CMPQ R15, $16
MOVQ (DI), R8
MOVQ 8(DI), R9
MOVQ 16(DI), R10
MOVQ 24(DI), R11
MOVQ 32(DI), R12
CMPQ R15, $0x10
JB bytes_between_0_and_15
loop:
POLY1305_ADD(SI, R8, R9, R10)
ADDQ (SI), R8
ADCQ 8(SI), R9
ADCQ $0x01, R10
LEAQ 16(SI), SI
multiply:
POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
SUBQ $16, R15
CMPQ R15, $16
JAE loop
MOVQ R11, AX
MULQ R8
MOVQ AX, BX
MOVQ DX, CX
MOVQ R11, AX
MULQ R9
ADDQ AX, CX
ADCQ $0x00, DX
MOVQ R11, R13
IMULQ R10, R13
ADDQ DX, R13
MOVQ R12, AX
MULQ R8
ADDQ AX, CX
ADCQ $0x00, DX
MOVQ DX, R8
MOVQ R12, R14
IMULQ R10, R14
MOVQ R12, AX
MULQ R9
ADDQ AX, R13
ADCQ DX, R14
ADDQ R8, R13
ADCQ $0x00, R14
MOVQ BX, R8
MOVQ CX, R9
MOVQ R13, R10
ANDQ $0x03, R10
MOVQ R13, BX
ANDQ $-4, BX
ADDQ BX, R8
ADCQ R14, R9
ADCQ $0x00, R10
SHRQ $0x02, R14, R13
SHRQ $0x02, R14
ADDQ R13, R8
ADCQ R14, R9
ADCQ $0x00, R10
SUBQ $0x10, R15
CMPQ R15, $0x10
JAE loop
bytes_between_0_and_15:
TESTQ R15, R15
JZ done
MOVQ $1, BX
MOVQ $0x00000001, BX
XORQ CX, CX
XORQ R13, R13
ADDQ R15, SI
flush_buffer:
SHLQ $8, BX, CX
SHLQ $8, BX
SHLQ $0x08, BX, CX
SHLQ $0x08, BX
MOVB -1(SI), R13
XORQ R13, BX
DECQ SI
DECQ R15
JNZ flush_buffer
ADDQ BX, R8
ADCQ CX, R9
ADCQ $0, R10
MOVQ $16, R15
ADCQ $0x00, R10
MOVQ $0x00000010, R15
JMP multiply
done:
MOVQ R8, 0(DI)
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
RET

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
//go:build gc && !purego && (ppc64 || ppc64le)
package poly1305

View File

@ -2,15 +2,25 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
//go:build gc && !purego && (ppc64 || ppc64le)
#include "textflag.h"
// This was ported from the amd64 implementation.
#ifdef GOARCH_ppc64le
#define LE_MOVD MOVD
#define LE_MOVWZ MOVWZ
#define LE_MOVHZ MOVHZ
#else
#define LE_MOVD MOVDBR
#define LE_MOVWZ MOVWBR
#define LE_MOVHZ MOVHBR
#endif
#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \
MOVD (msg), t0; \
MOVD 8(msg), t1; \
LE_MOVD (msg)( R0), t0; \
LE_MOVD (msg)(R24), t1; \
MOVD $1, t2; \
ADDC t0, h0, h0; \
ADDE t1, h1, h1; \
@ -50,10 +60,6 @@
ADDE t3, h1, h1; \
ADDZE h2
DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
GLOBL ·poly1305Mask<>(SB), RODATA, $16
// func update(state *[7]uint64, msg []byte)
TEXT ·update(SB), $0-32
MOVD state+0(FP), R3
@ -66,6 +72,8 @@ TEXT ·update(SB), $0-32
MOVD 24(R3), R11 // r0
MOVD 32(R3), R12 // r1
MOVD $8, R24
CMP R5, $16
BLT bytes_between_0_and_15
@ -94,7 +102,7 @@ flush_buffer:
// Greater than 8 -- load the rightmost remaining bytes in msg
// and put into R17 (h1)
MOVD (R4)(R21), R17
LE_MOVD (R4)(R21), R17
MOVD $16, R22
// Find the offset to those bytes
@ -118,7 +126,7 @@ just1:
BLT less8
// Exactly 8
MOVD (R4), R16
LE_MOVD (R4), R16
CMP R17, $0
@ -133,7 +141,7 @@ less8:
MOVD $0, R22 // shift count
CMP R5, $4
BLT less4
MOVWZ (R4), R16
LE_MOVWZ (R4), R16
ADD $4, R4
ADD $-4, R5
MOVD $32, R22
@ -141,7 +149,7 @@ less8:
less4:
CMP R5, $2
BLT less2
MOVHZ (R4), R21
LE_MOVHZ (R4), R21
SLD R22, R21, R21
OR R16, R21, R16
ADD $16, R22

View File

@ -35,7 +35,7 @@ Anonymous sealing/opening is an extension of NaCl defined by and interoperable
with libsodium:
https://libsodium.gitbook.io/doc/public-key_cryptography/sealed_boxes.
*/
package box // import "golang.org/x/crypto/nacl/box"
package box
import (
cryptorand "crypto/rand"

View File

@ -32,7 +32,7 @@ chunk size.
This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
*/
package secretbox // import "golang.org/x/crypto/nacl/secretbox"
package secretbox
import (
"golang.org/x/crypto/internal/alias"

View File

@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
choose, you can pass the `New` functions from the different SHA packages to
pbkdf2.Key.
*/
package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
package pbkdf2
import (
"crypto/hmac"

View File

@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package salsa provides low-level access to functions in the Salsa family.
package salsa // import "golang.org/x/crypto/salsa20/salsa"
package salsa
import "math/bits"

File diff suppressed because it is too large

View File

@ -71,6 +71,10 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
for auth := AuthMethod(new(noneAuth)); auth != nil; {
ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions)
if err != nil {
// On disconnect, return error immediately
if _, ok := err.(*disconnectMsg); ok {
return err
}
// We return the error later if there is no other method left to
// try.
ok = authFailure
@ -551,6 +555,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
}
gotMsgExtInfo := false
gotUserAuthInfoRequest := false
for {
packet, err := c.readPacket()
if err != nil {
@ -581,6 +586,9 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
if msg.PartialSuccess {
return authPartialSuccess, msg.Methods, nil
}
if !gotUserAuthInfoRequest {
return authFailure, msg.Methods, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
}
return authFailure, msg.Methods, nil
case msgUserAuthSuccess:
return authSuccess, nil, nil
@ -592,6 +600,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
if err := Unmarshal(packet, &msg); err != nil {
return authFailure, nil, err
}
gotUserAuthInfoRequest = true
// Manually unpack the prompt/echo pairs.
rest := msg.Prompts

View File

@ -20,4 +20,4 @@ References:
This package does not fall under the stability promise of the Go language itself,
so its API may be changed when pressing needs arise.
*/
package ssh // import "golang.org/x/crypto/ssh"
package ssh

View File

@ -488,7 +488,49 @@ func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
h := hash.New()
h.Write(data)
digest := h.Sum(nil)
return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob)
// Signatures in PKCS1v15 must match the key's modulus in
// length. However with SSH, some signers provide RSA
// signatures which are missing the MSB 0's of the bignum
// represented. With ssh-rsa signatures, this is encouraged by
// the spec (even though e.g. OpenSSH will give the full
// length unconditionally). With rsa-sha2-* signatures, the
// verifier is allowed to support these, even though they are
// out of spec. See RFC 4253 Section 6.6 for ssh-rsa and RFC
// 8332 Section 3 for rsa-sha2-* details.
//
// In practice:
// * OpenSSH always allows "short" signatures:
// https://github.com/openssh/openssh-portable/blob/V_9_8_P1/ssh-rsa.c#L526
// but always generates padded signatures:
// https://github.com/openssh/openssh-portable/blob/V_9_8_P1/ssh-rsa.c#L439
//
// * PuTTY versions 0.81 and earlier will generate short
// signatures for all RSA signature variants. Note that
// PuTTY is embedded in other software, such as WinSCP and
// FileZilla. At the time of writing, a patch has been
// applied to PuTTY to generate padded signatures for
// rsa-sha2-*, but not yet released:
// https://git.tartarus.org/?p=simon/putty.git;a=commitdiff;h=a5bcf3d384e1bf15a51a6923c3724cbbee022d8e
//
// * SSH.NET versions 2024.0.0 and earlier will generate short
// signatures for all RSA signature variants, fixed in 2024.1.0:
// https://github.com/sshnet/SSH.NET/releases/tag/2024.1.0
//
// As a result, we pad these up to the key size by inserting
// leading 0's.
//
// Note that support for short signatures with rsa-sha2-* may
// be removed in the future due to such signatures not being
// allowed by the spec.
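// For example, a 255-byte signature for a 2048-bit (256-byte) modulus is
// padded here with a single leading zero byte before verification.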
blob := sig.Blob
keySize := (*rsa.PublicKey)(r).Size()
if len(blob) < keySize {
padded := make([]byte, keySize)
copy(padded[keySize-len(blob):], blob)
blob = padded
}
return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, blob)
}
func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {

View File

@ -149,7 +149,7 @@ func (s *ServerConfig) AddHostKey(key Signer) {
}
// cachedPubKey contains the results of querying whether a public key is
// acceptable for a user.
// acceptable for a user. This is a FIFO cache.
type cachedPubKey struct {
user string
pubKeyData []byte
@ -157,7 +157,13 @@ type cachedPubKey struct {
perms *Permissions
}
const maxCachedPubKeys = 16
// maxCachedPubKeys is the number of cache entries we store.
//
// Due to consistent misuse of the PublicKeyCallback API, we have reduced this
// to 1, such that the only key in the cache is the most recently seen one. This
// forces the behavior that the last call to PublicKeyCallback will always be
// with the key that is used for authentication.
const maxCachedPubKeys = 1
// pubKeyCache caches tests for public keys. Since SSH clients
// will query whether a public key is acceptable before attempting to
@ -179,9 +185,10 @@ func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
// add adds the given tuple to the cache.
func (c *pubKeyCache) add(candidate cachedPubKey) {
if len(c.keys) < maxCachedPubKeys {
c.keys = append(c.keys, candidate)
if len(c.keys) >= maxCachedPubKeys {
c.keys = c.keys[1:]
}
c.keys = append(c.keys, candidate)
}
// ServerConn is an authenticated SSH connection, as seen from the
@ -510,8 +517,8 @@ userAuthLoop:
if err := s.transport.writePacket(Marshal(discMsg)); err != nil {
return nil, err
}
return nil, discMsg
authErrs = append(authErrs, discMsg)
return nil, &ServerAuthError{Errors: authErrs}
}
var userAuthReq userAuthRequestMsg

4
vendor/golang.org/x/sync/LICENSE generated vendored
View File

@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

4
vendor/golang.org/x/sys/LICENSE generated vendored
View File

@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

17
vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s generated vendored Normal file
View File

@ -0,0 +1,17 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin && amd64 && gc
#include "textflag.h"
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctlbyname(SB)
GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB)

21
vendor/golang.org/x/sys/cpu/cpu.go generated vendored
View File

@ -105,6 +105,8 @@ var ARM64 struct {
HasSVE bool // Scalable Vector Extensions
HasSVE2 bool // Scalable Vector Extensions 2
HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32
HasDIT bool // Data Independent Timing support
HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions
_ CacheLinePad
}
@ -199,6 +201,25 @@ var S390X struct {
_ CacheLinePad
}
// RISCV64 contains the supported CPU features and performance characteristics for riscv64
// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate
// the presence of RISC-V extensions.
//
// It is safe to assume that all the RV64G extensions are supported and so they are omitted from
// this structure. As riscv64 Go programs require at least RV64G, the code that populates
// this structure cannot run successfully if some of the RV64G extensions are missing.
// The struct is padded to avoid false sharing.
var RISCV64 struct {
_ CacheLinePad
HasFastMisaligned bool // Fast misaligned accesses
HasC bool // Compressed instruction-set extension
HasV bool // Vector extension compatible with RVV 1.0
HasZba bool // Address generation instructions extension
HasZbb bool // Basic bit-manipulation extension
HasZbs bool // Single-bit instructions extension
_ CacheLinePad
}
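// A usage sketch (assuming this package is imported as
// "golang.org/x/sys/cpu"):
//
//	if cpu.RISCV64.HasV && cpu.RISCV64.HasZbb {
//		// take a vector / bit-manipulation optimized code path
//	}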
func init() {
archInit()
initOptions()

View File

@ -38,6 +38,8 @@ func initOptions() {
{Name: "dcpop", Feature: &ARM64.HasDCPOP},
{Name: "asimddp", Feature: &ARM64.HasASIMDDP},
{Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM},
{Name: "dit", Feature: &ARM64.HasDIT},
{Name: "i8mm", Feature: &ARM64.HasI8MM},
}
}
@ -145,6 +147,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
ARM64.HasLRCPC = true
}
switch extractBits(isar1, 52, 55) {
case 1:
ARM64.HasI8MM = true
}
// ID_AA64PFR0_EL1
switch extractBits(pfr0, 16, 19) {
case 0:
@ -168,6 +175,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
parseARM64SVERegister(getzfr0())
}
switch extractBits(pfr0, 48, 51) {
case 1:
ARM64.HasDIT = true
}
}
func parseARM64SVERegister(zfr0 uint64) {

61
vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go generated vendored Normal file
View File

@ -0,0 +1,61 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin && amd64 && gc
package cpu
// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl
// call (see issue 43089). It also restricts AVX512 support for Darwin to
// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233).
//
// Background:
// Darwin implements a special mechanism to economize on thread state when
// AVX512 specific registers are not in use. This scheme minimizes state when
// preempting threads that haven't yet used any AVX512 instructions, but adds
// special requirements to check for AVX512 hardware support at runtime (e.g.
// via sysctl call or commpage inspection). See issue 43089 and link below for
// full background:
// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240
//
// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0
// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption
// of the AVX512 mask registers (K0-K7) upon signal return. For this reason
// AVX512 is considered unsafe to use on Darwin for kernel versions prior to
// 21.3.0, where a fix has been confirmed. See issue 49233 for full background.
func darwinSupportsAVX512() bool {
return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0)
}
// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies
func darwinKernelVersionCheck(major, minor, patch int) bool {
var release [256]byte
err := darwinOSRelease(&release)
if err != nil {
return false
}
var mmp [3]int
c := 0
Loop:
for _, b := range release[:] {
switch {
case b >= '0' && b <= '9':
mmp[c] = 10*mmp[c] + int(b-'0')
case b == '.':
c++
if c > 2 {
return false
}
case b == 0:
break Loop
default:
return false
}
}
if c != 2 {
return false
}
return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch)
}

View File

@ -6,10 +6,10 @@
package cpu
// cpuid is implemented in cpu_x86.s for gc compiler
// cpuid is implemented in cpu_gc_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func xgetbv() (eax, edx uint32)

View File

@ -18,7 +18,7 @@ TEXT ·cpuid(SB), NOSPLIT, $0-24
RET
// func xgetbv() (eax, edx uint32)
TEXT ·xgetbv(SB),NOSPLIT,$0-8
TEXT ·xgetbv(SB), NOSPLIT, $0-8
MOVL $0, CX
XGETBV
MOVL AX, eax+0(FP)

View File

@ -23,9 +23,3 @@ func xgetbv() (eax, edx uint32) {
gccgoXgetbv(&a, &d)
return a, d
}
// gccgo doesn't build on Darwin, per:
// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76
func darwinSupportsAVX512() bool {
return false
}

View File

@ -35,8 +35,10 @@ const (
hwcap_SHA512 = 1 << 21
hwcap_SVE = 1 << 22
hwcap_ASIMDFHM = 1 << 23
hwcap_DIT = 1 << 24
hwcap2_SVE2 = 1 << 1
hwcap2_I8MM = 1 << 13
)
// linuxKernelCanEmulateCPUID reports whether we're running
@ -106,9 +108,11 @@ func doinit() {
ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512)
ARM64.HasSVE = isSet(hwCap, hwcap_SVE)
ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM)
ARM64.HasDIT = isSet(hwCap, hwcap_DIT)
// HWCAP2 feature bits
ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2)
ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM)
}
func isSet(hwc uint, value uint) bool {

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x
//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64
package cpu

137
vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go generated vendored Normal file
View File

@ -0,0 +1,137 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import (
"syscall"
"unsafe"
)
// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe
// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available.
//
// A note on detection of the Vector extension using HWCAP.
//
// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5.
// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe
// syscall is not available then neither is the Vector extension (which needs kernel support).
// The riscv_hwprobe syscall should then be all we need to detect the Vector extension.
// However, some RISC-V board manufacturers ship boards with an older kernel on top of which
// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe
// patches. These kernels advertise support for the Vector extension using HWCAP. Falling
// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not
// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option.
//
// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by
// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board
// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified
// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use
// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector
// extension are binary incompatible. HWCAP can then not be used in isolation to populate the
// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0.
//
// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector
// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype
// register. This check would allow us to safely detect version 1.0 of the Vector extension
// with HWCAP, if riscv_hwprobe were not available. However, the check cannot
// be added until the assembler supports the Vector instructions.
//
// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the
// extensions it advertises support for are explicitly versioned. It's also worth noting that
// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba.
// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority
// of RISC-V extensions.
//
// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information.
// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must
// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall
// here.
const (
// Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go.
riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4
riscv_HWPROBE_IMA_C = 0x2
riscv_HWPROBE_IMA_V = 0x4
riscv_HWPROBE_EXT_ZBA = 0x8
riscv_HWPROBE_EXT_ZBB = 0x10
riscv_HWPROBE_EXT_ZBS = 0x20
riscv_HWPROBE_KEY_CPUPERF_0 = 0x5
riscv_HWPROBE_MISALIGNED_FAST = 0x3
riscv_HWPROBE_MISALIGNED_MASK = 0x7
)
const (
// sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go.
sys_RISCV_HWPROBE = 258
)
// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go.
type riscvHWProbePairs struct {
key int64
value uint64
}
const (
// CPU features
hwcap_RISCV_ISA_C = 1 << ('C' - 'A')
)
func doinit() {
// A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key
// field should be initialised with one of the key constants defined above, e.g.,
// RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value.
// If the kernel does not recognise a key it will set the key field to -1 and the value field to 0.
pairs := []riscvHWProbePairs{
{riscv_HWPROBE_KEY_IMA_EXT_0, 0},
{riscv_HWPROBE_KEY_CPUPERF_0, 0},
}
// This call only indicates that extensions are supported if they are implemented on all cores.
if riscvHWProbe(pairs, 0) {
if pairs[0].key != -1 {
v := uint(pairs[0].value)
RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C)
RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V)
RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA)
RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB)
RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS)
}
if pairs[1].key != -1 {
v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK
RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST
}
}
// Let's double check with HWCAP if the C extension does not appear to be supported.
// This may happen if we're running on a kernel older than 6.4.
if !RISCV64.HasC {
RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C)
}
}
func isSet(hwc uint, value uint) bool {
return hwc&value != 0
}
// riscvHWProbe is a simplified version of the generated wrapper function found in
// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the
// cpuCount and cpus parameters which we do not need. We always want to pass 0 for
// these parameters here so the kernel only reports the extensions that are present
// on all cores.
func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool {
var _zero uintptr
var p0 unsafe.Pointer
if len(pairs) > 0 {
p0 = unsafe.Pointer(&pairs[0])
} else {
p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0)
return e1 == 0
}

11
vendor/golang.org/x/sys/cpu/cpu_other_x86.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc))
package cpu
func darwinSupportsAVX512() bool {
panic("only implemented for gc && amd64 && darwin")
}

View File

@ -8,4 +8,13 @@ package cpu
const cacheLineSize = 64
func initOptions() {}
func initOptions() {
options = []option{
{Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned},
{Name: "c", Feature: &RISCV64.HasC},
{Name: "v", Feature: &RISCV64.HasV},
{Name: "zba", Feature: &RISCV64.HasZba},
{Name: "zbb", Feature: &RISCV64.HasZbb},
{Name: "zbs", Feature: &RISCV64.HasZbs},
}
}

View File

@ -92,10 +92,8 @@ func archInit() {
osSupportsAVX = isSet(1, eax) && isSet(2, eax)
if runtime.GOOS == "darwin" {
// Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers.
// Since users can't rely on mask register contents, let's not advertise AVX-512 support.
// See issue 49233.
osSupportsAVX512 = false
// Darwin requires special AVX512 checks, see cpu_darwin_x86.go
osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
} else {
// Check if OPMASK and ZMM registers have OS support.
osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)

98
vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Minimal copy of x/sys/unix so the cpu package can make a
// system call on Darwin without depending on x/sys/unix.
//go:build darwin && amd64 && gc
package cpu
import (
"syscall"
"unsafe"
)
type _C_int int32
// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419
func darwinOSRelease(release *[256]byte) error {
// from x/sys/unix/zerrors_openbsd_amd64.go
const (
CTL_KERN = 0x1
KERN_OSRELEASE = 0x2
)
mib := []_C_int{CTL_KERN, KERN_OSRELEASE}
n := unsafe.Sizeof(*release)
return sysctl(mib, &release[0], &n, nil, 0)
}
type Errno = syscall.Errno
var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes.
// from x/sys/unix/zsyscall_darwin_amd64.go L791-807
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
var _p0 unsafe.Pointer
if len(mib) > 0 {
_p0 = unsafe.Pointer(&mib[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
if _, _, err := syscall_syscall6(
libc_sysctl_trampoline_addr,
uintptr(_p0),
uintptr(len(mib)),
uintptr(unsafe.Pointer(old)),
uintptr(unsafe.Pointer(oldlen)),
uintptr(unsafe.Pointer(new)),
uintptr(newlen),
); err != 0 {
return err
}
return nil
}
var libc_sysctl_trampoline_addr uintptr
// adapted from internal/cpu/cpu_arm64_darwin.go
func darwinSysctlEnabled(name []byte) bool {
out := int32(0)
nout := unsafe.Sizeof(out)
if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil {
return false
}
return out > 0
}
//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
var libc_sysctlbyname_trampoline_addr uintptr
// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix
func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {
if _, _, err := syscall_syscall6(
libc_sysctlbyname_trampoline_addr,
uintptr(unsafe.Pointer(name)),
uintptr(unsafe.Pointer(old)),
uintptr(unsafe.Pointer(oldlen)),
uintptr(unsafe.Pointer(new)),
uintptr(newlen),
0,
); err != 0 {
return err
}
return nil
}
//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib"
// Implemented in the runtime package (runtime/sys_darwin.go)
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
//go:linkname syscall_syscall6 syscall.syscall6

View File

@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these
into a common file for each OS.
The merge is performed in the following steps:
1. Construct the set of common code that is idential in all architecture-specific files.
1. Construct the set of common code that is identical in all architecture-specific files.
2. Write this common code to the merged file.
3. Remove the common code from all architecture-specific files.

View File

@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
	return &value, err
}

// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC
// association for the network device specified by ifname.
func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) {
	ifr, err := NewIfreq(ifname)
	if err != nil {
		return nil, err
	}

	value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO}
	ifrd := ifr.withData(unsafe.Pointer(&value))

	err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
	return &value, err
}

// IoctlGetHwTstamp retrieves the hardware timestamping configuration
// for the network device specified by ifname.
func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) {
	ifr, err := NewIfreq(ifname)
	if err != nil {
		return nil, err
	}

	value := HwTstampConfig{}
	ifrd := ifr.withData(unsafe.Pointer(&value))

	err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd)
	return &value, err
}

// IoctlSetHwTstamp updates the hardware timestamping configuration for
// the network device specified by ifname.
func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error {
	ifr, err := NewIfreq(ifname)
	if err != nil {
		return err
	}
	ifrd := ifr.withData(unsafe.Pointer(cfg))
	return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd)
}

// FdToClockID derives the clock ID from the file descriptor number
// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is
// suitable for system calls like ClockGettime.
func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) }

// IoctlPtpClockGetcaps returns the description of a given PTP device.
func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) {
	var value PtpClockCaps
	err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value))
	return &value, err
}

// IoctlPtpSysOffsetPrecise returns a description of the clock
// offset compared to the system clock.
func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) {
	var value PtpSysOffsetPrecise
	err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value))
	return &value, err
}

// IoctlPtpSysOffsetExtended returns an extended description of the
// clock offset compared to the system clock. The samples parameter
// specifies the desired number of measurements.
func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) {
	value := PtpSysOffsetExtended{Samples: uint32(samples)}
	err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value))
	return &value, err
}

// IoctlPtpPinGetfunc returns the configuration of the specified
// I/O pin on given PTP device.
func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) {
	value := PtpPinDesc{Index: uint32(index)}
	err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value))
	return &value, err
}

// IoctlPtpPinSetfunc updates configuration of the specified PTP
// I/O pin.
func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error {
	return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd))
}

// IoctlPtpPeroutRequest configures the periodic output mode of the
// PTP I/O pins.
func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error {
	return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r))
}

// IoctlPtpExttsRequest configures the external timestamping mode
// of the PTP I/O pins.
func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error {
	return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r))
}

// IoctlGetWatchdogInfo fetches information about a watchdog device from the
// Linux watchdog API. For more information, see:
// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
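These wrappers arrive with the golang.org/x/sys/unix code updated in this diff. As a hedged, Linux-only sketch of how they can be combined, the program below queries an interface's timestamping capabilities and then reads its PTP hardware clock; the interface name "eth0" and the /dev/ptpN path are placeholders, and field names such as So_timestamping and Phc_index are assumed to match the generated EthtoolTsInfo struct in this version of the package.

//go:build linux

// A minimal sketch: query timestamping capabilities, then read the PHC.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Any datagram socket works as a handle for the SIOCETHTOOL ioctl.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// "eth0" is a placeholder; substitute a real interface name.
	info, err := unix.IoctlGetEthtoolTsInfo(fd, "eth0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("timestamping flags %#x, PHC index %d\n", info.So_timestamping, info.Phc_index)
	if info.Phc_index < 0 {
		return // no PTP hardware clock behind this interface
	}

	// Open the matching PTP character device and read its clock using the
	// FD_TO_CLOCKID-style conversion provided by FdToClockID.
	ptp, err := unix.Open(fmt.Sprintf("/dev/ptp%d", info.Phc_index), unix.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(ptp)

	var ts unix.Timespec
	if err := unix.ClockGettime(unix.FdToClockID(ptp), &ts); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("PHC time: %d.%09d\n", ts.Sec, ts.Nsec)
}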

View File

@ -58,6 +58,7 @@ includes_Darwin='
#define _DARWIN_USE_64_BIT_INODE
#define __APPLE_USE_RFC_3542
#include <stdint.h>
#include <sys/stdio.h>
#include <sys/attr.h>
#include <sys/clonefile.h>
#include <sys/kern_control.h>
@ -157,6 +158,16 @@ includes_Linux='
#endif
#define _GNU_SOURCE
// See the description in unix/linux/types.go
#if defined(__ARM_EABI__) || \
(defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \
(defined(__powerpc__) && (!defined(__powerpc64__)))
# ifdef _TIME_BITS
# undef _TIME_BITS
# endif
# define _TIME_BITS 32
#endif
// <sys/ioctl.h> is broken on powerpc64, as it fails to include definitions of
// these structures. We just include them copied from <bits/termios.h>.
#if defined(__powerpc__)
@ -255,6 +266,7 @@ struct ltchars {
#include <linux/nsfs.h>
#include <linux/perf_event.h>
#include <linux/pps.h>
#include <linux/ptp_clock.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/reboot.h>
@ -526,6 +538,7 @@ ccflags="$@"
$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ ||
$2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ ||
$2 ~ /^NFC_.*_(MAX)?SIZE$/ ||
$2 ~ /^PTP_/ ||
$2 ~ /^RAW_PAYLOAD_/ ||
$2 ~ /^[US]F_/ ||
$2 ~ /^TP_STATUS_/ ||
@ -551,6 +564,7 @@ ccflags="$@"
$2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
$2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ ||
$2 ~ /^(CONNECT|SAE)_/ ||
$2 ~ /^FIORDCHK$/ ||
$2 ~ /^SIOC/ ||
$2 ~ /^TIOC/ ||
@ -654,7 +668,7 @@ errors=$(
signals=$(
	echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
	awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
	grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
	grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
	sort
)
@ -664,7 +678,7 @@ echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
	sort >_error.grep
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
	awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
	grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
	grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
	sort >_signal.grep
echo '// mkerrors.sh' "$@"

View File

@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [
func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
	return mapper.Mremap(oldData, newLength, flags)
}

func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) {
	xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr))
	return unsafe.Pointer(xaddr), err
}
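MremapPtr is the raw-pointer variant; the slice-based Mremap shown in the same hunk is easier to demonstrate. Below is a minimal, Linux-only sketch (not part of this change) that grows an anonymous mapping, letting the kernel relocate it if needed.

//go:build linux

// A minimal sketch: grow an anonymous mapping with MREMAP_MAYMOVE.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	pageSize := unix.Getpagesize()

	// Map one anonymous, writable page.
	buf, err := unix.Mmap(-1, 0, pageSize,
		unix.PROT_READ|unix.PROT_WRITE,
		unix.MAP_ANON|unix.MAP_PRIVATE)
	if err != nil {
		log.Fatal(err)
	}
	copy(buf, []byte("hello"))

	// Grow to two pages; after a successful remap the old slice must not be
	// used (or unmapped), because the kernel may have moved the mapping.
	bigger, err := unix.Mremap(buf, 2*pageSize, unix.MREMAP_MAYMOVE)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Munmap(bigger)

	fmt.Printf("still readable after remap: %q\n", bigger[:5])
}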

View File

@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int,
	var status _C_int
	var r Pid_t
	err = ERESTART
	// AIX wait4 may return with ERESTART errno, while the processus is still
	// AIX wait4 may return with ERESTART errno, while the process is still
	// active.
	for err == ERESTART {
		r, err = wait4(Pid_t(pid), &status, options, rusage)

View File

@ -402,6 +402,18 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error {
	return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq))
}

//sys renamexNp(from string, to string, flag uint32) (err error)

func RenamexNp(from string, to string, flag uint32) (err error) {
	return renamexNp(from, to, flag)
}

//sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error)

func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) {
	return renameatxNp(fromfd, from, tofd, to, flag)
}

//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL

func Uname(uname *Utsname) error {
@ -542,6 +554,55 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
	}
}

//sys pthread_chdir_np(path string) (err error)

func PthreadChdir(path string) (err error) {
	return pthread_chdir_np(path)
}

//sys pthread_fchdir_np(fd int) (err error)

func PthreadFchdir(fd int) (err error) {
	return pthread_fchdir_np(fd)
}

// Connectx calls connectx(2) to initiate a connection on a socket.
//
// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument.
//
//   - srcIf is the optional source interface index. 0 means unspecified.
//   - srcAddr is the optional source address. nil means unspecified.
//   - dstAddr is the destination address.
//
// On success, Connectx returns the number of bytes enqueued for transmission.
func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) {
	endpoints := SaEndpoints{
		Srcif: srcIf,
	}

	if srcAddr != nil {
		addrp, addrlen, err := srcAddr.sockaddr()
		if err != nil {
			return 0, err
		}
		endpoints.Srcaddr = (*RawSockaddr)(addrp)
		endpoints.Srcaddrlen = uint32(addrlen)
	}

	if dstAddr != nil {
		addrp, addrlen, err := dstAddr.sockaddr()
		if err != nil {
			return 0, err
		}
		endpoints.Dstaddr = (*RawSockaddr)(addrp)
		endpoints.Dstaddrlen = uint32(addrlen)
	}

	err = connectx(fd, &endpoints, associd, flags, iov, &n, connid)
	return
}

//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)

//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
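Connectx is Darwin-only and lands here with the rest of the updated x/sys code. A hedged sketch of a plain TCP connect through it might look like the following; the destination address is a placeholder from TEST-NET-1, and SAE_ASSOCID_ANY is assumed to be among the generated SAE_ constants that the mkerrors change earlier in this diff starts picking up.

//go:build darwin

// A minimal sketch: initiate a TCP connection with Connectx.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Placeholder destination (TEST-NET-1); replace with a real peer.
	dst := &unix.SockaddrInet4{Port: 443, Addr: [4]byte{192, 0, 2, 10}}

	// No source interface or address, no existing association, no data to enqueue.
	n, err := unix.Connectx(fd, 0, nil, dst, unix.SAE_ASSOCID_ANY, 0, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("connectx enqueued %d bytes\n", n)
}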

Some files were not shown because too many files have changed in this diff