Fix CVE-2025-47913: upgrade golang.org/x/crypto to v0.48.0

Upgrade golang.org/x/crypto from v0.38.0 to v0.48.0 to resolve
CVE-2025-47913 (GO-2025-4116), a denial-of-service vulnerability in
golang.org/x/crypto/ssh/agent: an SSH agent client that receives an
SSH_AGENT_SUCCESS message when it expects a typed response panics,
causing early termination of the client process. The upstream fix
first shipped in v0.43.0.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
limkevin.chao 2026-02-23 14:14:13 +01:00
parent 059f4d9898
commit 3406709ee7
209 changed files with 32045 additions and 74728 deletions

18
go.mod
View File

@ -1,6 +1,6 @@
module github.com/cloudflare/cloudflared module github.com/cloudflare/cloudflared
go 1.24 go 1.24.0
require ( require (
github.com/coreos/go-oidc/v3 v3.10.0 github.com/coreos/go-oidc/v3 v3.10.0
@ -34,11 +34,11 @@ require (
go.opentelemetry.io/proto/otlp v1.2.0 go.opentelemetry.io/proto/otlp v1.2.0
go.uber.org/automaxprocs v1.6.0 go.uber.org/automaxprocs v1.6.0
go.uber.org/mock v0.5.1 go.uber.org/mock v0.5.1
golang.org/x/crypto v0.38.0 golang.org/x/crypto v0.48.0
golang.org/x/net v0.40.0 golang.org/x/net v0.49.0
golang.org/x/sync v0.14.0 golang.org/x/sync v0.19.0
golang.org/x/sys v0.33.0 golang.org/x/sys v0.41.0
golang.org/x/term v0.32.0 golang.org/x/term v0.40.0
google.golang.org/protobuf v1.36.6 google.golang.org/protobuf v1.36.6
gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
@ -83,10 +83,10 @@ require (
go.opentelemetry.io/otel/metric v1.35.0 // indirect go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
golang.org/x/arch v0.4.0 // indirect golang.org/x/arch v0.4.0 // indirect
golang.org/x/mod v0.24.0 // indirect golang.org/x/mod v0.32.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/text v0.25.0 // indirect golang.org/x/text v0.34.0 // indirect
golang.org/x/tools v0.32.0 // indirect golang.org/x/tools v0.41.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250512202823-5a2f75b736a9 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250512202823-5a2f75b736a9 // indirect
google.golang.org/grpc v1.72.2 // indirect google.golang.org/grpc v1.72.2 // indirect

32
go.sum
View File

@ -227,21 +227,21 @@ golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -249,20 +249,20 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -12,6 +12,8 @@ import (
// XOF defines the interface to hash functions that // XOF defines the interface to hash functions that
// support arbitrary-length output. // support arbitrary-length output.
//
// New callers should prefer the standard library [hash.XOF].
type XOF interface { type XOF interface {
// Write absorbs more data into the hash's state. It panics if called // Write absorbs more data into the hash's state. It panics if called
// after Read. // after Read.
@ -47,6 +49,8 @@ const maxOutputLength = (1 << 32) * 64
// //
// A non-nil key turns the hash into a MAC. The key must between // A non-nil key turns the hash into a MAC. The key must between
// zero and 32 bytes long. // zero and 32 bytes long.
//
// The result can be safely interface-upgraded to [hash.XOF].
func NewXOF(size uint32, key []byte) (XOF, error) { func NewXOF(size uint32, key []byte) (XOF, error) {
if len(key) > Size { if len(key) > Size {
return nil, errKeySize return nil, errKeySize
@ -93,6 +97,10 @@ func (x *xof) Clone() XOF {
return &clone return &clone
} }
func (x *xof) BlockSize() int {
return x.d.BlockSize()
}
func (x *xof) Reset() { func (x *xof) Reset() {
x.cfg[0] = byte(Size) x.cfg[0] = byte(Size)
binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length

11
vendor/golang.org/x/crypto/blake2b/go125.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.25
package blake2b
import "hash"
var _ hash.XOF = (*xof)(nil)

View File

@ -29,7 +29,7 @@ loop:
MOVD $NUM_ROUNDS, R21 MOVD $NUM_ROUNDS, R21
VLD1 (R11), [V30.S4, V31.S4] VLD1 (R11), [V30.S4, V31.S4]
// load contants // load constants
// VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4]
WORD $0x4D60E940 WORD $0x4D60E940

View File

@ -38,6 +38,9 @@ type chacha20poly1305 struct {
// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key. // New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key.
func New(key []byte) (cipher.AEAD, error) { func New(key []byte) (cipher.AEAD, error) {
if fips140Enforced() {
return nil, errors.New("chacha20poly1305: use of ChaCha20Poly1305 is not allowed in FIPS 140-only mode")
}
if len(key) != KeySize { if len(key) != KeySize {
return nil, errors.New("chacha20poly1305: bad key length") return nil, errors.New("chacha20poly1305: bad key length")
} }

View File

@ -56,7 +56,10 @@ func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []
ret, out := sliceForAppend(dst, len(plaintext)+16) ret, out := sliceForAppend(dst, len(plaintext)+16)
if alias.InexactOverlap(out, plaintext) { if alias.InexactOverlap(out, plaintext) {
panic("chacha20poly1305: invalid buffer overlap") panic("chacha20poly1305: invalid buffer overlap of output and input")
}
if alias.AnyOverlap(out, additionalData) {
panic("chacha20poly1305: invalid buffer overlap of output and additional data")
} }
chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData) chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData)
return ret return ret
@ -73,7 +76,10 @@ func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) (
ciphertext = ciphertext[:len(ciphertext)-16] ciphertext = ciphertext[:len(ciphertext)-16]
ret, out := sliceForAppend(dst, len(ciphertext)) ret, out := sliceForAppend(dst, len(ciphertext))
if alias.InexactOverlap(out, ciphertext) { if alias.InexactOverlap(out, ciphertext) {
panic("chacha20poly1305: invalid buffer overlap") panic("chacha20poly1305: invalid buffer overlap of output and input")
}
if alias.AnyOverlap(out, additionalData) {
panic("chacha20poly1305: invalid buffer overlap of output and additional data")
} }
if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) { if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) {
for i := range out { for i := range out {

View File

@ -31,7 +31,10 @@ func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []b
ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize)
ciphertext, tag := out[:len(plaintext)], out[len(plaintext):] ciphertext, tag := out[:len(plaintext)], out[len(plaintext):]
if alias.InexactOverlap(out, plaintext) { if alias.InexactOverlap(out, plaintext) {
panic("chacha20poly1305: invalid buffer overlap") panic("chacha20poly1305: invalid buffer overlap of output and input")
}
if alias.AnyOverlap(out, additionalData) {
panic("chacha20poly1305: invalid buffer overlap of output and additional data")
} }
var polyKey [32]byte var polyKey [32]byte
@ -67,7 +70,10 @@ func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []
ret, out := sliceForAppend(dst, len(ciphertext)) ret, out := sliceForAppend(dst, len(ciphertext))
if alias.InexactOverlap(out, ciphertext) { if alias.InexactOverlap(out, ciphertext) {
panic("chacha20poly1305: invalid buffer overlap") panic("chacha20poly1305: invalid buffer overlap of output and input")
}
if alias.AnyOverlap(out, additionalData) {
panic("chacha20poly1305: invalid buffer overlap of output and additional data")
} }
if !p.Verify(tag) { if !p.Verify(tag) {
for i := range out { for i := range out {

View File

@ -0,0 +1,9 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.26
package chacha20poly1305
func fips140Enforced() bool { return false }

View File

@ -0,0 +1,11 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.26
package chacha20poly1305
import "crypto/fips140"
func fips140Enforced() bool { return fips140.Enforced() }

View File

@ -22,6 +22,9 @@ type xchacha20poly1305 struct {
// preferred when nonce uniqueness cannot be trivially ensured, or whenever // preferred when nonce uniqueness cannot be trivially ensured, or whenever
// nonces are randomly generated. // nonces are randomly generated.
func NewX(key []byte) (cipher.AEAD, error) { func NewX(key []byte) (cipher.AEAD, error) {
if fips140Enforced() {
return nil, errors.New("chacha20poly1305: use of ChaCha20Poly1305 is not allowed in FIPS 140-only mode")
}
if len(key) != KeySize { if len(key) != KeySize {
return nil, errors.New("chacha20poly1305: bad key length") return nil, errors.New("chacha20poly1305: bad key length")
} }

View File

@ -3,11 +3,14 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Package curve25519 provides an implementation of the X25519 function, which // Package curve25519 provides an implementation of the X25519 function, which
// performs scalar multiplication on the elliptic curve known as Curve25519. // performs scalar multiplication on the elliptic curve known as Curve25519
// See RFC 7748. // according to [RFC 7748].
// //
// This package is a wrapper for the X25519 implementation // The curve25519 package is a wrapper for the X25519 implementation in the
// in the crypto/ecdh package. // crypto/ecdh package. It is [frozen] and is not accepting new features.
//
// [RFC 7748]: https://datatracker.ietf.org/doc/html/rfc7748
// [frozen]: https://go.dev/wiki/Frozen
package curve25519 package curve25519
import "crypto/ecdh" import "crypto/ecdh"
@ -36,7 +39,7 @@ func ScalarBaseMult(dst, scalar *[32]byte) {
curve := ecdh.X25519() curve := ecdh.X25519()
priv, err := curve.NewPrivateKey(scalar[:]) priv, err := curve.NewPrivateKey(scalar[:])
if err != nil { if err != nil {
panic("curve25519: internal error: scalarBaseMult was not 32 bytes") panic("curve25519: " + err.Error())
} }
copy(dst[:], priv.PublicKey().Bytes()) copy(dst[:], priv.PublicKey().Bytes())
} }

View File

@ -3,6 +3,10 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Package salsa provides low-level access to functions in the Salsa family. // Package salsa provides low-level access to functions in the Salsa family.
//
// Deprecated: this package exposes unsafe low-level operations. New applications
// should consider using the AEAD construction in golang.org/x/crypto/chacha20poly1305
// instead. Existing users should migrate to golang.org/x/crypto/salsa20.
package salsa package salsa
import "math/bits" import "math/bits"

View File

@ -21,7 +21,12 @@ import (
// field. // field.
const ( const (
CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" // Deprecated: DSA is only supported at insecure key sizes, and was removed
// from major implementations.
CertAlgoDSAv01 = InsecureCertAlgoDSAv01
// Deprecated: DSA is only supported at insecure key sizes, and was removed
// from major implementations.
InsecureCertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
@ -228,7 +233,11 @@ func parseCert(in []byte, privAlgo string) (*Certificate, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
// The Type() function is intended to return only certificate key types, but
// we use certKeyAlgoNames anyway for safety, to match [Certificate.Type].
if _, ok := certKeyAlgoNames[k.Type()]; ok {
return nil, fmt.Errorf("ssh: the signature key type %q is invalid for certificates", k.Type())
}
c.SignatureKey = k c.SignatureKey = k
c.Signature, rest, ok = parseSignatureBody(g.Signature) c.Signature, rest, ok = parseSignatureBody(g.Signature)
if !ok || len(rest) > 0 { if !ok || len(rest) > 0 {
@ -296,16 +305,13 @@ type CertChecker struct {
SupportedCriticalOptions []string SupportedCriticalOptions []string
// IsUserAuthority should return true if the key is recognized as an // IsUserAuthority should return true if the key is recognized as an
// authority for the given user certificate. This allows for // authority for user certificate. This must be set if this CertChecker
// certificates to be signed by other certificates. This must be set // will be checking user certificates.
// if this CertChecker will be checking user certificates.
IsUserAuthority func(auth PublicKey) bool IsUserAuthority func(auth PublicKey) bool
// IsHostAuthority should report whether the key is recognized as // IsHostAuthority should report whether the key is recognized as
// an authority for this host. This allows for certificates to be // an authority for this host. This must be set if this CertChecker
// signed by other keys, and for those other keys to only be valid // will be checking host certificates.
// signers for particular hostnames. This must be set if this
// CertChecker will be checking host certificates.
IsHostAuthority func(auth PublicKey, address string) bool IsHostAuthority func(auth PublicKey, address string) bool
// Clock is used for verifying time stamps. If nil, time.Now // Clock is used for verifying time stamps. If nil, time.Now
@ -442,12 +448,19 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
// SignCert signs the certificate with an authority, setting the Nonce, // SignCert signs the certificate with an authority, setting the Nonce,
// SignatureKey, and Signature fields. If the authority implements the // SignatureKey, and Signature fields. If the authority implements the
// MultiAlgorithmSigner interface the first algorithm in the list is used. This // MultiAlgorithmSigner interface the first algorithm in the list is used. This
// is useful if you want to sign with a specific algorithm. // is useful if you want to sign with a specific algorithm. As specified in
// [SSH-CERTS], Section 2.1.1, authority can't be a [Certificate].
func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
c.Nonce = make([]byte, 32) c.Nonce = make([]byte, 32)
if _, err := io.ReadFull(rand, c.Nonce); err != nil { if _, err := io.ReadFull(rand, c.Nonce); err != nil {
return err return err
} }
// The Type() function is intended to return only certificate key types, but
// we use certKeyAlgoNames anyway for safety, to match [Certificate.Type].
if _, ok := certKeyAlgoNames[authority.PublicKey().Type()]; ok {
return fmt.Errorf("ssh: certificates cannot be used as authority (public key type %q)",
authority.PublicKey().Type())
}
c.SignatureKey = authority.PublicKey() c.SignatureKey = authority.PublicKey()
if v, ok := authority.(MultiAlgorithmSigner); ok { if v, ok := authority.(MultiAlgorithmSigner); ok {
@ -488,7 +501,7 @@ var certKeyAlgoNames = map[string]string{
CertAlgoRSAv01: KeyAlgoRSA, CertAlgoRSAv01: KeyAlgoRSA,
CertAlgoRSASHA256v01: KeyAlgoRSASHA256, CertAlgoRSASHA256v01: KeyAlgoRSASHA256,
CertAlgoRSASHA512v01: KeyAlgoRSASHA512, CertAlgoRSASHA512v01: KeyAlgoRSASHA512,
CertAlgoDSAv01: KeyAlgoDSA, InsecureCertAlgoDSAv01: InsecureKeyAlgoDSA,
CertAlgoECDSA256v01: KeyAlgoECDSA256, CertAlgoECDSA256v01: KeyAlgoECDSA256,
CertAlgoECDSA384v01: KeyAlgoECDSA384, CertAlgoECDSA384v01: KeyAlgoECDSA384,
CertAlgoECDSA521v01: KeyAlgoECDSA521, CertAlgoECDSA521v01: KeyAlgoECDSA521,

View File

@ -8,6 +8,7 @@ import (
"crypto/aes" "crypto/aes"
"crypto/cipher" "crypto/cipher"
"crypto/des" "crypto/des"
"crypto/fips140"
"crypto/rc4" "crypto/rc4"
"crypto/subtle" "crypto/subtle"
"encoding/binary" "encoding/binary"
@ -15,6 +16,7 @@ import (
"fmt" "fmt"
"hash" "hash"
"io" "io"
"slices"
"golang.org/x/crypto/chacha20" "golang.org/x/crypto/chacha20"
"golang.org/x/crypto/internal/poly1305" "golang.org/x/crypto/internal/poly1305"
@ -58,11 +60,11 @@ func newRC4(key, iv []byte) (cipher.Stream, error) {
type cipherMode struct { type cipherMode struct {
keySize int keySize int
ivSize int ivSize int
create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) create func(key, iv []byte, macKey []byte, algs DirectionAlgorithms) (packetCipher, error)
} }
func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs DirectionAlgorithms) (packetCipher, error) {
return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { return func(key, iv, macKey []byte, algs DirectionAlgorithms) (packetCipher, error) {
stream, err := createFunc(key, iv) stream, err := createFunc(key, iv)
if err != nil { if err != nil {
return nil, err return nil, err
@ -93,41 +95,41 @@ func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream,
} }
// cipherModes documents properties of supported ciphers. Ciphers not included // cipherModes documents properties of supported ciphers. Ciphers not included
// are not supported and will not be negotiated, even if explicitly requested in // are not supported and will not be negotiated, even if explicitly configured.
// ClientConfig.Crypto.Ciphers. // When FIPS mode is enabled, only FIPS-approved algorithms are included.
var cipherModes = map[string]*cipherMode{ var cipherModes = map[string]*cipherMode{}
// Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms
// are defined in the order specified in the RFC.
"aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)},
"aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)},
"aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)},
// Ciphers from RFC 4345, which introduces security-improved arcfour ciphers. func init() {
// They are defined in the order specified in the RFC. cipherModes[CipherAES128CTR] = &cipherMode{16, aes.BlockSize, streamCipherMode(0, newAESCTR)}
"arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, cipherModes[CipherAES192CTR] = &cipherMode{24, aes.BlockSize, streamCipherMode(0, newAESCTR)}
"arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, cipherModes[CipherAES256CTR] = &cipherMode{32, aes.BlockSize, streamCipherMode(0, newAESCTR)}
// Use of GCM with arbitrary IVs is not allowed in FIPS 140-only mode,
// we'll wire it up to NewGCMForSSH in Go 1.26.
//
// For now it means we'll work with fips140=on but not fips140=only.
cipherModes[CipherAES128GCM] = &cipherMode{16, 12, newGCMCipher}
cipherModes[CipherAES256GCM] = &cipherMode{32, 12, newGCMCipher}
// Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. if fips140.Enabled() {
// Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and defaultCiphers = slices.DeleteFunc(defaultCiphers, func(algo string) bool {
// RC4) has problems with weak keys, and should be used with caution." _, ok := cipherModes[algo]
// RFC 4345 introduces improved versions of Arcfour. return !ok
"arcfour": {16, 0, streamCipherMode(0, newRC4)}, })
return
// AEAD ciphers }
gcm128CipherID: {16, 12, newGCMCipher},
gcm256CipherID: {32, 12, newGCMCipher},
chacha20Poly1305ID: {64, 0, newChaCha20Cipher},
cipherModes[CipherChaCha20Poly1305] = &cipherMode{64, 0, newChaCha20Cipher}
// Insecure ciphers not included in the default configuration.
cipherModes[InsecureCipherRC4128] = &cipherMode{16, 0, streamCipherMode(1536, newRC4)}
cipherModes[InsecureCipherRC4256] = &cipherMode{32, 0, streamCipherMode(1536, newRC4)}
cipherModes[InsecureCipherRC4] = &cipherMode{16, 0, streamCipherMode(0, newRC4)}
// CBC mode is insecure and so is not included in the default config. // CBC mode is insecure and so is not included in the default config.
// (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely
// needed, it's possible to specify a custom Config to enable it. // needed, it's possible to specify a custom Config to enable it.
// You should expect that an active attacker can recover plaintext if // You should expect that an active attacker can recover plaintext if
// you do. // you do.
aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, cipherModes[InsecureCipherAES128CBC] = &cipherMode{16, aes.BlockSize, newAESCBCCipher}
cipherModes[InsecureCipherTripleDESCBC] = &cipherMode{24, des.BlockSize, newTripleDESCBCCipher}
// 3des-cbc is insecure and is not included in the default
// config.
tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher},
} }
// prefixLen is the length of the packet prefix that contains the packet length // prefixLen is the length of the packet prefix that contains the packet length
@ -307,7 +309,7 @@ type gcmCipher struct {
buf []byte buf []byte
} }
func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs DirectionAlgorithms) (packetCipher, error) {
c, err := aes.NewCipher(key) c, err := aes.NewCipher(key)
if err != nil { if err != nil {
return nil, err return nil, err
@ -429,7 +431,7 @@ type cbcCipher struct {
oracleCamouflage uint32 oracleCamouflage uint32
} }
func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs DirectionAlgorithms) (packetCipher, error) {
cbc := &cbcCipher{ cbc := &cbcCipher{
mac: macModes[algs.MAC].new(macKey), mac: macModes[algs.MAC].new(macKey),
decrypter: cipher.NewCBCDecrypter(c, iv), decrypter: cipher.NewCBCDecrypter(c, iv),
@ -443,7 +445,7 @@ func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorith
return cbc, nil return cbc, nil
} }
func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { func newAESCBCCipher(key, iv, macKey []byte, algs DirectionAlgorithms) (packetCipher, error) {
c, err := aes.NewCipher(key) c, err := aes.NewCipher(key)
if err != nil { if err != nil {
return nil, err return nil, err
@ -457,7 +459,7 @@ func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCi
return cbc, nil return cbc, nil
} }
func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { func newTripleDESCBCCipher(key, iv, macKey []byte, algs DirectionAlgorithms) (packetCipher, error) {
c, err := des.NewTripleDESCipher(key) c, err := des.NewTripleDESCipher(key)
if err != nil { if err != nil {
return nil, err return nil, err
@ -635,8 +637,6 @@ func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader
return nil return nil
} }
const chacha20Poly1305ID = "chacha20-poly1305@openssh.com"
// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com // chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com
// AEAD, which is described here: // AEAD, which is described here:
// //
@ -650,7 +650,7 @@ type chacha20Poly1305Cipher struct {
buf []byte buf []byte
} }
func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs DirectionAlgorithms) (packetCipher, error) {
if len(key) != 64 { if len(key) != 64 {
panic(len(key)) panic(len(key))
} }

View File

@ -110,6 +110,7 @@ func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) e
} }
c.sessionID = c.transport.getSessionID() c.sessionID = c.transport.getSessionID()
c.algorithms = c.transport.getAlgorithms()
return c.clientAuthenticate(config) return c.clientAuthenticate(config)
} }

View File

@ -9,6 +9,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"slices"
"strings" "strings"
) )
@ -83,7 +84,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
// success // success
return nil return nil
} else if ok == authFailure { } else if ok == authFailure {
if m := auth.method(); !contains(tried, m) { if m := auth.method(); !slices.Contains(tried, m) {
tried = append(tried, m) tried = append(tried, m)
} }
} }
@ -97,7 +98,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
findNext: findNext:
for _, a := range config.Auth { for _, a := range config.Auth {
candidateMethod := a.method() candidateMethod := a.method()
if contains(tried, candidateMethod) { if slices.Contains(tried, candidateMethod) {
continue continue
} }
for _, meth := range methods { for _, meth := range methods {
@ -117,15 +118,6 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried)
} }
func contains(list []string, e string) bool {
for _, s := range list {
if s == e {
return true
}
}
return false
}
// An AuthMethod represents an instance of an RFC 4252 authentication method. // An AuthMethod represents an instance of an RFC 4252 authentication method.
type AuthMethod interface { type AuthMethod interface {
// auth authenticates user over transport t. // auth authenticates user over transport t.
@ -255,7 +247,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA
// Fallback to use if there is no "server-sig-algs" extension or a // Fallback to use if there is no "server-sig-algs" extension or a
// common algorithm cannot be found. We use the public key format if the // common algorithm cannot be found. We use the public key format if the
// MultiAlgorithmSigner supports it, otherwise we return an error. // MultiAlgorithmSigner supports it, otherwise we return an error.
if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) { if !slices.Contains(as.Algorithms(), underlyingAlgo(keyFormat)) {
return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v", return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v",
underlyingAlgo(keyFormat), keyFormat, as.Algorithms()) underlyingAlgo(keyFormat), keyFormat, as.Algorithms())
} }
@ -284,12 +276,12 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA
// Filter algorithms based on those supported by MultiAlgorithmSigner. // Filter algorithms based on those supported by MultiAlgorithmSigner.
var keyAlgos []string var keyAlgos []string
for _, algo := range algorithmsForKeyFormat(keyFormat) { for _, algo := range algorithmsForKeyFormat(keyFormat) {
if contains(as.Algorithms(), underlyingAlgo(algo)) { if slices.Contains(as.Algorithms(), underlyingAlgo(algo)) {
keyAlgos = append(keyAlgos, algo) keyAlgos = append(keyAlgos, algo)
} }
} }
algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos, true)
if err != nil { if err != nil {
// If there is no overlap, return the fallback algorithm to support // If there is no overlap, return the fallback algorithm to support
// servers that fail to list all supported algorithms. // servers that fail to list all supported algorithms.
@ -334,7 +326,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
// the key try to use the obtained algorithm as if "server-sig-algs" had // the key try to use the obtained algorithm as if "server-sig-algs" had
// not been implemented if supported from the algorithm signer. // not been implemented if supported from the algorithm signer.
if !ok && idx < origSignersLen && isRSACert(algo) && algo != CertAlgoRSAv01 { if !ok && idx < origSignersLen && isRSACert(algo) && algo != CertAlgoRSAv01 {
if contains(as.Algorithms(), KeyAlgoRSA) { if slices.Contains(as.Algorithms(), KeyAlgoRSA) {
// We retry using the compat algorithm after all signers have // We retry using the compat algorithm after all signers have
// been tried normally. // been tried normally.
signers = append(signers, &multiAlgorithmSigner{ signers = append(signers, &multiAlgorithmSigner{
@ -385,7 +377,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
// contain the "publickey" method, do not attempt to authenticate with any // contain the "publickey" method, do not attempt to authenticate with any
// other keys. According to RFC 4252 Section 7, the latter can occur when // other keys. According to RFC 4252 Section 7, the latter can occur when
// additional authentication methods are required. // additional authentication methods are required.
if success == authSuccess || !contains(methods, cb.method()) { if success == authSuccess || !slices.Contains(methods, cb.method()) {
return success, methods, err return success, methods, err
} }
} }
@ -434,7 +426,7 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
// servers send the key type instead. OpenSSH allows any algorithm // servers send the key type instead. OpenSSH allows any algorithm
// that matches the public key, so we do the same. // that matches the public key, so we do the same.
// https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709 // https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709
if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) { if !slices.Contains(algorithmsForKeyFormat(key.Type()), msg.Algo) {
return false, nil return false, nil
} }
if !bytes.Equal(msg.PubKey, pubKey) { if !bytes.Equal(msg.PubKey, pubKey) {

View File

@ -6,10 +6,12 @@ package ssh
import ( import (
"crypto" "crypto"
"crypto/fips140"
"crypto/rand" "crypto/rand"
"fmt" "fmt"
"io" "io"
"math" "math"
"slices"
"sync" "sync"
_ "crypto/sha1" _ "crypto/sha1"
@ -24,88 +26,298 @@ const (
serviceSSH = "ssh-connection" serviceSSH = "ssh-connection"
) )
// supportedCiphers lists ciphers we support but might not recommend. // The ciphers currently or previously implemented by this library, to use in
var supportedCiphers = []string{ // [Config.Ciphers]. For a list, see the [Algorithms.Ciphers] returned by
"aes128-ctr", "aes192-ctr", "aes256-ctr", // [SupportedAlgorithms] or [InsecureAlgorithms].
"aes128-gcm@openssh.com", gcm256CipherID, const (
chacha20Poly1305ID, CipherAES128GCM = "aes128-gcm@openssh.com"
"arcfour256", "arcfour128", "arcfour", CipherAES256GCM = "aes256-gcm@openssh.com"
aes128cbcID, CipherChaCha20Poly1305 = "chacha20-poly1305@openssh.com"
tripledescbcID, CipherAES128CTR = "aes128-ctr"
} CipherAES192CTR = "aes192-ctr"
CipherAES256CTR = "aes256-ctr"
InsecureCipherAES128CBC = "aes128-cbc"
InsecureCipherTripleDESCBC = "3des-cbc"
InsecureCipherRC4 = "arcfour"
InsecureCipherRC4128 = "arcfour128"
InsecureCipherRC4256 = "arcfour256"
)
// preferredCiphers specifies the default preference for ciphers. // The key exchanges currently or previously implemented by this library, to use
var preferredCiphers = []string{ // in [Config.KeyExchanges]. For a list, see the
"aes128-gcm@openssh.com", gcm256CipherID, // [Algorithms.KeyExchanges] returned by [SupportedAlgorithms] or
chacha20Poly1305ID, // [InsecureAlgorithms].
"aes128-ctr", "aes192-ctr", "aes256-ctr", const (
} InsecureKeyExchangeDH1SHA1 = "diffie-hellman-group1-sha1"
InsecureKeyExchangeDH14SHA1 = "diffie-hellman-group14-sha1"
KeyExchangeDH14SHA256 = "diffie-hellman-group14-sha256"
KeyExchangeDH16SHA512 = "diffie-hellman-group16-sha512"
KeyExchangeECDHP256 = "ecdh-sha2-nistp256"
KeyExchangeECDHP384 = "ecdh-sha2-nistp384"
KeyExchangeECDHP521 = "ecdh-sha2-nistp521"
KeyExchangeCurve25519 = "curve25519-sha256"
InsecureKeyExchangeDHGEXSHA1 = "diffie-hellman-group-exchange-sha1"
KeyExchangeDHGEXSHA256 = "diffie-hellman-group-exchange-sha256"
// KeyExchangeMLKEM768X25519 is supported from Go 1.24.
KeyExchangeMLKEM768X25519 = "mlkem768x25519-sha256"
// supportedKexAlgos specifies the supported key-exchange algorithms in // An alias for KeyExchangeCurve25519SHA256. This kex ID will be added if
// preference order. // KeyExchangeCurve25519SHA256 is requested for backward compatibility with
var supportedKexAlgos = []string{ // OpenSSH versions up to 7.2.
kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, keyExchangeCurve25519LibSSH = "curve25519-sha256@libssh.org"
// P384 and P521 are not constant-time yet, but since we don't )
// reuse ephemeral keys, using them for ECDH should be OK.
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
kexAlgoDH14SHA256, kexAlgoDH16SHA512, kexAlgoDH14SHA1,
kexAlgoDH1SHA1,
}
// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden // The message authentication code (MAC) currently or previously implemented by
// for the server half. // this library, to use in [Config.MACs]. For a list, see the
var serverForbiddenKexAlgos = map[string]struct{}{ // [Algorithms.MACs] returned by [SupportedAlgorithms] or
kexAlgoDHGEXSHA1: {}, // server half implementation is only minimal to satisfy the automated tests // [InsecureAlgorithms].
kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests const (
} HMACSHA256ETM = "hmac-sha2-256-etm@openssh.com"
HMACSHA512ETM = "hmac-sha2-512-etm@openssh.com"
// preferredKexAlgos specifies the default preference for key-exchange HMACSHA256 = "hmac-sha2-256"
// algorithms in preference order. The diffie-hellman-group16-sha512 algorithm HMACSHA512 = "hmac-sha2-512"
// is disabled by default because it is a bit slower than the others. HMACSHA1 = "hmac-sha1"
var preferredKexAlgos = []string{ InsecureHMACSHA196 = "hmac-sha1-96"
kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, )
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
kexAlgoDH14SHA256, kexAlgoDH14SHA1,
}
// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
// of authenticating servers) in preference order.
var supportedHostKeyAlgos = []string{
CertAlgoRSASHA256v01, CertAlgoRSASHA512v01,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
KeyAlgoRSASHA256, KeyAlgoRSASHA512,
KeyAlgoRSA, KeyAlgoDSA,
var (
// supportedKexAlgos specifies key-exchange algorithms implemented by this
// package in preference order, excluding those with security issues.
supportedKexAlgos = []string{
KeyExchangeMLKEM768X25519,
KeyExchangeCurve25519,
KeyExchangeECDHP256,
KeyExchangeECDHP384,
KeyExchangeECDHP521,
KeyExchangeDH14SHA256,
KeyExchangeDH16SHA512,
KeyExchangeDHGEXSHA256,
}
// defaultKexAlgos specifies the default preference for key-exchange
// algorithms in preference order.
defaultKexAlgos = []string{
KeyExchangeMLKEM768X25519,
KeyExchangeCurve25519,
KeyExchangeECDHP256,
KeyExchangeECDHP384,
KeyExchangeECDHP521,
KeyExchangeDH14SHA256,
InsecureKeyExchangeDH14SHA1,
}
// insecureKexAlgos specifies key-exchange algorithms implemented by this
// package and which have security issues.
insecureKexAlgos = []string{
InsecureKeyExchangeDH14SHA1,
InsecureKeyExchangeDH1SHA1,
InsecureKeyExchangeDHGEXSHA1,
}
// supportedCiphers specifies cipher algorithms implemented by this package
// in preference order, excluding those with security issues.
supportedCiphers = []string{
CipherAES128GCM,
CipherAES256GCM,
CipherChaCha20Poly1305,
CipherAES128CTR,
CipherAES192CTR,
CipherAES256CTR,
}
// defaultCiphers specifies the default preference for ciphers algorithms
// in preference order.
defaultCiphers = supportedCiphers
// insecureCiphers specifies cipher algorithms implemented by this
// package and which have security issues.
insecureCiphers = []string{
InsecureCipherAES128CBC,
InsecureCipherTripleDESCBC,
InsecureCipherRC4256,
InsecureCipherRC4128,
InsecureCipherRC4,
}
// supportedMACs specifies MAC algorithms implemented by this package in
// preference order, excluding those with security issues.
supportedMACs = []string{
HMACSHA256ETM,
HMACSHA512ETM,
HMACSHA256,
HMACSHA512,
HMACSHA1,
}
// defaultMACs specifies the default preference for MAC algorithms in
// preference order.
defaultMACs = []string{
HMACSHA256ETM,
HMACSHA512ETM,
HMACSHA256,
HMACSHA512,
HMACSHA1,
InsecureHMACSHA196,
}
// insecureMACs specifies MAC algorithms implemented by this
// package and which have security issues.
insecureMACs = []string{
InsecureHMACSHA196,
}
// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e.
// methods of authenticating servers) implemented by this package in
// preference order, excluding those with security issues.
supportedHostKeyAlgos = []string{
CertAlgoRSASHA256v01,
CertAlgoRSASHA512v01,
CertAlgoECDSA256v01,
CertAlgoECDSA384v01,
CertAlgoECDSA521v01,
CertAlgoED25519v01,
KeyAlgoRSASHA256,
KeyAlgoRSASHA512,
KeyAlgoECDSA256,
KeyAlgoECDSA384,
KeyAlgoECDSA521,
KeyAlgoED25519, KeyAlgoED25519,
}
// defaultHostKeyAlgos specifies the default preference for host-key
// algorithms in preference order.
defaultHostKeyAlgos = []string{
CertAlgoRSASHA256v01,
CertAlgoRSASHA512v01,
CertAlgoRSAv01,
InsecureCertAlgoDSAv01,
CertAlgoECDSA256v01,
CertAlgoECDSA384v01,
CertAlgoECDSA521v01,
CertAlgoED25519v01,
KeyAlgoECDSA256,
KeyAlgoECDSA384,
KeyAlgoECDSA521,
KeyAlgoRSASHA256,
KeyAlgoRSASHA512,
KeyAlgoRSA,
InsecureKeyAlgoDSA,
KeyAlgoED25519,
}
// insecureHostKeyAlgos specifies host-key algorithms implemented by this
// package and which have security issues.
insecureHostKeyAlgos = []string{
KeyAlgoRSA,
InsecureKeyAlgoDSA,
CertAlgoRSAv01,
InsecureCertAlgoDSAv01,
}
// supportedPubKeyAuthAlgos specifies the supported client public key
// authentication algorithms. Note that this doesn't include certificate
// types since those use the underlying algorithm. Order is irrelevant.
supportedPubKeyAuthAlgos = []string{
KeyAlgoED25519,
KeyAlgoSKED25519,
KeyAlgoSKECDSA256,
KeyAlgoECDSA256,
KeyAlgoECDSA384,
KeyAlgoECDSA521,
KeyAlgoRSASHA256,
KeyAlgoRSASHA512,
}
// defaultPubKeyAuthAlgos specifies the preferred client public key
// authentication algorithms. This list is sent to the client if it supports
// the server-sig-algs extension. Order is irrelevant.
defaultPubKeyAuthAlgos = []string{
KeyAlgoED25519,
KeyAlgoSKED25519,
KeyAlgoSKECDSA256,
KeyAlgoECDSA256,
KeyAlgoECDSA384,
KeyAlgoECDSA521,
KeyAlgoRSASHA256,
KeyAlgoRSASHA512,
KeyAlgoRSA,
InsecureKeyAlgoDSA,
}
// insecurePubKeyAuthAlgos specifies client public key authentication
// algorithms implemented by this package and which have security issues.
insecurePubKeyAuthAlgos = []string{
KeyAlgoRSA,
InsecureKeyAlgoDSA,
}
)
// NegotiatedAlgorithms defines algorithms negotiated between client and server.
type NegotiatedAlgorithms struct {
KeyExchange string
HostKey string
Read DirectionAlgorithms
Write DirectionAlgorithms
} }
// supportedMACs specifies a default set of MAC algorithms in preference order. // Algorithms defines a set of algorithms that can be configured in the client
// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed // or server config for negotiation during a handshake.
// because they have reached the end of their useful life. type Algorithms struct {
var supportedMACs = []string{ KeyExchanges []string
"hmac-sha2-256-etm@openssh.com", "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", Ciphers []string
MACs []string
HostKeys []string
PublicKeyAuths []string
}
func init() {
if fips140.Enabled() {
defaultHostKeyAlgos = slices.DeleteFunc(defaultHostKeyAlgos, func(algo string) bool {
_, err := hashFunc(underlyingAlgo(algo))
return err != nil
})
defaultPubKeyAuthAlgos = slices.DeleteFunc(defaultPubKeyAuthAlgos, func(algo string) bool {
_, err := hashFunc(underlyingAlgo(algo))
return err != nil
})
}
}
func hashFunc(format string) (crypto.Hash, error) {
switch format {
case KeyAlgoRSASHA256, KeyAlgoECDSA256, KeyAlgoSKED25519, KeyAlgoSKECDSA256:
return crypto.SHA256, nil
case KeyAlgoECDSA384:
return crypto.SHA384, nil
case KeyAlgoRSASHA512, KeyAlgoECDSA521:
return crypto.SHA512, nil
case KeyAlgoED25519:
// KeyAlgoED25519 doesn't pre-hash.
return 0, nil
case KeyAlgoRSA, InsecureKeyAlgoDSA:
if fips140.Enabled() {
return 0, fmt.Errorf("ssh: hash algorithm for format %q not allowed in FIPS 140 mode", format)
}
return crypto.SHA1, nil
default:
return 0, fmt.Errorf("ssh: hash algorithm for format %q not mapped", format)
}
}
// SupportedAlgorithms returns algorithms currently implemented by this package,
// excluding those with security issues, which are returned by
// InsecureAlgorithms. The algorithms listed here are in preference order.
func SupportedAlgorithms() Algorithms {
return Algorithms{
Ciphers: slices.Clone(supportedCiphers),
MACs: slices.Clone(supportedMACs),
KeyExchanges: slices.Clone(supportedKexAlgos),
HostKeys: slices.Clone(supportedHostKeyAlgos),
PublicKeyAuths: slices.Clone(supportedPubKeyAuthAlgos),
}
}
// InsecureAlgorithms returns algorithms currently implemented by this package
// and which have security issues.
func InsecureAlgorithms() Algorithms {
return Algorithms{
KeyExchanges: slices.Clone(insecureKexAlgos),
Ciphers: slices.Clone(insecureCiphers),
MACs: slices.Clone(insecureMACs),
HostKeys: slices.Clone(insecureHostKeyAlgos),
PublicKeyAuths: slices.Clone(insecurePubKeyAuthAlgos),
}
} }
var supportedCompressions = []string{compressionNone} var supportedCompressions = []string{compressionNone}
// hashFuncs keeps the mapping of supported signature algorithms to their
// respective hashes needed for signing and verification.
var hashFuncs = map[string]crypto.Hash{
KeyAlgoRSA: crypto.SHA1,
KeyAlgoRSASHA256: crypto.SHA256,
KeyAlgoRSASHA512: crypto.SHA512,
KeyAlgoDSA: crypto.SHA1,
KeyAlgoECDSA256: crypto.SHA256,
KeyAlgoECDSA384: crypto.SHA384,
KeyAlgoECDSA521: crypto.SHA512,
// KeyAlgoED25519 doesn't pre-hash.
KeyAlgoSKECDSA256: crypto.SHA256,
KeyAlgoSKED25519: crypto.SHA256,
}
// algorithmsForKeyFormat returns the supported signature algorithms for a given // algorithmsForKeyFormat returns the supported signature algorithms for a given
// public key format (PublicKey.Type), in order of preference. See RFC 8332, // public key format (PublicKey.Type), in order of preference. See RFC 8332,
// Section 2. See also the note in sendKexInit on backwards compatibility. // Section 2. See also the note in sendKexInit on backwards compatibility.
@ -120,11 +332,40 @@ func algorithmsForKeyFormat(keyFormat string) []string {
} }
} }
// keyFormatForAlgorithm returns the key format corresponding to the given
// signature algorithm. It returns an empty string if the signature algorithm is
// invalid or unsupported.
func keyFormatForAlgorithm(sigAlgo string) string {
switch sigAlgo {
case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512:
return KeyAlgoRSA
case CertAlgoRSAv01, CertAlgoRSASHA256v01, CertAlgoRSASHA512v01:
return CertAlgoRSAv01
case KeyAlgoED25519,
KeyAlgoSKED25519,
KeyAlgoSKECDSA256,
KeyAlgoECDSA256,
KeyAlgoECDSA384,
KeyAlgoECDSA521,
InsecureKeyAlgoDSA,
InsecureCertAlgoDSAv01,
CertAlgoECDSA256v01,
CertAlgoECDSA384v01,
CertAlgoECDSA521v01,
CertAlgoSKECDSA256v01,
CertAlgoED25519v01,
CertAlgoSKED25519v01:
return sigAlgo
default:
return ""
}
}
// isRSA returns whether algo is a supported RSA algorithm, including certificate // isRSA returns whether algo is a supported RSA algorithm, including certificate
// algorithms. // algorithms.
func isRSA(algo string) bool { func isRSA(algo string) bool {
algos := algorithmsForKeyFormat(KeyAlgoRSA) algos := algorithmsForKeyFormat(KeyAlgoRSA)
return contains(algos, underlyingAlgo(algo)) return slices.Contains(algos, underlyingAlgo(algo))
} }
func isRSACert(algo string) bool { func isRSACert(algo string) bool {
@ -135,18 +376,6 @@ func isRSACert(algo string) bool {
return isRSA(algo) return isRSA(algo)
} }
// supportedPubKeyAuthAlgos specifies the supported client public key
// authentication algorithms. Note that this doesn't include certificate types
// since those use the underlying algorithm. This list is sent to the client if
// it supports the server-sig-algs extension. Order is irrelevant.
var supportedPubKeyAuthAlgos = []string{
KeyAlgoED25519,
KeyAlgoSKED25519, KeyAlgoSKECDSA256,
KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA,
KeyAlgoDSA,
}
// unexpectedMessageError results when the SSH message that we received didn't // unexpectedMessageError results when the SSH message that we received didn't
// match what we wanted. // match what we wanted.
func unexpectedMessageError(expected, got uint8) error { func unexpectedMessageError(expected, got uint8) error {
@ -158,7 +387,7 @@ func parseError(tag uint8) error {
return fmt.Errorf("ssh: parse error in message type %d", tag) return fmt.Errorf("ssh: parse error in message type %d", tag)
} }
func findCommon(what string, client []string, server []string) (common string, err error) { func findCommon(what string, client []string, server []string, isClient bool) (string, error) {
for _, c := range client { for _, c := range client {
for _, s := range server { for _, s := range server {
if c == s { if c == s {
@ -166,23 +395,49 @@ func findCommon(what string, client []string, server []string) (common string, e
} }
} }
} }
return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) err := &AlgorithmNegotiationError{
What: what,
}
if isClient {
err.SupportedAlgorithms = client
err.RequestedAlgorithms = server
} else {
err.SupportedAlgorithms = server
err.RequestedAlgorithms = client
}
return "", err
} }
// directionAlgorithms records algorithm choices in one direction (either read or write) // AlgorithmNegotiationError defines the error returned if the client and the
type directionAlgorithms struct { // server cannot agree on an algorithm for key exchange, host key, cipher, MAC.
type AlgorithmNegotiationError struct {
What string
// RequestedAlgorithms lists the algorithms supported by the peer.
RequestedAlgorithms []string
// SupportedAlgorithms lists the algorithms supported on our side.
SupportedAlgorithms []string
}
func (a *AlgorithmNegotiationError) Error() string {
return fmt.Sprintf("ssh: no common algorithm for %s; we offered: %v, peer offered: %v",
a.What, a.SupportedAlgorithms, a.RequestedAlgorithms)
}
// DirectionAlgorithms defines the algorithms negotiated in one direction
// (either read or write).
type DirectionAlgorithms struct {
Cipher string Cipher string
MAC string MAC string
Compression string compression string
} }
// rekeyBytes returns a rekeying intervals in bytes. // rekeyBytes returns a rekeying intervals in bytes.
func (a *directionAlgorithms) rekeyBytes() int64 { func (a *DirectionAlgorithms) rekeyBytes() int64 {
// According to RFC 4344 block ciphers should rekey after // According to RFC 4344 block ciphers should rekey after
// 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is
// 128. // 128.
switch a.Cipher { switch a.Cipher {
case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcm128CipherID, gcm256CipherID, aes128cbcID: case CipherAES128CTR, CipherAES192CTR, CipherAES256CTR, CipherAES128GCM, CipherAES256GCM, InsecureCipherAES128CBC:
return 16 * (1 << 32) return 16 * (1 << 32)
} }
@ -192,66 +447,59 @@ func (a *directionAlgorithms) rekeyBytes() int64 {
} }
var aeadCiphers = map[string]bool{ var aeadCiphers = map[string]bool{
gcm128CipherID: true, CipherAES128GCM: true,
gcm256CipherID: true, CipherAES256GCM: true,
chacha20Poly1305ID: true, CipherChaCha20Poly1305: true,
} }
type algorithms struct { func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *NegotiatedAlgorithms, err error) {
kex string result := &NegotiatedAlgorithms{}
hostKey string
w directionAlgorithms
r directionAlgorithms
}
func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { result.KeyExchange, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos, isClient)
result := &algorithms{}
result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
if err != nil { if err != nil {
return return
} }
result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) result.HostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos, isClient)
if err != nil { if err != nil {
return return
} }
stoc, ctos := &result.w, &result.r stoc, ctos := &result.Write, &result.Read
if isClient { if isClient {
ctos, stoc = stoc, ctos ctos, stoc = stoc, ctos
} }
ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer, isClient)
if err != nil { if err != nil {
return return
} }
stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient, isClient)
if err != nil { if err != nil {
return return
} }
if !aeadCiphers[ctos.Cipher] { if !aeadCiphers[ctos.Cipher] {
ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer, isClient)
if err != nil { if err != nil {
return return
} }
} }
if !aeadCiphers[stoc.Cipher] { if !aeadCiphers[stoc.Cipher] {
stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient, isClient)
if err != nil { if err != nil {
return return
} }
} }
ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) ctos.compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer, isClient)
if err != nil { if err != nil {
return return
} }
stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) stoc.compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient, isClient)
if err != nil { if err != nil {
return return
} }
@ -297,7 +545,7 @@ func (c *Config) SetDefaults() {
c.Rand = rand.Reader c.Rand = rand.Reader
} }
if c.Ciphers == nil { if c.Ciphers == nil {
c.Ciphers = preferredCiphers c.Ciphers = defaultCiphers
} }
var ciphers []string var ciphers []string
for _, c := range c.Ciphers { for _, c := range c.Ciphers {
@ -309,19 +557,22 @@ func (c *Config) SetDefaults() {
c.Ciphers = ciphers c.Ciphers = ciphers
if c.KeyExchanges == nil { if c.KeyExchanges == nil {
c.KeyExchanges = preferredKexAlgos c.KeyExchanges = defaultKexAlgos
} }
var kexs []string var kexs []string
for _, k := range c.KeyExchanges { for _, k := range c.KeyExchanges {
if kexAlgoMap[k] != nil { if kexAlgoMap[k] != nil {
// Ignore the KEX if we have no kexAlgoMap definition. // Ignore the KEX if we have no kexAlgoMap definition.
kexs = append(kexs, k) kexs = append(kexs, k)
if k == KeyExchangeCurve25519 && !slices.Contains(c.KeyExchanges, keyExchangeCurve25519LibSSH) {
kexs = append(kexs, keyExchangeCurve25519LibSSH)
}
} }
} }
c.KeyExchanges = kexs c.KeyExchanges = kexs
if c.MACs == nil { if c.MACs == nil {
c.MACs = supportedMACs c.MACs = defaultMACs
} }
var macs []string var macs []string
for _, m := range c.MACs { for _, m := range c.MACs {

View File

@ -74,6 +74,13 @@ type Conn interface {
// Disconnect // Disconnect
} }
// AlgorithmsConnMetadata is a ConnMetadata that can return the algorithms
// negotiated between client and server.
type AlgorithmsConnMetadata interface {
ConnMetadata
Algorithms() NegotiatedAlgorithms
}
// DiscardRequests consumes and rejects all requests from the // DiscardRequests consumes and rejects all requests from the
// passed-in channel. // passed-in channel.
func DiscardRequests(in <-chan *Request) { func DiscardRequests(in <-chan *Request) {
@ -106,6 +113,7 @@ type sshConn struct {
sessionID []byte sessionID []byte
clientVersion []byte clientVersion []byte
serverVersion []byte serverVersion []byte
algorithms NegotiatedAlgorithms
} }
func dup(src []byte) []byte { func dup(src []byte) []byte {
@ -141,3 +149,7 @@ func (c *sshConn) ClientVersion() []byte {
func (c *sshConn) ServerVersion() []byte { func (c *sshConn) ServerVersion() []byte {
return dup(c.serverVersion) return dup(c.serverVersion)
} }
func (c *sshConn) Algorithms() NegotiatedAlgorithms {
return c.algorithms
}

View File

@ -16,8 +16,19 @@ References:
[PROTOCOL]: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=HEAD [PROTOCOL]: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=HEAD
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
[SSH-CERTS]: https://datatracker.ietf.org/doc/html/draft-miller-ssh-cert-01
[FIPS 140-3 mode]: https://go.dev/doc/security/fips140
This package does not fall under the stability promise of the Go language itself, This package does not fall under the stability promise of the Go language itself,
so its API may be changed when pressing needs arise. so its API may be changed when pressing needs arise.
# FIPS 140-3 mode
When the program is in [FIPS 140-3 mode], this package behaves as if only SP
800-140C and SP 800-140D approved cipher suites, signature algorithms,
certificate public key types and sizes, and key exchange and derivation
algorithms were implemented. Others are silently ignored and not negotiated, or
rejected. This set may depend on the algorithms supported by the FIPS 140-3 Go
Cryptographic Module selected with GOFIPS140, and may change across Go versions.
*/ */
package ssh package ssh

View File

@ -10,6 +10,7 @@ import (
"io" "io"
"log" "log"
"net" "net"
"slices"
"strings" "strings"
"sync" "sync"
) )
@ -38,7 +39,7 @@ type keyingTransport interface {
// prepareKeyChange sets up a key change. The key change for a // prepareKeyChange sets up a key change. The key change for a
// direction will be effected if a msgNewKeys message is sent // direction will be effected if a msgNewKeys message is sent
// or received. // or received.
prepareKeyChange(*algorithms, *kexResult) error prepareKeyChange(*NegotiatedAlgorithms, *kexResult) error
// setStrictMode sets the strict KEX mode, notably triggering // setStrictMode sets the strict KEX mode, notably triggering
// sequence number resets on sending or receiving msgNewKeys. // sequence number resets on sending or receiving msgNewKeys.
@ -115,7 +116,7 @@ type handshakeTransport struct {
bannerCallback BannerCallback bannerCallback BannerCallback
// Algorithms agreed in the last key exchange. // Algorithms agreed in the last key exchange.
algorithms *algorithms algorithms *NegotiatedAlgorithms
// Counters exclusively owned by readLoop. // Counters exclusively owned by readLoop.
readPacketsLeft uint32 readPacketsLeft uint32
@ -164,7 +165,7 @@ func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byt
if config.HostKeyAlgorithms != nil { if config.HostKeyAlgorithms != nil {
t.hostKeyAlgorithms = config.HostKeyAlgorithms t.hostKeyAlgorithms = config.HostKeyAlgorithms
} else { } else {
t.hostKeyAlgorithms = supportedHostKeyAlgos t.hostKeyAlgorithms = defaultHostKeyAlgos
} }
go t.readLoop() go t.readLoop()
go t.kexLoop() go t.kexLoop()
@ -184,6 +185,10 @@ func (t *handshakeTransport) getSessionID() []byte {
return t.sessionID return t.sessionID
} }
func (t *handshakeTransport) getAlgorithms() NegotiatedAlgorithms {
return *t.algorithms
}
// waitSession waits for the session to be established. This should be // waitSession waits for the session to be established. This should be
// the first thing to call after instantiating handshakeTransport. // the first thing to call after instantiating handshakeTransport.
func (t *handshakeTransport) waitSession() error { func (t *handshakeTransport) waitSession() error {
@ -290,7 +295,7 @@ func (t *handshakeTransport) resetWriteThresholds() {
if t.config.RekeyThreshold > 0 { if t.config.RekeyThreshold > 0 {
t.writeBytesLeft = int64(t.config.RekeyThreshold) t.writeBytesLeft = int64(t.config.RekeyThreshold)
} else if t.algorithms != nil { } else if t.algorithms != nil {
t.writeBytesLeft = t.algorithms.w.rekeyBytes() t.writeBytesLeft = t.algorithms.Write.rekeyBytes()
} else { } else {
t.writeBytesLeft = 1 << 30 t.writeBytesLeft = 1 << 30
} }
@ -407,7 +412,7 @@ func (t *handshakeTransport) resetReadThresholds() {
if t.config.RekeyThreshold > 0 { if t.config.RekeyThreshold > 0 {
t.readBytesLeft = int64(t.config.RekeyThreshold) t.readBytesLeft = int64(t.config.RekeyThreshold)
} else if t.algorithms != nil { } else if t.algorithms != nil {
t.readBytesLeft = t.algorithms.r.rekeyBytes() t.readBytesLeft = t.algorithms.Read.rekeyBytes()
} else { } else {
t.readBytesLeft = 1 << 30 t.readBytesLeft = 1 << 30
} }
@ -523,7 +528,7 @@ func (t *handshakeTransport) sendKexInit() error {
switch s := k.(type) { switch s := k.(type) {
case MultiAlgorithmSigner: case MultiAlgorithmSigner:
for _, algo := range algorithmsForKeyFormat(keyFormat) { for _, algo := range algorithmsForKeyFormat(keyFormat) {
if contains(s.Algorithms(), underlyingAlgo(algo)) { if slices.Contains(s.Algorithms(), underlyingAlgo(algo)) {
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo) msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo)
} }
} }
@ -675,7 +680,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
return err return err
} }
if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) { if t.sessionID == nil && ((isClient && slices.Contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && slices.Contains(clientInit.KexAlgos, kexStrictClient))) {
t.strictMode = true t.strictMode = true
if err := t.conn.setStrictMode(); err != nil { if err := t.conn.setStrictMode(); err != nil {
return err return err
@ -700,9 +705,9 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
} }
} }
kex, ok := kexAlgoMap[t.algorithms.kex] kex, ok := kexAlgoMap[t.algorithms.KeyExchange]
if !ok { if !ok {
return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.KeyExchange)
} }
var result *kexResult var result *kexResult
@ -732,7 +737,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
// On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO // On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO
// message with the server-sig-algs extension if the client supports it. See // message with the server-sig-algs extension if the client supports it. See
// RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9. // RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9.
if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") { if !isClient && firstKeyExchange && slices.Contains(clientInit.KexAlgos, "ext-info-c") {
supportedPubKeyAuthAlgosList := strings.Join(t.publicKeyAuthAlgorithms, ",") supportedPubKeyAuthAlgosList := strings.Join(t.publicKeyAuthAlgorithms, ",")
extInfo := &extInfoMsg{ extInfo := &extInfoMsg{
NumExtensions: 2, NumExtensions: 2,
@ -786,7 +791,7 @@ func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, a
func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
for _, k := range hostKeys { for _, k := range hostKeys {
if s, ok := k.(MultiAlgorithmSigner); ok { if s, ok := k.(MultiAlgorithmSigner); ok {
if !contains(s.Algorithms(), underlyingAlgo(algo)) { if !slices.Contains(s.Algorithms(), underlyingAlgo(algo)) {
continue continue
} }
} }
@ -809,12 +814,12 @@ func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
} }
func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) {
hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey) hostKey := pickHostKey(t.hostKeys, t.algorithms.HostKey)
if hostKey == nil { if hostKey == nil {
return nil, errors.New("ssh: internal error: negotiated unsupported signature type") return nil, errors.New("ssh: internal error: negotiated unsupported signature type")
} }
r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey) r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.HostKey)
return r, err return r, err
} }
@ -829,7 +834,7 @@ func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (
return nil, err return nil, err
} }
if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil { if err := verifyHostKeySignature(hostKey, t.algorithms.HostKey, result); err != nil {
return nil, err return nil, err
} }

151
vendor/golang.org/x/crypto/ssh/kex.go generated vendored
View File

@ -8,33 +8,31 @@ import (
"crypto" "crypto"
"crypto/ecdsa" "crypto/ecdsa"
"crypto/elliptic" "crypto/elliptic"
"crypto/fips140"
"crypto/rand" "crypto/rand"
"crypto/subtle"
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"math/big" "math/big"
"slices"
"golang.org/x/crypto/curve25519" "golang.org/x/crypto/curve25519"
) )
const ( const (
kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" // This is the group called diffie-hellman-group1-sha1 in RFC 4253 and
kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" // Oakley Group 2 in RFC 2409.
kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" oakleyGroup2 = "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF"
kexAlgoDH16SHA512 = "diffie-hellman-group16-sha512" // This is the group called diffie-hellman-group14-sha1 in RFC 4253 and
kexAlgoECDH256 = "ecdh-sha2-nistp256" // Oakley Group 14 in RFC 3526.
kexAlgoECDH384 = "ecdh-sha2-nistp384" oakleyGroup14 = "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF"
kexAlgoECDH521 = "ecdh-sha2-nistp521" // This is the group called diffie-hellman-group15-sha512 in RFC 8268 and
kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org" // Oakley Group 15 in RFC 3526.
kexAlgoCurve25519SHA256 = "curve25519-sha256" oakleyGroup15 = "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF"
// This is the group called diffie-hellman-group16-sha512 in RFC 8268 and
// For the following kex only the client half contains a production // Oakley Group 16 in RFC 3526.
// ready implementation. The server half only consists of a minimal oakleyGroup16 = "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF"
// implementation to satisfy the automated tests.
kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1"
kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256"
) )
// kexResult captures the outcome of a key exchange. // kexResult captures the outcome of a key exchange.
@ -399,56 +397,64 @@ func ecHash(curve elliptic.Curve) crypto.Hash {
return crypto.SHA512 return crypto.SHA512
} }
// kexAlgoMap defines the supported KEXs. KEXs not included are not supported
// and will not be negotiated, even if explicitly configured. When FIPS mode is
// enabled, only FIPS-approved algorithms are included.
var kexAlgoMap = map[string]kexAlgorithm{} var kexAlgoMap = map[string]kexAlgorithm{}
func init() { func init() {
// This is the group called diffie-hellman-group1-sha1 in // mlkem768x25519-sha256 we'll work with fips140=on but not fips140=only
// RFC 4253 and Oakley Group 2 in RFC 2409. // until Go 1.26.
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{}
kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ kexAlgoMap[KeyExchangeECDHP521] = &ecdh{elliptic.P521()}
kexAlgoMap[KeyExchangeECDHP384] = &ecdh{elliptic.P384()}
kexAlgoMap[KeyExchangeECDHP256] = &ecdh{elliptic.P256()}
if fips140.Enabled() {
defaultKexAlgos = slices.DeleteFunc(defaultKexAlgos, func(algo string) bool {
_, ok := kexAlgoMap[algo]
return !ok
})
return
}
p, _ := new(big.Int).SetString(oakleyGroup2, 16)
kexAlgoMap[InsecureKeyExchangeDH1SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2), g: new(big.Int).SetInt64(2),
p: p, p: p,
pMinus1: new(big.Int).Sub(p, bigOne), pMinus1: new(big.Int).Sub(p, bigOne),
hashFunc: crypto.SHA1, hashFunc: crypto.SHA1,
} }
// This are the groups called diffie-hellman-group14-sha1 and p, _ = new(big.Int).SetString(oakleyGroup14, 16)
// diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268,
// and Oakley Group 14 in RFC 3526.
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
group14 := &dhGroup{ group14 := &dhGroup{
g: new(big.Int).SetInt64(2), g: new(big.Int).SetInt64(2),
p: p, p: p,
pMinus1: new(big.Int).Sub(p, bigOne), pMinus1: new(big.Int).Sub(p, bigOne),
} }
kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ kexAlgoMap[InsecureKeyExchangeDH14SHA1] = &dhGroup{
g: group14.g, p: group14.p, pMinus1: group14.pMinus1, g: group14.g, p: group14.p, pMinus1: group14.pMinus1,
hashFunc: crypto.SHA1, hashFunc: crypto.SHA1,
} }
kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{ kexAlgoMap[KeyExchangeDH14SHA256] = &dhGroup{
g: group14.g, p: group14.p, pMinus1: group14.pMinus1, g: group14.g, p: group14.p, pMinus1: group14.pMinus1,
hashFunc: crypto.SHA256, hashFunc: crypto.SHA256,
} }
// This is the group called diffie-hellman-group16-sha512 in RFC p, _ = new(big.Int).SetString(oakleyGroup16, 16)
// 8268 and Oakley Group 16 in RFC 3526.
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF", 16)
kexAlgoMap[kexAlgoDH16SHA512] = &dhGroup{ kexAlgoMap[KeyExchangeDH16SHA512] = &dhGroup{
g: new(big.Int).SetInt64(2), g: new(big.Int).SetInt64(2),
p: p, p: p,
pMinus1: new(big.Int).Sub(p, bigOne), pMinus1: new(big.Int).Sub(p, bigOne),
hashFunc: crypto.SHA512, hashFunc: crypto.SHA512,
} }
kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} kexAlgoMap[KeyExchangeCurve25519] = &curve25519sha256{}
kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} kexAlgoMap[keyExchangeCurve25519LibSSH] = &curve25519sha256{}
kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} kexAlgoMap[InsecureKeyExchangeDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1}
kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} kexAlgoMap[KeyExchangeDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256}
kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] = &curve25519sha256{}
kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1}
kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256}
} }
// curve25519sha256 implements the curve25519-sha256 (formerly known as // curve25519sha256 implements the curve25519-sha256 (formerly known as
@ -464,15 +470,17 @@ func (kp *curve25519KeyPair) generate(rand io.Reader) error {
if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { if _, err := io.ReadFull(rand, kp.priv[:]); err != nil {
return err return err
} }
curve25519.ScalarBaseMult(&kp.pub, &kp.priv) p, err := curve25519.X25519(kp.priv[:], curve25519.Basepoint)
if err != nil {
return fmt.Errorf("curve25519: %w", err)
}
if len(p) != 32 {
return fmt.Errorf("curve25519: internal error: X25519 returned %d bytes, expected 32", len(p))
}
copy(kp.pub[:], p)
return nil return nil
} }
// curve25519Zeros is just an array of 32 zero bytes so that we have something
// convenient to compare against in order to reject curve25519 points with the
// wrong order.
var curve25519Zeros [32]byte
func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
var kp curve25519KeyPair var kp curve25519KeyPair
if err := kp.generate(rand); err != nil { if err := kp.generate(rand); err != nil {
@ -495,11 +503,9 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh
return nil, errors.New("ssh: peer's curve25519 public value has wrong length") return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
} }
var servPub, secret [32]byte secret, err := curve25519.X25519(kp.priv[:], reply.EphemeralPubKey)
copy(servPub[:], reply.EphemeralPubKey) if err != nil {
curve25519.ScalarMult(&secret, &kp.priv, &servPub) return nil, fmt.Errorf("ssh: peer's curve25519 public value is not valid: %w", err)
if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
} }
h := crypto.SHA256.New() h := crypto.SHA256.New()
@ -541,11 +547,9 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh
return nil, err return nil, err
} }
var clientPub, secret [32]byte secret, err := curve25519.X25519(kp.priv[:], kexInit.ClientPubKey)
copy(clientPub[:], kexInit.ClientPubKey) if err != nil {
curve25519.ScalarMult(&secret, &kp.priv, &clientPub) return nil, fmt.Errorf("ssh: peer's curve25519 public value is not valid: %w", err)
if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
} }
hostKeyBytes := priv.PublicKey().Marshal() hostKeyBytes := priv.PublicKey().Marshal()
@ -602,7 +606,7 @@ func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshak
// Send GexRequest // Send GexRequest
kexDHGexRequest := kexDHGexRequestMsg{ kexDHGexRequest := kexDHGexRequestMsg{
MinBits: dhGroupExchangeMinimumBits, MinBits: dhGroupExchangeMinimumBits,
PreferedBits: dhGroupExchangePreferredBits, PreferredBits: dhGroupExchangePreferredBits,
MaxBits: dhGroupExchangeMaximumBits, MaxBits: dhGroupExchangeMaximumBits,
} }
if err := c.writePacket(Marshal(&kexDHGexRequest)); err != nil { if err := c.writePacket(Marshal(&kexDHGexRequest)); err != nil {
@ -690,9 +694,7 @@ func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshak
} }
// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. // Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256.
// func (gex *dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
// This is a minimal implementation to satisfy the automated tests.
func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
// Receive GexRequest // Receive GexRequest
packet, err := c.readPacket() packet, err := c.readPacket()
if err != nil { if err != nil {
@ -702,13 +704,32 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
if err = Unmarshal(packet, &kexDHGexRequest); err != nil { if err = Unmarshal(packet, &kexDHGexRequest); err != nil {
return return
} }
// We check that the request received is valid and that the MaxBits
// requested are at least equal to our supported minimum. This is the same
// check done in OpenSSH:
// https://github.com/openssh/openssh-portable/blob/80a2f64b/kexgexs.c#L94
//
// Furthermore, we also check that the required MinBits are less than or
// equal to 4096 because we can use up to Oakley Group 16.
if kexDHGexRequest.MaxBits < kexDHGexRequest.MinBits || kexDHGexRequest.PreferredBits < kexDHGexRequest.MinBits ||
kexDHGexRequest.MaxBits < kexDHGexRequest.PreferredBits || kexDHGexRequest.MaxBits < dhGroupExchangeMinimumBits ||
kexDHGexRequest.MinBits > 4096 {
return nil, fmt.Errorf("ssh: DH GEX request out of range, min: %d, max: %d, preferred: %d", kexDHGexRequest.MinBits,
kexDHGexRequest.MaxBits, kexDHGexRequest.PreferredBits)
}
var p *big.Int
// We hardcode sending Oakley Group 14 (2048 bits), Oakley Group 15 (3072
// bits) or Oakley Group 16 (4096 bits), based on the requested max size.
if kexDHGexRequest.MaxBits < 3072 {
p, _ = new(big.Int).SetString(oakleyGroup14, 16)
} else if kexDHGexRequest.MaxBits < 4096 {
p, _ = new(big.Int).SetString(oakleyGroup15, 16)
} else {
p, _ = new(big.Int).SetString(oakleyGroup16, 16)
}
// Send GexGroup
// This is the group called diffie-hellman-group14-sha1 in RFC
// 4253 and Oakley Group 14 in RFC 3526.
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
g := big.NewInt(2) g := big.NewInt(2)
msg := &kexDHGexGroupMsg{ msg := &kexDHGexGroupMsg{
P: p, P: p,
G: g, G: g,
@ -746,9 +767,9 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
h := gex.hashFunc.New() h := gex.hashFunc.New()
magics.write(h) magics.write(h)
writeString(h, hostKeyBytes) writeString(h, hostKeyBytes)
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) binary.Write(h, binary.BigEndian, kexDHGexRequest.MinBits)
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) binary.Write(h, binary.BigEndian, kexDHGexRequest.PreferredBits)
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) binary.Write(h, binary.BigEndian, kexDHGexRequest.MaxBits)
writeInt(h, p) writeInt(h, p)
writeInt(h, g) writeInt(h, g)
writeInt(h, kexDHGexInit.X) writeInt(h, kexDHGexInit.X)

View File

@ -27,6 +27,7 @@ import (
"fmt" "fmt"
"io" "io"
"math/big" "math/big"
"slices"
"strings" "strings"
"golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf"
@ -37,7 +38,12 @@ import (
// arguments. // arguments.
const ( const (
KeyAlgoRSA = "ssh-rsa" KeyAlgoRSA = "ssh-rsa"
KeyAlgoDSA = "ssh-dss" // Deprecated: DSA is only supported at insecure key sizes, and was removed
// from major implementations.
KeyAlgoDSA = InsecureKeyAlgoDSA
// Deprecated: DSA is only supported at insecure key sizes, and was removed
// from major implementations.
InsecureKeyAlgoDSA = "ssh-dss"
KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" KeyAlgoECDSA256 = "ecdsa-sha2-nistp256"
KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com"
KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" KeyAlgoECDSA384 = "ecdsa-sha2-nistp384"
@ -67,7 +73,7 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err
switch algo { switch algo {
case KeyAlgoRSA: case KeyAlgoRSA:
return parseRSA(in) return parseRSA(in)
case KeyAlgoDSA: case InsecureKeyAlgoDSA:
return parseDSA(in) return parseDSA(in)
case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
return parseECDSA(in) return parseECDSA(in)
@ -77,13 +83,18 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err
return parseED25519(in) return parseED25519(in)
case KeyAlgoSKED25519: case KeyAlgoSKED25519:
return parseSKEd25519(in) return parseSKEd25519(in)
case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: case CertAlgoRSAv01, InsecureCertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
cert, err := parseCert(in, certKeyAlgoNames[algo]) cert, err := parseCert(in, certKeyAlgoNames[algo])
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
return cert, nil, nil return cert, nil, nil
} }
if keyFormat := keyFormatForAlgorithm(algo); keyFormat != "" {
return nil, nil, fmt.Errorf("ssh: signature algorithm %q isn't a key format; key is malformed and should be re-encoded with type %q",
algo, keyFormat)
}
return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo)
} }
@ -186,9 +197,10 @@ func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey
return "", nil, nil, "", nil, io.EOF return "", nil, nil, "", nil, io.EOF
} }
// ParseAuthorizedKey parses a public key from an authorized_keys // ParseAuthorizedKey parses a public key from an authorized_keys file used in
// file used in OpenSSH according to the sshd(8) manual page. // OpenSSH according to the sshd(8) manual page. Invalid lines are ignored.
func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
var lastErr error
for len(in) > 0 { for len(in) > 0 {
end := bytes.IndexByte(in, '\n') end := bytes.IndexByte(in, '\n')
if end != -1 { if end != -1 {
@ -217,6 +229,8 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
return out, comment, options, rest, nil return out, comment, options, rest, nil
} else {
lastErr = err
} }
// No key type recognised. Maybe there's an options field at // No key type recognised. Maybe there's an options field at
@ -259,16 +273,22 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
options = candidateOptions options = candidateOptions
return out, comment, options, rest, nil return out, comment, options, rest, nil
} else {
lastErr = err
} }
in = rest in = rest
continue continue
} }
if lastErr != nil {
return nil, "", nil, nil, fmt.Errorf("ssh: no key found; last parsing error for ignored line: %w", lastErr)
}
return nil, "", nil, nil, errors.New("ssh: no key found") return nil, "", nil, nil, errors.New("ssh: no key found")
} }
// ParsePublicKey parses an SSH public key formatted for use in // ParsePublicKey parses an SSH public key or certificate formatted for use in
// the SSH wire protocol according to RFC 4253, section 6.6. // the SSH wire protocol according to RFC 4253, section 6.6.
func ParsePublicKey(in []byte) (out PublicKey, err error) { func ParsePublicKey(in []byte) (out PublicKey, err error) {
algo, in, ok := parseString(in) algo, in, ok := parseString(in)
@ -390,11 +410,11 @@ func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (Multi
} }
for _, algo := range algorithms { for _, algo := range algorithms {
if !contains(supportedAlgos, algo) { if !slices.Contains(supportedAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q", return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q",
algo, signer.PublicKey().Type()) algo, signer.PublicKey().Type())
} }
if !contains(signerAlgos, algo) { if !slices.Contains(signerAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo) return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo)
} }
} }
@ -481,10 +501,13 @@ func (r *rsaPublicKey) Marshal() []byte {
func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
supportedAlgos := algorithmsForKeyFormat(r.Type()) supportedAlgos := algorithmsForKeyFormat(r.Type())
if !contains(supportedAlgos, sig.Format) { if !slices.Contains(supportedAlgos, sig.Format) {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
} }
hash := hashFuncs[sig.Format] hash, err := hashFunc(sig.Format)
if err != nil {
return err
}
h := hash.New() h := hash.New()
h.Write(data) h.Write(data)
digest := h.Sum(nil) digest := h.Sum(nil)
@ -601,7 +624,11 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() { if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
} }
h := hashFuncs[sig.Format].New() hash, err := hashFunc(sig.Format)
if err != nil {
return err
}
h := hash.New()
h.Write(data) h.Write(data)
digest := h.Sum(nil) digest := h.Sum(nil)
@ -646,7 +673,11 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
} }
h := hashFuncs[k.PublicKey().Type()].New() hash, err := hashFunc(k.PublicKey().Type())
if err != nil {
return nil, err
}
h := hash.New()
h.Write(data) h.Write(data)
digest := h.Sum(nil) digest := h.Sum(nil)
r, s, err := dsa.Sign(rand, k.PrivateKey, digest) r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
@ -796,8 +827,11 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() { if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
} }
hash, err := hashFunc(sig.Format)
h := hashFuncs[sig.Format].New() if err != nil {
return err
}
h := hash.New()
h.Write(data) h.Write(data)
digest := h.Sum(nil) digest := h.Sum(nil)
@ -900,8 +934,11 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() { if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
} }
hash, err := hashFunc(sig.Format)
h := hashFuncs[sig.Format].New() if err != nil {
return err
}
h := hash.New()
h.Write([]byte(k.application)) h.Write([]byte(k.application))
appDigest := h.Sum(nil) appDigest := h.Sum(nil)
@ -1004,7 +1041,11 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("invalid size %d for Ed25519 public key", l) return fmt.Errorf("invalid size %d for Ed25519 public key", l)
} }
h := hashFuncs[sig.Format].New() hash, err := hashFunc(sig.Format)
if err != nil {
return err
}
h := hash.New()
h.Write([]byte(k.application)) h.Write([]byte(k.application))
appDigest := h.Sum(nil) appDigest := h.Sum(nil)
@ -1107,11 +1148,14 @@ func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
algorithm = s.pubKey.Type() algorithm = s.pubKey.Type()
} }
if !contains(s.Algorithms(), algorithm) { if !slices.Contains(s.Algorithms(), algorithm) {
return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type())
} }
hashFunc := hashFuncs[algorithm] hashFunc, err := hashFunc(algorithm)
if err != nil {
return nil, err
}
var digest []byte var digest []byte
if hashFunc != 0 { if hashFunc != 0 {
h := hashFunc.New() h := hashFunc.New()
@ -1446,6 +1490,7 @@ type openSSHEncryptedPrivateKey struct {
NumKeys uint32 NumKeys uint32
PubKey []byte PubKey []byte
PrivKeyBlock []byte PrivKeyBlock []byte
Rest []byte `ssh:"rest"`
} }
type openSSHPrivateKey struct { type openSSHPrivateKey struct {

View File

@ -7,11 +7,13 @@ package ssh
// Message authentication support // Message authentication support
import ( import (
"crypto/fips140"
"crypto/hmac" "crypto/hmac"
"crypto/sha1" "crypto/sha1"
"crypto/sha256" "crypto/sha256"
"crypto/sha512" "crypto/sha512"
"hash" "hash"
"slices"
) )
type macMode struct { type macMode struct {
@ -46,23 +48,37 @@ func (t truncatingMAC) Size() int {
func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
var macModes = map[string]*macMode{ // macModes defines the supported MACs. MACs not included are not supported
"hmac-sha2-512-etm@openssh.com": {64, true, func(key []byte) hash.Hash { // and will not be negotiated, even if explicitly configured. When FIPS mode is
// enabled, only FIPS-approved algorithms are included.
var macModes = map[string]*macMode{}
func init() {
macModes[HMACSHA512ETM] = &macMode{64, true, func(key []byte) hash.Hash {
return hmac.New(sha512.New, key) return hmac.New(sha512.New, key)
}}, }}
"hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { macModes[HMACSHA256ETM] = &macMode{32, true, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key) return hmac.New(sha256.New, key)
}}, }}
"hmac-sha2-512": {64, false, func(key []byte) hash.Hash { macModes[HMACSHA512] = &macMode{64, false, func(key []byte) hash.Hash {
return hmac.New(sha512.New, key) return hmac.New(sha512.New, key)
}}, }}
"hmac-sha2-256": {32, false, func(key []byte) hash.Hash { macModes[HMACSHA256] = &macMode{32, false, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key) return hmac.New(sha256.New, key)
}}, }}
"hmac-sha1": {20, false, func(key []byte) hash.Hash {
if fips140.Enabled() {
defaultMACs = slices.DeleteFunc(defaultMACs, func(algo string) bool {
_, ok := macModes[algo]
return !ok
})
return
}
macModes[HMACSHA1] = &macMode{20, false, func(key []byte) hash.Hash {
return hmac.New(sha1.New, key) return hmac.New(sha1.New, key)
}}, }}
"hmac-sha1-96": {20, false, func(key []byte) hash.Hash { macModes[InsecureHMACSHA196] = &macMode{20, false, func(key []byte) hash.Hash {
return truncatingMAC{12, hmac.New(sha1.New, key)} return truncatingMAC{12, hmac.New(sha1.New, key)}
}}, }}
} }

View File

@ -123,7 +123,7 @@ const msgKexDHGexRequest = 34
type kexDHGexRequestMsg struct { type kexDHGexRequestMsg struct {
MinBits uint32 `sshtype:"34"` MinBits uint32 `sshtype:"34"`
PreferedBits uint32 PreferredBits uint32
MaxBits uint32 MaxBits uint32
} }
@ -792,7 +792,7 @@ func marshalString(to []byte, s []byte) []byte {
return to[len(s):] return to[len(s):]
} }
var bigIntType = reflect.TypeOf((*big.Int)(nil)) var bigIntType = reflect.TypeFor[*big.Int]()
// Decode a packet into its corresponding message. // Decode a packet into its corresponding message.
func decode(packet []byte) (interface{}, error) { func decode(packet []byte) (interface{}, error) {

View File

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build go1.24
package ssh package ssh
import ( import (
@ -13,27 +11,10 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"runtime"
"slices"
"golang.org/x/crypto/curve25519" "golang.org/x/crypto/curve25519"
) )
const (
kexAlgoMLKEM768xCurve25519SHA256 = "mlkem768x25519-sha256"
)
func init() {
// After Go 1.24rc1 mlkem swapped the order of return values of Encapsulate.
// See #70950.
if runtime.Version() == "go1.24rc1" {
return
}
supportedKexAlgos = slices.Insert(supportedKexAlgos, 0, kexAlgoMLKEM768xCurve25519SHA256)
preferredKexAlgos = slices.Insert(preferredKexAlgos, 0, kexAlgoMLKEM768xCurve25519SHA256)
kexAlgoMap[kexAlgoMLKEM768xCurve25519SHA256] = &mlkem768WithCurve25519sha256{}
}
// mlkem768WithCurve25519sha256 implements the hybrid ML-KEM768 with // mlkem768WithCurve25519sha256 implements the hybrid ML-KEM768 with
// curve25519-sha256 key exchange method, as described by // curve25519-sha256 key exchange method, as described by
// draft-kampanakis-curdle-ssh-pq-ke-05 section 2.3.3. // draft-kampanakis-curdle-ssh-pq-ke-05 section 2.3.3.

View File

@ -10,6 +10,7 @@ import (
"fmt" "fmt"
"io" "io"
"net" "net"
"slices"
"strings" "strings"
) )
@ -43,6 +44,9 @@ type Permissions struct {
// pass data from the authentication callbacks to the server // pass data from the authentication callbacks to the server
// application layer. // application layer.
Extensions map[string]string Extensions map[string]string
// ExtraData allows to store user defined data.
ExtraData map[any]any
} }
type GSSAPIWithMICConfig struct { type GSSAPIWithMICConfig struct {
@ -126,6 +130,21 @@ type ServerConfig struct {
// Permissions.Extensions entry. // Permissions.Extensions entry.
PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
// VerifiedPublicKeyCallback, if non-nil, is called after a client
// successfully confirms having control over a key that was previously
// approved by PublicKeyCallback. The permissions object passed to the
// callback is the one returned by PublicKeyCallback for the given public
// key and its ownership is transferred to the callback. The returned
// Permissions object can be the same object, optionally modified, or a
// completely new object. If VerifiedPublicKeyCallback is non-nil,
// PublicKeyCallback is not allowed to return a PartialSuccessError, which
// can instead be returned by VerifiedPublicKeyCallback.
//
// VerifiedPublicKeyCallback does not affect which authentication methods
// are included in the list of methods that can be attempted by the client.
VerifiedPublicKeyCallback func(conn ConnMetadata, key PublicKey, permissions *Permissions,
signatureAlgorithm string) (*Permissions, error)
// KeyboardInteractiveCallback, if non-nil, is called when // KeyboardInteractiveCallback, if non-nil, is called when
// keyboard-interactive authentication is selected (RFC // keyboard-interactive authentication is selected (RFC
// 4256). The client object's Challenge function should be // 4256). The client object's Challenge function should be
@ -243,22 +262,15 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha
fullConf.MaxAuthTries = 6 fullConf.MaxAuthTries = 6
} }
if len(fullConf.PublicKeyAuthAlgorithms) == 0 { if len(fullConf.PublicKeyAuthAlgorithms) == 0 {
fullConf.PublicKeyAuthAlgorithms = supportedPubKeyAuthAlgos fullConf.PublicKeyAuthAlgorithms = defaultPubKeyAuthAlgos
} else { } else {
for _, algo := range fullConf.PublicKeyAuthAlgorithms { for _, algo := range fullConf.PublicKeyAuthAlgorithms {
if !contains(supportedPubKeyAuthAlgos, algo) { if !slices.Contains(SupportedAlgorithms().PublicKeyAuths, algo) && !slices.Contains(InsecureAlgorithms().PublicKeyAuths, algo) {
c.Close() c.Close()
return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo) return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo)
} }
} }
} }
// Check if the config contains any unsupported key exchanges
for _, kex := range fullConf.KeyExchanges {
if _, ok := serverForbiddenKexAlgos[kex]; ok {
c.Close()
return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex)
}
}
s := &connection{ s := &connection{
sshConn: sshConn{conn: c}, sshConn: sshConn{conn: c},
@ -315,6 +327,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error)
// We just did the key change, so the session ID is established. // We just did the key change, so the session ID is established.
s.sessionID = s.transport.getSessionID() s.sessionID = s.transport.getSessionID()
s.algorithms = s.transport.getAlgorithms()
var packet []byte var packet []byte
if packet, err = s.transport.readPacket(); err != nil { if packet, err = s.transport.readPacket(); err != nil {
@ -637,7 +650,7 @@ userAuthLoop:
return nil, parseError(msgUserAuthRequest) return nil, parseError(msgUserAuthRequest)
} }
algo := string(algoBytes) algo := string(algoBytes)
if !contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) { if !slices.Contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
break break
} }
@ -658,6 +671,9 @@ userAuthLoop:
candidate.pubKeyData = pubKeyData candidate.pubKeyData = pubKeyData
candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey) candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey)
_, isPartialSuccessError := candidate.result.(*PartialSuccessError) _, isPartialSuccessError := candidate.result.(*PartialSuccessError)
if isPartialSuccessError && config.VerifiedPublicKeyCallback != nil {
return nil, errors.New("ssh: invalid library usage: PublicKeyCallback must not return partial success when VerifiedPublicKeyCallback is defined")
}
if (candidate.result == nil || isPartialSuccessError) && if (candidate.result == nil || isPartialSuccessError) &&
candidate.perms != nil && candidate.perms != nil &&
@ -701,7 +717,7 @@ userAuthLoop:
// ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public // ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public
// key type. The algorithm and public key type must be // key type. The algorithm and public key type must be
// consistent: both must be certificate algorithms, or neither. // consistent: both must be certificate algorithms, or neither.
if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) { if !slices.Contains(algorithmsForKeyFormat(pubKey.Type()), algo) {
authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q", authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q",
pubKey.Type(), algo) pubKey.Type(), algo)
break break
@ -711,7 +727,7 @@ userAuthLoop:
// algorithm name that corresponds to algo with // algorithm name that corresponds to algo with
// sig.Format. This is usually the same, but // sig.Format. This is usually the same, but
// for certs, the names differ. // for certs, the names differ.
if !contains(config.PublicKeyAuthAlgorithms, sig.Format) { if !slices.Contains(config.PublicKeyAuthAlgorithms, sig.Format) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
break break
} }
@ -728,6 +744,12 @@ userAuthLoop:
authErr = candidate.result authErr = candidate.result
perms = candidate.perms perms = candidate.perms
if authErr == nil && config.VerifiedPublicKeyCallback != nil {
// Only call VerifiedPublicKeyCallback after the key has been accepted
// and successfully verified. If authErr is non-nil, the key is not
// considered verified and the callback must not run.
perms, authErr = config.VerifiedPublicKeyCallback(s, pubKey, perms, algo)
}
} }
case "gssapi-with-mic": case "gssapi-with-mic":
if authConfig.GSSAPIWithMICConfig == nil { if authConfig.GSSAPIWithMICConfig == nil {

View File

@ -106,6 +106,13 @@ func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) {
if !ok { if !ok {
return nil, errors.New("parse uint32 failed") return nil, errors.New("parse uint32 failed")
} }
// Each ASN.1 encoded OID must have a minimum
// of 2 bytes; 64 maximum mechanisms is an
// arbitrary, but reasonable ceiling.
const maxMechs = 64
if n > maxMechs || int(n)*2 > len(rest) {
return nil, errors.New("invalid mechanism count")
}
s := &userAuthRequestGSSAPI{ s := &userAuthRequestGSSAPI{
N: n, N: n,
OIDS: make([]asn1.ObjectIdentifier, n), OIDS: make([]asn1.ObjectIdentifier, n),
@ -122,7 +129,6 @@ func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) {
if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil {
return nil, err return nil, err
} }
} }
return s, nil return s, nil
} }

View File

@ -44,7 +44,7 @@ func (c *Client) ListenUnix(socketPath string) (net.Listener, error) {
if !ok { if !ok {
return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer")
} }
ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) ch := c.forwards.add("unix", socketPath)
return &unixListener{socketPath, c, ch}, nil return &unixListener{socketPath, c, ch}, nil
} }
@ -96,7 +96,7 @@ func (l *unixListener) Accept() (net.Conn, error) {
// Close closes the listener. // Close closes the listener.
func (l *unixListener) Close() error { func (l *unixListener) Close() error {
// this also closes the listener. // this also closes the listener.
l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) l.conn.forwards.remove("unix", l.socketPath)
m := streamLocalChannelForwardMsg{ m := streamLocalChannelForwardMsg{
l.socketPath, l.socketPath,
} }

View File

@ -11,6 +11,7 @@ import (
"io" "io"
"math/rand" "math/rand"
"net" "net"
"net/netip"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@ -22,14 +23,21 @@ import (
// the returned net.Listener. The listener must be serviced, or the // the returned net.Listener. The listener must be serviced, or the
// SSH connection may hang. // SSH connection may hang.
// N must be "tcp", "tcp4", "tcp6", or "unix". // N must be "tcp", "tcp4", "tcp6", or "unix".
//
// If the address is a hostname, it is sent to the remote peer as-is, without
// being resolved locally, and the Listener Addr method will return a zero IP.
func (c *Client) Listen(n, addr string) (net.Listener, error) { func (c *Client) Listen(n, addr string) (net.Listener, error) {
switch n { switch n {
case "tcp", "tcp4", "tcp6": case "tcp", "tcp4", "tcp6":
laddr, err := net.ResolveTCPAddr(n, addr) host, portStr, err := net.SplitHostPort(addr)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return c.ListenTCP(laddr) port, err := strconv.ParseInt(portStr, 10, 32)
if err != nil {
return nil, err
}
return c.listenTCPInternal(host, int(port))
case "unix": case "unix":
return c.ListenUnix(addr) return c.ListenUnix(addr)
default: default:
@ -102,15 +110,24 @@ func (c *Client) handleForwards() {
// ListenTCP requests the remote peer open a listening socket // ListenTCP requests the remote peer open a listening socket
// on laddr. Incoming connections will be available by calling // on laddr. Incoming connections will be available by calling
// Accept on the returned net.Listener. // Accept on the returned net.Listener.
//
// ListenTCP accepts an IP address, to provide a hostname use [Client.Listen]
// with "tcp", "tcp4", or "tcp6" network instead.
func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
c.handleForwardsOnce.Do(c.handleForwards) c.handleForwardsOnce.Do(c.handleForwards)
if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) {
return c.autoPortListenWorkaround(laddr) return c.autoPortListenWorkaround(laddr)
} }
return c.listenTCPInternal(laddr.IP.String(), laddr.Port)
}
func (c *Client) listenTCPInternal(host string, port int) (net.Listener, error) {
c.handleForwardsOnce.Do(c.handleForwards)
m := channelForwardMsg{ m := channelForwardMsg{
laddr.IP.String(), host,
uint32(laddr.Port), uint32(port),
} }
// send message // send message
ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m))
@ -123,20 +140,33 @@ func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
// If the original port was 0, then the remote side will // If the original port was 0, then the remote side will
// supply a real port number in the response. // supply a real port number in the response.
if laddr.Port == 0 { if port == 0 {
var p struct { var p struct {
Port uint32 Port uint32
} }
if err := Unmarshal(resp, &p); err != nil { if err := Unmarshal(resp, &p); err != nil {
return nil, err return nil, err
} }
laddr.Port = int(p.Port) port = int(p.Port)
} }
// Construct a local address placeholder for the remote listener. If the
// original host is an IP address, preserve it so that Listener.Addr()
// reports the same IP. If the host is a hostname or cannot be parsed as an
// IP, fall back to IPv4zero. The port field is always set, even if the
// original port was 0, because in that case the remote server will assign
// one, allowing callers to determine which port was selected.
ip := net.IPv4zero
if parsed, err := netip.ParseAddr(host); err == nil {
ip = net.IP(parsed.AsSlice())
}
laddr := &net.TCPAddr{
IP: ip,
Port: port,
}
addr := net.JoinHostPort(host, strconv.FormatInt(int64(port), 10))
ch := c.forwards.add("tcp", addr)
// Register this forward, using the port number we obtained. return &tcpListener{laddr, addr, c, ch}, nil
ch := c.forwards.add(laddr)
return &tcpListener{laddr, c, ch}, nil
} }
// forwardList stores a mapping between remote // forwardList stores a mapping between remote
@ -149,7 +179,8 @@ type forwardList struct {
// forwardEntry represents an established mapping of a laddr on a // forwardEntry represents an established mapping of a laddr on a
// remote ssh server to a channel connected to a tcpListener. // remote ssh server to a channel connected to a tcpListener.
type forwardEntry struct { type forwardEntry struct {
laddr net.Addr addr string // host:port or socket path
network string // tcp or unix
c chan forward c chan forward
} }
@ -161,11 +192,12 @@ type forward struct {
raddr net.Addr // the raddr of the incoming connection raddr net.Addr // the raddr of the incoming connection
} }
func (l *forwardList) add(addr net.Addr) chan forward { func (l *forwardList) add(n, addr string) chan forward {
l.Lock() l.Lock()
defer l.Unlock() defer l.Unlock()
f := forwardEntry{ f := forwardEntry{
laddr: addr, addr: addr,
network: n,
c: make(chan forward, 1), c: make(chan forward, 1),
} }
l.entries = append(l.entries, f) l.entries = append(l.entries, f)
@ -185,17 +217,18 @@ func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) {
if port == 0 || port > 65535 { if port == 0 || port > 65535 {
return nil, fmt.Errorf("ssh: port number out of range: %d", port) return nil, fmt.Errorf("ssh: port number out of range: %d", port)
} }
ip := net.ParseIP(string(addr)) ip, err := netip.ParseAddr(addr)
if ip == nil { if err != nil {
return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr)
} }
return &net.TCPAddr{IP: ip, Port: int(port)}, nil return &net.TCPAddr{IP: net.IP(ip.AsSlice()), Port: int(port)}, nil
} }
func (l *forwardList) handleChannels(in <-chan NewChannel) { func (l *forwardList) handleChannels(in <-chan NewChannel) {
for ch := range in { for ch := range in {
var ( var (
laddr net.Addr addr string
network string
raddr net.Addr raddr net.Addr
err error err error
) )
@ -207,40 +240,34 @@ func (l *forwardList) handleChannels(in <-chan NewChannel) {
continue continue
} }
// RFC 4254 section 7.2 specifies that incoming // RFC 4254 section 7.2 specifies that incoming addresses should
// addresses should list the address, in string // list the address that was connected, in string format. It is the
// format. It is implied that this should be an IP // same address used in the tcpip-forward request. The originator
// address, as it would be impossible to connect to it // address is an IP address instead.
// otherwise. addr = net.JoinHostPort(payload.Addr, strconv.FormatUint(uint64(payload.Port), 10))
laddr, err = parseTCPAddr(payload.Addr, payload.Port)
if err != nil {
ch.Reject(ConnectionFailed, err.Error())
continue
}
raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort)
if err != nil { if err != nil {
ch.Reject(ConnectionFailed, err.Error()) ch.Reject(ConnectionFailed, err.Error())
continue continue
} }
network = "tcp"
case "forwarded-streamlocal@openssh.com": case "forwarded-streamlocal@openssh.com":
var payload forwardedStreamLocalPayload var payload forwardedStreamLocalPayload
if err = Unmarshal(ch.ExtraData(), &payload); err != nil { if err = Unmarshal(ch.ExtraData(), &payload); err != nil {
ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error())
continue continue
} }
laddr = &net.UnixAddr{ addr = payload.SocketPath
Name: payload.SocketPath,
Net: "unix",
}
raddr = &net.UnixAddr{ raddr = &net.UnixAddr{
Name: "@", Name: "@",
Net: "unix", Net: "unix",
} }
network = "unix"
default: default:
panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) panic(fmt.Errorf("ssh: unknown channel type %s", channelType))
} }
if ok := l.forward(laddr, raddr, ch); !ok { if ok := l.forward(network, addr, raddr, ch); !ok {
// Section 7.2, implementations MUST reject spurious incoming // Section 7.2, implementations MUST reject spurious incoming
// connections. // connections.
ch.Reject(Prohibited, "no forward for address") ch.Reject(Prohibited, "no forward for address")
@ -252,11 +279,11 @@ func (l *forwardList) handleChannels(in <-chan NewChannel) {
// remove removes the forward entry, and the channel feeding its // remove removes the forward entry, and the channel feeding its
// listener. // listener.
func (l *forwardList) remove(addr net.Addr) { func (l *forwardList) remove(n, addr string) {
l.Lock() l.Lock()
defer l.Unlock() defer l.Unlock()
for i, f := range l.entries { for i, f := range l.entries {
if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { if n == f.network && addr == f.addr {
l.entries = append(l.entries[:i], l.entries[i+1:]...) l.entries = append(l.entries[:i], l.entries[i+1:]...)
close(f.c) close(f.c)
return return
@ -274,11 +301,11 @@ func (l *forwardList) closeAll() {
l.entries = nil l.entries = nil
} }
func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { func (l *forwardList) forward(n, addr string, raddr net.Addr, ch NewChannel) bool {
l.Lock() l.Lock()
defer l.Unlock() defer l.Unlock()
for _, f := range l.entries { for _, f := range l.entries {
if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { if n == f.network && addr == f.addr {
f.c <- forward{newCh: ch, raddr: raddr} f.c <- forward{newCh: ch, raddr: raddr}
return true return true
} }
@ -288,6 +315,7 @@ func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool {
type tcpListener struct { type tcpListener struct {
laddr *net.TCPAddr laddr *net.TCPAddr
addr string
conn *Client conn *Client
in <-chan forward in <-chan forward
@ -314,13 +342,21 @@ func (l *tcpListener) Accept() (net.Conn, error) {
// Close closes the listener. // Close closes the listener.
func (l *tcpListener) Close() error { func (l *tcpListener) Close() error {
host, port, err := net.SplitHostPort(l.addr)
if err != nil {
return err
}
rport, err := strconv.ParseUint(port, 10, 32)
if err != nil {
return err
}
m := channelForwardMsg{ m := channelForwardMsg{
l.laddr.IP.String(), host,
uint32(l.laddr.Port), uint32(rport),
} }
// this also closes the listener. // this also closes the listener.
l.conn.forwards.remove(l.laddr) l.conn.forwards.remove("tcp", l.addr)
ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m))
if err == nil && !ok { if err == nil && !ok {
err = errors.New("ssh: cancel-tcpip-forward failed") err = errors.New("ssh: cancel-tcpip-forward failed")

View File

@ -8,6 +8,7 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"errors" "errors"
"fmt"
"io" "io"
"log" "log"
) )
@ -16,13 +17,6 @@ import (
// wire. No message decoding is done, to minimize the impact on timing. // wire. No message decoding is done, to minimize the impact on timing.
const debugTransport = false const debugTransport = false
const (
gcm128CipherID = "aes128-gcm@openssh.com"
gcm256CipherID = "aes256-gcm@openssh.com"
aes128cbcID = "aes128-cbc"
tripledescbcID = "3des-cbc"
)
// packetConn represents a transport that implements packet based // packetConn represents a transport that implements packet based
// operations. // operations.
type packetConn interface { type packetConn interface {
@ -92,14 +86,14 @@ func (t *transport) setInitialKEXDone() {
// prepareKeyChange sets up key material for a keychange. The key changes in // prepareKeyChange sets up key material for a keychange. The key changes in
// both directions are triggered by reading and writing a msgNewKey packet // both directions are triggered by reading and writing a msgNewKey packet
// respectively. // respectively.
func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { func (t *transport) prepareKeyChange(algs *NegotiatedAlgorithms, kexResult *kexResult) error {
ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) ciph, err := newPacketCipher(t.reader.dir, algs.Read, kexResult)
if err != nil { if err != nil {
return err return err
} }
t.reader.pendingKeyChange <- ciph t.reader.pendingKeyChange <- ciph
ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) ciph, err = newPacketCipher(t.writer.dir, algs.Write, kexResult)
if err != nil { if err != nil {
return err return err
} }
@ -259,8 +253,11 @@ var (
// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as // setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as
// described in RFC 4253, section 6.4. direction should either be serverKeys // described in RFC 4253, section 6.4. direction should either be serverKeys
// (to setup server->client keys) or clientKeys (for client->server keys). // (to setup server->client keys) or clientKeys (for client->server keys).
func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { func newPacketCipher(d direction, algs DirectionAlgorithms, kex *kexResult) (packetCipher, error) {
cipherMode := cipherModes[algs.Cipher] cipherMode := cipherModes[algs.Cipher]
if cipherMode == nil {
return nil, fmt.Errorf("ssh: unsupported cipher %v", algs.Cipher)
}
iv := make([]byte, cipherMode.ivSize) iv := make([]byte, cipherMode.ivSize)
key := make([]byte, cipherMode.keySize) key := make([]byte, cipherMode.keySize)

View File

@ -33,7 +33,7 @@ type printer struct {
} }
// printf prints to the buffer. // printf prints to the buffer.
func (p *printer) printf(format string, args ...interface{}) { func (p *printer) printf(format string, args ...any) {
fmt.Fprintf(p, format, args...) fmt.Fprintf(p, format, args...)
} }

View File

@ -94,7 +94,7 @@ func (x *FileSyntax) Span() (start, end Position) {
// line, the new line is added at the end of the block containing hint, // line, the new line is added at the end of the block containing hint,
// extracting hint into a new block if it is not yet in one. // extracting hint into a new block if it is not yet in one.
// //
// If the hint is non-nil buts its first token does not match, // If the hint is non-nil but its first token does not match,
// the new line is added after the block containing hint // the new line is added after the block containing hint
// (or hint itself, if not in a block). // (or hint itself, if not in a block).
// //
@ -600,7 +600,7 @@ func (in *input) readToken() {
// Checked all punctuation. Must be identifier token. // Checked all punctuation. Must be identifier token.
if c := in.peekRune(); !isIdent(c) { if c := in.peekRune(); !isIdent(c) {
in.Error(fmt.Sprintf("unexpected input character %#q", c)) in.Error(fmt.Sprintf("unexpected input character %#q", rune(c)))
} }
// Scan over identifier. // Scan over identifier.

View File

@ -20,10 +20,11 @@
package modfile package modfile
import ( import (
"cmp"
"errors" "errors"
"fmt" "fmt"
"path/filepath" "path/filepath"
"sort" "slices"
"strconv" "strconv"
"strings" "strings"
"unicode" "unicode"
@ -44,6 +45,7 @@ type File struct {
Replace []*Replace Replace []*Replace
Retract []*Retract Retract []*Retract
Tool []*Tool Tool []*Tool
Ignore []*Ignore
Syntax *FileSyntax Syntax *FileSyntax
} }
@ -100,6 +102,12 @@ type Tool struct {
Syntax *Line Syntax *Line
} }
// An Ignore is a single ignore statement.
type Ignore struct {
Path string
Syntax *Line
}
// A VersionInterval represents a range of versions with upper and lower bounds. // A VersionInterval represents a range of versions with upper and lower bounds.
// Intervals are closed: both bounds are included. When Low is equal to High, // Intervals are closed: both bounds are included. When Low is equal to High,
// the interval may refer to a single version ('v1.2.3') or an interval // the interval may refer to a single version ('v1.2.3') or an interval
@ -304,7 +312,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse
}) })
} }
continue continue
case "module", "godebug", "require", "exclude", "replace", "retract", "tool": case "module", "godebug", "require", "exclude", "replace", "retract", "tool", "ignore":
for _, l := range x.Line { for _, l := range x.Line {
f.add(&errs, x, l, x.Token[0], l.Token, fix, strict) f.add(&errs, x, l, x.Token[0], l.Token, fix, strict)
} }
@ -337,7 +345,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a
// and simply ignore those statements. // and simply ignore those statements.
if !strict { if !strict {
switch verb { switch verb {
case "go", "module", "retract", "require": case "go", "module", "retract", "require", "ignore":
// want these even for dependency go.mods // want these even for dependency go.mods
default: default:
return return
@ -360,7 +368,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a
Err: err, Err: err,
}) })
} }
errorf := func(format string, args ...interface{}) { errorf := func(format string, args ...any) {
wrapError(fmt.Errorf(format, args...)) wrapError(fmt.Errorf(format, args...))
} }
@ -531,6 +539,21 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a
Path: s, Path: s,
Syntax: line, Syntax: line,
}) })
case "ignore":
if len(args) != 1 {
errorf("ignore directive expects exactly one argument")
return
}
s, err := parseString(&args[0])
if err != nil {
errorf("invalid quoted string: %v", err)
return
}
f.Ignore = append(f.Ignore, &Ignore{
Path: s,
Syntax: line,
})
} }
} }
@ -551,7 +574,7 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V
Err: err, Err: err,
} }
} }
errorf := func(format string, args ...interface{}) *Error { errorf := func(format string, args ...any) *Error {
return wrapError(fmt.Errorf(format, args...)) return wrapError(fmt.Errorf(format, args...))
} }
@ -662,7 +685,7 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string,
Err: err, Err: err,
}) })
} }
errorf := func(format string, args ...interface{}) { errorf := func(format string, args ...any) {
wrapError(fmt.Errorf(format, args...)) wrapError(fmt.Errorf(format, args...))
} }
@ -1571,7 +1594,7 @@ func (f *File) AddRetract(vi VersionInterval, rationale string) error {
r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]") r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]")
} }
if rationale != "" { if rationale != "" {
for _, line := range strings.Split(rationale, "\n") { for line := range strings.SplitSeq(rationale, "\n") {
com := Comment{Token: "// " + line} com := Comment{Token: "// " + line}
r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com) r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com)
} }
@ -1619,6 +1642,36 @@ func (f *File) DropTool(path string) error {
return nil return nil
} }
// AddIgnore adds a new ignore directive with the given path.
// It does nothing if the ignore line already exists.
func (f *File) AddIgnore(path string) error {
for _, t := range f.Ignore {
if t.Path == path {
return nil
}
}
f.Ignore = append(f.Ignore, &Ignore{
Path: path,
Syntax: f.Syntax.addLine(nil, "ignore", path),
})
f.SortBlocks()
return nil
}
// DropIgnore removes a ignore directive with the given path.
// It does nothing if no such ignore directive exists.
func (f *File) DropIgnore(path string) error {
for _, t := range f.Ignore {
if t.Path == path {
t.Syntax.markRemoved()
*t = Ignore{}
}
}
return nil
}
func (f *File) SortBlocks() { func (f *File) SortBlocks() {
f.removeDups() // otherwise sorting is unsafe f.removeDups() // otherwise sorting is unsafe
@ -1633,15 +1686,13 @@ func (f *File) SortBlocks() {
if !ok { if !ok {
continue continue
} }
less := lineLess less := compareLine
if block.Token[0] == "exclude" && useSemanticSortForExclude { if block.Token[0] == "exclude" && useSemanticSortForExclude {
less = lineExcludeLess less = compareLineExclude
} else if block.Token[0] == "retract" { } else if block.Token[0] == "retract" {
less = lineRetractLess less = compareLineRetract
} }
sort.SliceStable(block.Line, func(i, j int) bool { slices.SortStableFunc(block.Line, less)
return less(block.Line[i], block.Line[j])
})
} }
} }
@ -1657,10 +1708,10 @@ func (f *File) SortBlocks() {
// retract directives are not de-duplicated since comments are // retract directives are not de-duplicated since comments are
// meaningful, and versions may be retracted multiple times. // meaningful, and versions may be retracted multiple times.
func (f *File) removeDups() { func (f *File) removeDups() {
removeDups(f.Syntax, &f.Exclude, &f.Replace, &f.Tool) removeDups(f.Syntax, &f.Exclude, &f.Replace, &f.Tool, &f.Ignore)
} }
func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, tool *[]*Tool) { func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, tool *[]*Tool, ignore *[]*Ignore) {
kill := make(map[*Line]bool) kill := make(map[*Line]bool)
// Remove duplicate excludes. // Remove duplicate excludes.
@ -1719,6 +1770,24 @@ func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, to
*tool = newTool *tool = newTool
} }
if ignore != nil {
haveIgnore := make(map[string]bool)
for _, i := range *ignore {
if haveIgnore[i.Path] {
kill[i.Syntax] = true
continue
}
haveIgnore[i.Path] = true
}
var newIgnore []*Ignore
for _, i := range *ignore {
if !kill[i.Syntax] {
newIgnore = append(newIgnore, i)
}
}
*ignore = newIgnore
}
// Duplicate require and retract directives are not removed. // Duplicate require and retract directives are not removed.
// Drop killed statements from the syntax tree. // Drop killed statements from the syntax tree.
@ -1746,39 +1815,38 @@ func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, to
syntax.Stmt = stmts syntax.Stmt = stmts
} }
// lineLess returns whether li should be sorted before lj. It sorts // compareLine compares li and lj. It sorts lexicographically without assigning
// lexicographically without assigning any special meaning to tokens. // any special meaning to tokens.
func lineLess(li, lj *Line) bool { func compareLine(li, lj *Line) int {
for k := 0; k < len(li.Token) && k < len(lj.Token); k++ { for k := 0; k < len(li.Token) && k < len(lj.Token); k++ {
if li.Token[k] != lj.Token[k] { if li.Token[k] != lj.Token[k] {
return li.Token[k] < lj.Token[k] return cmp.Compare(li.Token[k], lj.Token[k])
} }
} }
return len(li.Token) < len(lj.Token) return cmp.Compare(len(li.Token), len(lj.Token))
} }
// lineExcludeLess reports whether li should be sorted before lj for lines in // compareLineExclude compares li and lj for lines in an "exclude" block.
// an "exclude" block. func compareLineExclude(li, lj *Line) int {
func lineExcludeLess(li, lj *Line) bool {
if len(li.Token) != 2 || len(lj.Token) != 2 { if len(li.Token) != 2 || len(lj.Token) != 2 {
// Not a known exclude specification. // Not a known exclude specification.
// Fall back to sorting lexicographically. // Fall back to sorting lexicographically.
return lineLess(li, lj) return compareLine(li, lj)
} }
// An exclude specification has two tokens: ModulePath and Version. // An exclude specification has two tokens: ModulePath and Version.
// Compare module path by string order and version by semver rules. // Compare module path by string order and version by semver rules.
if pi, pj := li.Token[0], lj.Token[0]; pi != pj { if pi, pj := li.Token[0], lj.Token[0]; pi != pj {
return pi < pj return cmp.Compare(pi, pj)
} }
return semver.Compare(li.Token[1], lj.Token[1]) < 0 return semver.Compare(li.Token[1], lj.Token[1])
} }
// lineRetractLess returns whether li should be sorted before lj for lines in // compareLineRetract compares li and lj for lines in a "retract" block.
// a "retract" block. It treats each line as a version interval. Single versions // It treats each line as a version interval. Single versions are compared as
// are compared as if they were intervals with the same low and high version. // if they were intervals with the same low and high version.
// Intervals are sorted in descending order, first by low version, then by // Intervals are sorted in descending order, first by low version, then by
// high version, using semver.Compare. // high version, using [semver.Compare].
func lineRetractLess(li, lj *Line) bool { func compareLineRetract(li, lj *Line) int {
interval := func(l *Line) VersionInterval { interval := func(l *Line) VersionInterval {
if len(l.Token) == 1 { if len(l.Token) == 1 {
return VersionInterval{Low: l.Token[0], High: l.Token[0]} return VersionInterval{Low: l.Token[0], High: l.Token[0]}
@ -1792,9 +1860,9 @@ func lineRetractLess(li, lj *Line) bool {
vii := interval(li) vii := interval(li)
vij := interval(lj) vij := interval(lj)
if cmp := semver.Compare(vii.Low, vij.Low); cmp != 0 { if cmp := semver.Compare(vii.Low, vij.Low); cmp != 0 {
return cmp > 0 return -cmp
} }
return semver.Compare(vii.High, vij.High) > 0 return -semver.Compare(vii.High, vij.High)
} }
// checkCanonicalVersion returns a non-nil error if vers is not a canonical // checkCanonicalVersion returns a non-nil error if vers is not a canonical

View File

@ -6,7 +6,7 @@ package modfile
import ( import (
"fmt" "fmt"
"sort" "slices"
"strings" "strings"
) )
@ -315,9 +315,7 @@ func (f *WorkFile) SortBlocks() {
if !ok { if !ok {
continue continue
} }
sort.SliceStable(block.Line, func(i, j int) bool { slices.SortStableFunc(block.Line, compareLine)
return lineLess(block.Line[i], block.Line[j])
})
} }
} }
@ -331,5 +329,5 @@ func (f *WorkFile) SortBlocks() {
// retract directives are not de-duplicated since comments are // retract directives are not de-duplicated since comments are
// meaningful, and versions may be retracted multiple times. // meaningful, and versions may be retracted multiple times.
func (f *WorkFile) removeDups() { func (f *WorkFile) removeDups() {
removeDups(f.Syntax, nil, &f.Replace, nil) removeDups(f.Syntax, nil, &f.Replace, nil, nil)
} }

View File

@ -96,10 +96,11 @@ package module
// Changes to the semantics in this file require approval from rsc. // Changes to the semantics in this file require approval from rsc.
import ( import (
"cmp"
"errors" "errors"
"fmt" "fmt"
"path" "path"
"sort" "slices"
"strings" "strings"
"unicode" "unicode"
"unicode/utf8" "unicode/utf8"
@ -260,7 +261,7 @@ func modPathOK(r rune) bool {
// importPathOK reports whether r can appear in a package import path element. // importPathOK reports whether r can appear in a package import path element.
// //
// Import paths are intermediate between module paths and file paths: we allow // Import paths are intermediate between module paths and file paths: we
// disallow characters that would be confusing or ambiguous as arguments to // disallow characters that would be confusing or ambiguous as arguments to
// 'go get' (such as '@' and ' ' ), but allow certain characters that are // 'go get' (such as '@' and ' ' ), but allow certain characters that are
// otherwise-unambiguous on the command line and historically used for some // otherwise-unambiguous on the command line and historically used for some
@ -657,17 +658,15 @@ func CanonicalVersion(v string) string {
// optionally followed by a tie-breaking suffix introduced by a slash character, // optionally followed by a tie-breaking suffix introduced by a slash character,
// like in "v0.0.1/go.mod". // like in "v0.0.1/go.mod".
func Sort(list []Version) { func Sort(list []Version) {
sort.Slice(list, func(i, j int) bool { slices.SortFunc(list, func(i, j Version) int {
mi := list[i] if i.Path != j.Path {
mj := list[j] return strings.Compare(i.Path, j.Path)
if mi.Path != mj.Path {
return mi.Path < mj.Path
} }
// To help go.sum formatting, allow version/file. // To help go.sum formatting, allow version/file.
// Compare semver prefix by semver rules, // Compare semver prefix by semver rules,
// file by string order. // file by string order.
vi := mi.Version vi := i.Version
vj := mj.Version vj := j.Version
var fi, fj string var fi, fj string
if k := strings.Index(vi, "/"); k >= 0 { if k := strings.Index(vi, "/"); k >= 0 {
vi, fi = vi[:k], vi[k:] vi, fi = vi[:k], vi[k:]
@ -676,9 +675,9 @@ func Sort(list []Version) {
vj, fj = vj[:k], vj[k:] vj, fj = vj[:k], vj[k:]
} }
if vi != vj { if vi != vj {
return semver.Compare(vi, vj) < 0 return semver.Compare(vi, vj)
} }
return fi < fj return cmp.Compare(fi, fj)
}) })
} }
@ -803,8 +802,8 @@ func MatchPrefixPatterns(globs, target string) bool {
for globs != "" { for globs != "" {
// Extract next non-empty glob in comma-separated list. // Extract next non-empty glob in comma-separated list.
var glob string var glob string
if i := strings.Index(globs, ","); i >= 0 { if before, after, ok := strings.Cut(globs, ","); ok {
glob, globs = globs[:i], globs[i+1:] glob, globs = before, after
} else { } else {
glob, globs = globs, "" glob, globs = globs, ""
} }

View File

@ -22,7 +22,10 @@
// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. // as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
package semver package semver
import "sort" import (
"slices"
"strings"
)
// parsed returns the parsed form of a semantic version string. // parsed returns the parsed form of a semantic version string.
type parsed struct { type parsed struct {
@ -42,8 +45,8 @@ func IsValid(v string) bool {
// Canonical returns the canonical formatting of the semantic version v. // Canonical returns the canonical formatting of the semantic version v.
// It fills in any missing .MINOR or .PATCH and discards build metadata. // It fills in any missing .MINOR or .PATCH and discards build metadata.
// Two semantic versions compare equal only if their canonical formattings // Two semantic versions compare equal only if their canonical formatting
// are identical strings. // is an identical string.
// The canonical invalid semantic version is the empty string. // The canonical invalid semantic version is the empty string.
func Canonical(v string) string { func Canonical(v string) string {
p, ok := parse(v) p, ok := parse(v)
@ -156,17 +159,20 @@ type ByVersion []string
func (vs ByVersion) Len() int { return len(vs) } func (vs ByVersion) Len() int { return len(vs) }
func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
func (vs ByVersion) Less(i, j int) bool { func (vs ByVersion) Less(i, j int) bool { return compareVersion(vs[i], vs[j]) < 0 }
cmp := Compare(vs[i], vs[j])
if cmp != 0 { // Sort sorts a list of semantic version strings using [Compare] and falls back
return cmp < 0 // to use [strings.Compare] if both versions are considered equal.
} func Sort(list []string) {
return vs[i] < vs[j] slices.SortFunc(list, compareVersion)
} }
// Sort sorts a list of semantic version strings using [ByVersion]. func compareVersion(a, b string) int {
func Sort(list []string) { cmp := Compare(a, b)
sort.Sort(ByVersion(list)) if cmp != 0 {
return cmp
}
return strings.Compare(a, b)
} }
func parse(v string) (p parsed, ok bool) { func parse(v string) (p parsed, ok bool) {

View File

@ -2,44 +2,9 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Package context defines the Context type, which carries deadlines, // Package context has been superseded by the standard library [context] package.
// cancellation signals, and other request-scoped values across API boundaries
// and between processes.
// As of Go 1.7 this package is available in the standard library under the
// name [context], and migrating to it can be done automatically with [go fix].
// //
// Incoming requests to a server should create a [Context], and outgoing // Deprecated: Use the standard library context package instead.
// calls to servers should accept a Context. The chain of function
// calls between them must propagate the Context, optionally replacing
// it with a derived Context created using [WithCancel], [WithDeadline],
// [WithTimeout], or [WithValue].
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. This is discussed further in
// https://go.dev/blog/context-and-structs. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO]
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See https://go.dev/blog/context for example code for a server that uses
// Contexts.
//
// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs
package context package context
import ( import (
@ -51,36 +16,37 @@ import (
// API boundaries. // API boundaries.
// //
// Context's methods may be called by multiple goroutines simultaneously. // Context's methods may be called by multiple goroutines simultaneously.
//
//go:fix inline
type Context = context.Context type Context = context.Context
// Canceled is the error returned by [Context.Err] when the context is canceled // Canceled is the error returned by [Context.Err] when the context is canceled
// for some reason other than its deadline passing. // for some reason other than its deadline passing.
//
//go:fix inline
var Canceled = context.Canceled var Canceled = context.Canceled
// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled // DeadlineExceeded is the error returned by [Context.Err] when the context is canceled
// due to its deadline passing. // due to its deadline passing.
//
//go:fix inline
var DeadlineExceeded = context.DeadlineExceeded var DeadlineExceeded = context.DeadlineExceeded
// Background returns a non-nil, empty Context. It is never canceled, has no // Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function, // values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming // initialization, and tests, and as the top-level Context for incoming
// requests. // requests.
func Background() Context { //
return background //go:fix inline
} func Background() Context { return context.Background() }
// TODO returns a non-nil, empty Context. Code should use context.TODO when // TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the // it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context // surrounding function has not yet been extended to accept a Context
// parameter). // parameter).
func TODO() Context { //
return todo //go:fix inline
} func TODO() Context { return context.TODO() }
var (
background = context.Background()
todo = context.TODO()
)
// A CancelFunc tells an operation to abandon its work. // A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop. // A CancelFunc does not wait for the work to stop.
@ -95,6 +61,8 @@ type CancelFunc = context.CancelFunc
// //
// Canceling this context releases resources associated with it, so code should // Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete. // call cancel as soon as the operations running in this [Context] complete.
//
//go:fix inline
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
return context.WithCancel(parent) return context.WithCancel(parent)
} }
@ -108,6 +76,8 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
// //
// Canceling this context releases resources associated with it, so code should // Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete. // call cancel as soon as the operations running in this [Context] complete.
//
//go:fix inline
func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
return context.WithDeadline(parent, d) return context.WithDeadline(parent, d)
} }
@ -122,6 +92,8 @@ func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
// defer cancel() // releases resources if slowOperation completes before timeout elapses // defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx) // return slowOperation(ctx)
// } // }
//
//go:fix inline
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return context.WithTimeout(parent, timeout) return context.WithTimeout(parent, timeout)
} }
@ -139,6 +111,8 @@ func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
// interface{}, context keys often have concrete type // interface{}, context keys often have concrete type
// struct{}. Alternatively, exported context key variables' static // struct{}. Alternatively, exported context key variables' static
// type should be a pointer or interface. // type should be a pointer or interface.
//
//go:fix inline
func WithValue(parent Context, key, val interface{}) Context { func WithValue(parent Context, key, val interface{}) Context {
return context.WithValue(parent, key, val) return context.WithValue(parent, key, val)
} }

View File

@ -27,6 +27,7 @@ import (
// - If the resulting value is zero or out of range, use a default. // - If the resulting value is zero or out of range, use a default.
type http2Config struct { type http2Config struct {
MaxConcurrentStreams uint32 MaxConcurrentStreams uint32
StrictMaxConcurrentRequests bool
MaxDecoderHeaderTableSize uint32 MaxDecoderHeaderTableSize uint32
MaxEncoderHeaderTableSize uint32 MaxEncoderHeaderTableSize uint32
MaxReadFrameSize uint32 MaxReadFrameSize uint32
@ -55,7 +56,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
CountError: h2.CountError, CountError: h2.CountError,
} }
fillNetHTTPServerConfig(&conf, h1) fillNetHTTPConfig(&conf, h1.HTTP2)
setConfigDefaults(&conf, true) setConfigDefaults(&conf, true)
return conf return conf
} }
@ -64,6 +65,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
// (the net/http Transport). // (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config { func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{ conf := http2Config{
StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams,
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
MaxReadFrameSize: h2.MaxReadFrameSize, MaxReadFrameSize: h2.MaxReadFrameSize,
@ -81,7 +83,7 @@ func configFromTransport(h2 *Transport) http2Config {
} }
if h2.t1 != nil { if h2.t1 != nil {
fillNetHTTPTransportConfig(&conf, h2.t1) fillNetHTTPConfig(&conf, h2.t1.HTTP2)
} }
setConfigDefaults(&conf, false) setConfigDefaults(&conf, false)
return conf return conf
@ -120,3 +122,48 @@ func adjustHTTP1MaxHeaderSize(n int64) int64 {
const typicalHeaders = 10 // conservative const typicalHeaders = 10 // conservative
return n + typicalHeaders*perFieldOverhead return n + typicalHeaders*perFieldOverhead
} }
func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
if h2 == nil {
return
}
if h2.MaxConcurrentStreams != 0 {
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
}
if http2ConfigStrictMaxConcurrentRequests(h2) {
conf.StrictMaxConcurrentRequests = true
}
if h2.MaxEncoderHeaderTableSize != 0 {
conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
}
if h2.MaxDecoderHeaderTableSize != 0 {
conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
}
if h2.MaxConcurrentStreams != 0 {
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
}
if h2.MaxReadFrameSize != 0 {
conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
}
if h2.MaxReceiveBufferPerConnection != 0 {
conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
}
if h2.MaxReceiveBufferPerStream != 0 {
conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
}
if h2.SendPingTimeout != 0 {
conf.SendPingTimeout = h2.SendPingTimeout
}
if h2.PingTimeout != 0 {
conf.PingTimeout = h2.PingTimeout
}
if h2.WriteByteTimeout != 0 {
conf.WriteByteTimeout = h2.WriteByteTimeout
}
if h2.PermitProhibitedCipherSuites {
conf.PermitProhibitedCipherSuites = true
}
if h2.CountError != nil {
conf.CountError = h2.CountError
}
}

View File

@ -1,61 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.24
package http2
import "net/http"
// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
fillNetHTTPConfig(conf, srv.HTTP2)
}
// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
fillNetHTTPConfig(conf, tr.HTTP2)
}
func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
if h2 == nil {
return
}
if h2.MaxConcurrentStreams != 0 {
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
}
if h2.MaxEncoderHeaderTableSize != 0 {
conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
}
if h2.MaxDecoderHeaderTableSize != 0 {
conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
}
if h2.MaxConcurrentStreams != 0 {
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
}
if h2.MaxReadFrameSize != 0 {
conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
}
if h2.MaxReceiveBufferPerConnection != 0 {
conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
}
if h2.MaxReceiveBufferPerStream != 0 {
conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
}
if h2.SendPingTimeout != 0 {
conf.SendPingTimeout = h2.SendPingTimeout
}
if h2.PingTimeout != 0 {
conf.PingTimeout = h2.PingTimeout
}
if h2.WriteByteTimeout != 0 {
conf.WriteByteTimeout = h2.WriteByteTimeout
}
if h2.PermitProhibitedCipherSuites {
conf.PermitProhibitedCipherSuites = true
}
if h2.CountError != nil {
conf.CountError = h2.CountError
}
}

15
vendor/golang.org/x/net/http2/config_go125.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.26
package http2
import (
"net/http"
)
func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
return false
}

15
vendor/golang.org/x/net/http2/config_go126.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.26
package http2
import (
"net/http"
)
func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
return h2.StrictMaxConcurrentRequests
}

View File

@ -1,16 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.24
package http2
import "net/http"
// Pre-Go 1.24 fallback.
// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}

View File

@ -39,7 +39,7 @@ const (
FrameContinuation FrameType = 0x9 FrameContinuation FrameType = 0x9
) )
var frameName = map[FrameType]string{ var frameNames = [...]string{
FrameData: "DATA", FrameData: "DATA",
FrameHeaders: "HEADERS", FrameHeaders: "HEADERS",
FramePriority: "PRIORITY", FramePriority: "PRIORITY",
@ -53,10 +53,10 @@ var frameName = map[FrameType]string{
} }
func (t FrameType) String() string { func (t FrameType) String() string {
if s, ok := frameName[t]; ok { if int(t) < len(frameNames) {
return s return frameNames[t]
} }
return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t)
} }
// Flags is a bitmask of HTTP/2 flags. // Flags is a bitmask of HTTP/2 flags.
@ -124,7 +124,7 @@ var flagName = map[FrameType]map[Flags]string{
// might be 0). // might be 0).
type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error)
var frameParsers = map[FrameType]frameParser{ var frameParsers = [...]frameParser{
FrameData: parseDataFrame, FrameData: parseDataFrame,
FrameHeaders: parseHeadersFrame, FrameHeaders: parseHeadersFrame,
FramePriority: parsePriorityFrame, FramePriority: parsePriorityFrame,
@ -138,8 +138,8 @@ var frameParsers = map[FrameType]frameParser{
} }
func typeFrameParser(t FrameType) frameParser { func typeFrameParser(t FrameType) frameParser {
if f := frameParsers[t]; f != nil { if int(t) < len(frameParsers) {
return f return frameParsers[t]
} }
return parseUnknownFrame return parseUnknownFrame
} }
@ -280,6 +280,8 @@ type Framer struct {
// lastHeaderStream is non-zero if the last frame was an // lastHeaderStream is non-zero if the last frame was an
// unfinished HEADERS/CONTINUATION. // unfinished HEADERS/CONTINUATION.
lastHeaderStream uint32 lastHeaderStream uint32
// lastFrameType holds the type of the last frame for verifying frame order.
lastFrameType FrameType
maxReadSize uint32 maxReadSize uint32
headerBuf [frameHeaderLen]byte headerBuf [frameHeaderLen]byte
@ -347,7 +349,7 @@ func (fr *Framer) maxHeaderListSize() uint32 {
func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
// Write the FrameHeader. // Write the FrameHeader.
f.wbuf = append(f.wbuf[:0], f.wbuf = append(f.wbuf[:0],
0, // 3 bytes of length, filled in in endWrite 0, // 3 bytes of length, filled in endWrite
0, 0,
0, 0,
byte(ftype), byte(ftype),
@ -488,30 +490,41 @@ func terminalReadFrameError(err error) bool {
return err != nil return err != nil
} }
// ReadFrame reads a single frame. The returned Frame is only valid // ReadFrameHeader reads the header of the next frame.
// until the next call to ReadFrame. // It reads the 9-byte fixed frame header, and does not read any portion of the
// frame payload. The caller is responsible for consuming the payload, either
// with ReadFrameForHeader or directly from the Framer's io.Reader.
// //
// If the frame is larger than previously set with SetMaxReadFrameSize, the // If the frame is larger than previously set with SetMaxReadFrameSize, it
// returned error is ErrFrameTooLarge. Other errors may be of type // returns the frame header and ErrFrameTooLarge.
// ConnectionError, StreamError, or anything else from the underlying
// reader.
// //
// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID // If the returned FrameHeader.StreamID is non-zero, it indicates the stream
// indicates the stream responsible for the error. // responsible for the error.
func (fr *Framer) ReadFrame() (Frame, error) { func (fr *Framer) ReadFrameHeader() (FrameHeader, error) {
fr.errDetail = nil fr.errDetail = nil
if fr.lastFrame != nil {
fr.lastFrame.invalidate()
}
fh, err := readFrameHeader(fr.headerBuf[:], fr.r) fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
if err != nil { if err != nil {
return nil, err return fh, err
} }
if fh.Length > fr.maxReadSize { if fh.Length > fr.maxReadSize {
if fh == invalidHTTP1LookingFrameHeader() { if fh == invalidHTTP1LookingFrameHeader() {
return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
} }
return nil, ErrFrameTooLarge return fh, ErrFrameTooLarge
}
if err := fr.checkFrameOrder(fh); err != nil {
return fh, err
}
return fh, nil
}
// ReadFrameForHeader reads the payload for the frame with the given FrameHeader.
//
// It behaves identically to ReadFrame, other than not checking the maximum
// frame size.
func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) {
if fr.lastFrame != nil {
fr.lastFrame.invalidate()
} }
payload := fr.getReadBuf(fh.Length) payload := fr.getReadBuf(fh.Length)
if _, err := io.ReadFull(fr.r, payload); err != nil { if _, err := io.ReadFull(fr.r, payload); err != nil {
@ -527,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
} }
return nil, err return nil, err
} }
if err := fr.checkFrameOrder(f); err != nil { fr.lastFrame = f
return nil, err
}
if fr.logReads { if fr.logReads {
fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
} }
@ -539,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) {
return f, nil return f, nil
} }
// ReadFrame reads a single frame. The returned Frame is only valid
// until the next call to ReadFrame or ReadFrameBodyForHeader.
//
// If the frame is larger than previously set with SetMaxReadFrameSize, the
// returned error is ErrFrameTooLarge. Other errors may be of type
// ConnectionError, StreamError, or anything else from the underlying
// reader.
//
// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
// indicates the stream responsible for the error.
func (fr *Framer) ReadFrame() (Frame, error) {
fh, err := fr.ReadFrameHeader()
if err != nil {
return nil, err
}
return fr.ReadFrameForHeader(fh)
}
// connError returns ConnectionError(code) but first // connError returns ConnectionError(code) but first
// stashes away a public reason to the caller can optionally relay it // stashes away a public reason to the caller can optionally relay it
// to the peer before hanging up on them. This might help others debug // to the peer before hanging up on them. This might help others debug
@ -551,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error {
// checkFrameOrder reports an error if f is an invalid frame to return // checkFrameOrder reports an error if f is an invalid frame to return
// next from ReadFrame. Mostly it checks whether HEADERS and // next from ReadFrame. Mostly it checks whether HEADERS and
// CONTINUATION frames are contiguous. // CONTINUATION frames are contiguous.
func (fr *Framer) checkFrameOrder(f Frame) error { func (fr *Framer) checkFrameOrder(fh FrameHeader) error {
last := fr.lastFrame lastType := fr.lastFrameType
fr.lastFrame = f fr.lastFrameType = fh.Type
if fr.AllowIllegalReads { if fr.AllowIllegalReads {
return nil return nil
} }
fh := f.Header()
if fr.lastHeaderStream != 0 { if fr.lastHeaderStream != 0 {
if fh.Type != FrameContinuation { if fh.Type != FrameContinuation {
return fr.connError(ErrCodeProtocol, return fr.connError(ErrCodeProtocol,
fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
fh.Type, fh.StreamID, fh.Type, fh.StreamID,
last.Header().Type, fr.lastHeaderStream)) lastType, fr.lastHeaderStream))
} }
if fh.StreamID != fr.lastHeaderStream { if fh.StreamID != fr.lastHeaderStream {
return fr.connError(ErrCodeProtocol, return fr.connError(ErrCodeProtocol,
@ -1152,7 +1180,16 @@ type PriorityFrame struct {
PriorityParam PriorityParam
} }
// PriorityParam are the stream prioritzation parameters. var defaultRFC9218Priority = PriorityParam{
incremental: 0,
urgency: 3,
}
// Note that HTTP/2 has had two different prioritization schemes, and
// PriorityParam struct below is a superset of both schemes. The exported
// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
// PriorityParam are the stream prioritization parameters.
type PriorityParam struct { type PriorityParam struct {
// StreamDep is a 31-bit stream identifier for the // StreamDep is a 31-bit stream identifier for the
// stream that this stream depends on. Zero means no // stream that this stream depends on. Zero means no
@ -1167,6 +1204,20 @@ type PriorityParam struct {
// the spec, "Add one to the value to obtain a weight between // the spec, "Add one to the value to obtain a weight between
// 1 and 256." // 1 and 256."
Weight uint8 Weight uint8
// "The urgency (u) parameter value is Integer (see Section 3.3.1 of
// [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of
// priority. The default is 3."
urgency uint8
// "The incremental (i) parameter value is Boolean (see Section 3.3.6 of
// [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed
// incrementally, i.e., provide some meaningful output as chunks of the
// response arrive."
//
// We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can
// avoid unnecessary type conversions and because either type takes 1 byte.
incremental uint8
} }
func (p PriorityParam) IsZero() bool { func (p PriorityParam) IsZero() bool {

View File

@ -15,21 +15,32 @@ import (
"runtime" "runtime"
"strconv" "strconv"
"sync" "sync"
"sync/atomic"
) )
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
// Setting DebugGoroutines to false during a test to disable goroutine debugging
// results in race detector complaints when a test leaves goroutines running before
// returning. Tests shouldn't do this, of course, but when they do it generally shows
// up as infrequent, hard-to-debug flakes. (See #66519.)
//
// Disable goroutine debugging during individual tests with an atomic bool.
// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition
// here is harmless.)
var disableDebugGoroutines atomic.Bool
type goroutineLock uint64 type goroutineLock uint64
func newGoroutineLock() goroutineLock { func newGoroutineLock() goroutineLock {
if !DebugGoroutines { if !DebugGoroutines || disableDebugGoroutines.Load() {
return 0 return 0
} }
return goroutineLock(curGoroutineID()) return goroutineLock(curGoroutineID())
} }
func (g goroutineLock) check() { func (g goroutineLock) check() {
if !DebugGoroutines { if !DebugGoroutines || disableDebugGoroutines.Load() {
return return
} }
if curGoroutineID() != uint64(g) { if curGoroutineID() != uint64(g) {
@ -38,7 +49,7 @@ func (g goroutineLock) check() {
} }
func (g goroutineLock) checkNotOn() { func (g goroutineLock) checkNotOn() {
if !DebugGoroutines { if !DebugGoroutines || disableDebugGoroutines.Load() {
return return
} }
if curGoroutineID() == uint64(g) { if curGoroutineID() == uint64(g) {

View File

@ -11,13 +11,10 @@
// requires Go 1.6 or later) // requires Go 1.6 or later)
// //
// See https://http2.github.io/ for more information on HTTP/2. // See https://http2.github.io/ for more information on HTTP/2.
//
// See https://http2.golang.org/ for a test server running this code.
package http2 // import "golang.org/x/net/http2" package http2 // import "golang.org/x/net/http2"
import ( import (
"bufio" "bufio"
"context"
"crypto/tls" "crypto/tls"
"errors" "errors"
"fmt" "fmt"
@ -37,7 +34,6 @@ var (
VerboseLogs bool VerboseLogs bool
logFrameWrites bool logFrameWrites bool
logFrameReads bool logFrameReads bool
inTests bool
// Enabling extended CONNECT by causes browsers to attempt to use // Enabling extended CONNECT by causes browsers to attempt to use
// WebSockets-over-HTTP/2. This results in problems when the server's websocket // WebSockets-over-HTTP/2. This results in problems when the server's websocket
@ -257,15 +253,13 @@ func (cw closeWaiter) Wait() {
// idle memory usage with many connections. // idle memory usage with many connections.
type bufferedWriter struct { type bufferedWriter struct {
_ incomparable _ incomparable
group synctestGroupInterface // immutable
conn net.Conn // immutable conn net.Conn // immutable
bw *bufio.Writer // non-nil when data is buffered bw *bufio.Writer // non-nil when data is buffered
byteTimeout time.Duration // immutable, WriteByteTimeout byteTimeout time.Duration // immutable, WriteByteTimeout
} }
func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter {
return &bufferedWriter{ return &bufferedWriter{
group: group,
conn: conn, conn: conn,
byteTimeout: timeout, byteTimeout: timeout,
} }
@ -316,24 +310,18 @@ func (w *bufferedWriter) Flush() error {
type bufferedWriterTimeoutWriter bufferedWriter type bufferedWriterTimeoutWriter bufferedWriter
func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) return writeWithByteTimeout(w.conn, w.byteTimeout, p)
} }
// writeWithByteTimeout writes to conn. // writeWithByteTimeout writes to conn.
// If more than timeout passes without any bytes being written to the connection, // If more than timeout passes without any bytes being written to the connection,
// the write fails. // the write fails.
func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
if timeout <= 0 { if timeout <= 0 {
return conn.Write(p) return conn.Write(p)
} }
for { for {
var now time.Time conn.SetWriteDeadline(time.Now().Add(timeout))
if group == nil {
now = time.Now()
} else {
now = group.Now()
}
conn.SetWriteDeadline(now.Add(timeout))
nn, err := conn.Write(p[n:]) nn, err := conn.Write(p[n:])
n += nn n += nn
if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
@ -419,14 +407,3 @@ func (s *sorter) SortStrings(ss []string) {
// makes that struct also non-comparable, and generally doesn't add // makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first). // any size (as long as it's first).
type incomparable [0]func() type incomparable [0]func()
// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
// It's defined as an interface here to let us keep synctestGroup entirely test-only
// and not a part of non-test builds.
type synctestGroupInterface interface {
Join()
Now() time.Time
NewTimer(d time.Duration) timer
AfterFunc(d time.Duration, f func()) timer
ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
}

View File

@ -176,44 +176,15 @@ type Server struct {
// so that we don't embed a Mutex in this struct, which will make the // so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers. // struct non-copyable, which might break some callers.
state *serverInternalState state *serverInternalState
// Synchronization group used for testing.
// Outside of tests, this is nil.
group synctestGroupInterface
}
func (s *Server) markNewGoroutine() {
if s.group != nil {
s.group.Join()
}
}
func (s *Server) now() time.Time {
if s.group != nil {
return s.group.Now()
}
return time.Now()
}
// newTimer creates a new time.Timer, or a synthetic timer in tests.
func (s *Server) newTimer(d time.Duration) timer {
if s.group != nil {
return s.group.NewTimer(d)
}
return timeTimer{time.NewTimer(d)}
}
// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
func (s *Server) afterFunc(d time.Duration, f func()) timer {
if s.group != nil {
return s.group.AfterFunc(d, f)
}
return timeTimer{time.AfterFunc(d, f)}
} }
type serverInternalState struct { type serverInternalState struct {
mu sync.Mutex mu sync.Mutex
activeConns map[*serverConn]struct{} activeConns map[*serverConn]struct{}
// Pool of error channels. This is per-Server rather than global
// because channels can't be reused across synctest bubbles.
errChanPool sync.Pool
} }
func (s *serverInternalState) registerConn(sc *serverConn) { func (s *serverInternalState) registerConn(sc *serverConn) {
@ -245,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() {
s.mu.Unlock() s.mu.Unlock()
} }
// Global error channel pool used for uninitialized Servers.
// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
var errChanPool = sync.Pool{
New: func() any { return make(chan error, 1) },
}
func (s *serverInternalState) getErrChan() chan error {
if s == nil {
return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
}
return s.errChanPool.Get().(chan error)
}
func (s *serverInternalState) putErrChan(ch chan error) {
if s == nil {
errChanPool.Put(ch) // Server used without calling ConfigureServer
return
}
s.errChanPool.Put(ch)
}
// ConfigureServer adds HTTP/2 support to a net/http Server. // ConfigureServer adds HTTP/2 support to a net/http Server.
// //
// The configuration conf may be nil. // The configuration conf may be nil.
@ -257,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if conf == nil { if conf == nil {
conf = new(Server) conf = new(Server)
} }
conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} conf.state = &serverInternalState{
activeConns: make(map[*serverConn]struct{}),
errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
}
if h1, h2 := s, conf; h2.IdleTimeout == 0 { if h1, h2 := s, conf; h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 { if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout h2.IdleTimeout = h1.IdleTimeout
@ -423,6 +418,9 @@ func (o *ServeConnOpts) handler() http.Handler {
// //
// The opts parameter is optional. If nil, default values are used. // The opts parameter is optional. If nil, default values are used.
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
if opts == nil {
opts = &ServeConnOpts{}
}
s.serveConn(c, opts, nil) s.serveConn(c, opts, nil)
} }
@ -438,7 +436,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
conn: c, conn: c,
baseCtx: baseCtx, baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(), remoteAddrStr: c.RemoteAddr().String(),
bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), bw: newBufferedWriter(c, conf.WriteByteTimeout),
handler: opts.handler(), handler: opts.handler(),
streams: make(map[uint32]*stream), streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult), readFrameCh: make(chan readFrameResult),
@ -638,11 +636,11 @@ type serverConn struct {
pingSent bool pingSent bool
sentPingData [8]byte sentPingData [8]byte
goAwayCode ErrCode goAwayCode ErrCode
shutdownTimer timer // nil until used shutdownTimer *time.Timer // nil until used
idleTimer timer // nil if unused idleTimer *time.Timer // nil if unused
readIdleTimeout time.Duration readIdleTimeout time.Duration
pingTimeout time.Duration pingTimeout time.Duration
readIdleTimer timer // nil if unused readIdleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine: // Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer headerWriteBuf bytes.Buffer
@ -690,8 +688,8 @@ type stream struct {
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
gotTrailerHeader bool // HEADER frame for trailers was seen gotTrailerHeader bool // HEADER frame for trailers was seen
wroteHeaders bool // whether we wrote headers (not status 100) wroteHeaders bool // whether we wrote headers (not status 100)
readDeadline timer // nil if unused readDeadline *time.Timer // nil if unused
writeDeadline timer // nil if unused writeDeadline *time.Timer // nil if unused
closeErr error // set before cw is closed closeErr error // set before cw is closed
trailer http.Header // accumulated trailers trailer http.Header // accumulated trailers
@ -848,7 +846,6 @@ type readFrameResult struct {
// consumer is done with the frame. // consumer is done with the frame.
// It's run on its own goroutine. // It's run on its own goroutine.
func (sc *serverConn) readFrames() { func (sc *serverConn) readFrames() {
sc.srv.markNewGoroutine()
gate := make(chan struct{}) gate := make(chan struct{})
gateDone := func() { gate <- struct{}{} } gateDone := func() { gate <- struct{}{} }
for { for {
@ -881,7 +878,6 @@ type frameWriteResult struct {
// At most one goroutine can be running writeFrameAsync at a time per // At most one goroutine can be running writeFrameAsync at a time per
// serverConn. // serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
sc.srv.markNewGoroutine()
var err error var err error
if wd == nil { if wd == nil {
err = wr.write.writeFrame(sc) err = wr.write.writeFrame(sc)
@ -965,22 +961,22 @@ func (sc *serverConn) serve(conf http2Config) {
sc.setConnState(http.StateIdle) sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout > 0 { if sc.srv.IdleTimeout > 0 {
sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop() defer sc.idleTimer.Stop()
} }
if conf.SendPingTimeout > 0 { if conf.SendPingTimeout > 0 {
sc.readIdleTimeout = conf.SendPingTimeout sc.readIdleTimeout = conf.SendPingTimeout
sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
defer sc.readIdleTimer.Stop() defer sc.readIdleTimer.Stop()
} }
go sc.readFrames() // closed by defer sc.conn.Close above go sc.readFrames() // closed by defer sc.conn.Close above
settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop() defer settingsTimer.Stop()
lastFrameTime := sc.srv.now() lastFrameTime := time.Now()
loopNum := 0 loopNum := 0
for { for {
loopNum++ loopNum++
@ -994,7 +990,7 @@ func (sc *serverConn) serve(conf http2Config) {
case res := <-sc.wroteFrameCh: case res := <-sc.wroteFrameCh:
sc.wroteFrame(res) sc.wroteFrame(res)
case res := <-sc.readFrameCh: case res := <-sc.readFrameCh:
lastFrameTime = sc.srv.now() lastFrameTime = time.Now()
// Process any written frames before reading new frames from the client since a // Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started. // written frame could have triggered a new stream to be started.
if sc.writingFrameAsync { if sc.writingFrameAsync {
@ -1077,7 +1073,7 @@ func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
} }
pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
now := sc.srv.now() now := time.Now()
if pingAt.After(now) { if pingAt.After(now) {
// We received frames since arming the ping timer. // We received frames since arming the ping timer.
// Reset it for the next possible timeout. // Reset it for the next possible timeout.
@ -1141,10 +1137,10 @@ func (sc *serverConn) readPreface() error {
errc <- nil errc <- nil
} }
}() }()
timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop() defer timer.Stop()
select { select {
case <-timer.C(): case <-timer.C:
return errPrefaceTimeout return errPrefaceTimeout
case err := <-errc: case err := <-errc:
if err == nil { if err == nil {
@ -1156,10 +1152,6 @@ func (sc *serverConn) readPreface() error {
} }
} }
var errChanPool = sync.Pool{
New: func() interface{} { return make(chan error, 1) },
}
var writeDataPool = sync.Pool{ var writeDataPool = sync.Pool{
New: func() interface{} { return new(writeData) }, New: func() interface{} { return new(writeData) },
} }
@ -1167,7 +1159,7 @@ var writeDataPool = sync.Pool{
// writeDataFromHandler writes DATA response frames from a handler on // writeDataFromHandler writes DATA response frames from a handler on
// the given stream. // the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
ch := errChanPool.Get().(chan error) ch := sc.srv.state.getErrChan()
writeArg := writeDataPool.Get().(*writeData) writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream} *writeArg = writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(FrameWriteRequest{ err := sc.writeFrameFromHandler(FrameWriteRequest{
@ -1199,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
return errStreamClosed return errStreamClosed
} }
} }
errChanPool.Put(ch) sc.srv.state.putErrChan(ch)
if frameWriteDone { if frameWriteDone {
writeDataPool.Put(writeArg) writeDataPool.Put(writeArg)
} }
@ -1513,7 +1505,7 @@ func (sc *serverConn) goAway(code ErrCode) {
func (sc *serverConn) shutDownIn(d time.Duration) { func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check() sc.serveG.check()
sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
} }
func (sc *serverConn) resetStream(se StreamError) { func (sc *serverConn) resetStream(se StreamError) {
@ -2118,7 +2110,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// (in Go 1.8), though. That's a more sane option anyway. // (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout > 0 { if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{}) sc.conn.SetReadDeadline(time.Time{})
st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
} }
return sc.scheduleHandler(id, rw, req, handler) return sc.scheduleHandler(id, rw, req, handler)
@ -2216,7 +2208,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.flow.add(sc.initialStreamSendWindowSize) st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.init(sc.initialStreamRecvWindowSize) st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 { if sc.hs.WriteTimeout > 0 {
st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
} }
sc.streams[id] = st sc.streams[id] = st
@ -2405,7 +2397,6 @@ func (sc *serverConn) handlerDone() {
// Run on its own goroutine. // Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
sc.srv.markNewGoroutine()
defer sc.sendServeMsg(handlerDoneMsg) defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true didPanic := true
defer func() { defer func() {
@ -2454,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
// waiting for this frame to be written, so an http.Flush mid-handler // waiting for this frame to be written, so an http.Flush mid-handler
// writes out the correct value of keys, before a handler later potentially // writes out the correct value of keys, before a handler later potentially
// mutates it. // mutates it.
errc = errChanPool.Get().(chan error) errc = sc.srv.state.getErrChan()
} }
if err := sc.writeFrameFromHandler(FrameWriteRequest{ if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData, write: headerData,
@ -2466,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
if errc != nil { if errc != nil {
select { select {
case err := <-errc: case err := <-errc:
errChanPool.Put(errc) sc.srv.state.putErrChan(errc)
return err return err
case <-sc.doneServing: case <-sc.doneServing:
return errClientDisconnected return errClientDisconnected
@ -2573,7 +2564,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) {
if err == io.EOF { if err == io.EOF {
b.sawEOF = true b.sawEOF = true
} }
if b.conn == nil && inTests { if b.conn == nil {
return return
} }
b.conn.noteBodyReadFromHandler(b.stream, n, err) b.conn.noteBodyReadFromHandler(b.stream, n, err)
@ -2702,7 +2693,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
var date string var date string
if _, ok := rws.snapHeader["Date"]; !ok { if _, ok := rws.snapHeader["Date"]; !ok {
// TODO(bradfitz): be faster here, like net/http? measure. // TODO(bradfitz): be faster here, like net/http? measure.
date = rws.conn.srv.now().UTC().Format(http.TimeFormat) date = time.Now().UTC().Format(http.TimeFormat)
} }
for _, v := range rws.snapHeader["Trailer"] { for _, v := range rws.snapHeader["Trailer"] {
@ -2824,7 +2815,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
st := w.rws.stream st := w.rws.stream
if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately // If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail. // so writes after SetWriteDeadline returns will fail.
st.onReadTimeout() st.onReadTimeout()
@ -2840,9 +2831,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
if deadline.IsZero() { if deadline.IsZero() {
st.readDeadline = nil st.readDeadline = nil
} else if st.readDeadline == nil { } else if st.readDeadline == nil {
st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
} else { } else {
st.readDeadline.Reset(deadline.Sub(sc.srv.now())) st.readDeadline.Reset(deadline.Sub(time.Now()))
} }
}) })
return nil return nil
@ -2850,7 +2841,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
st := w.rws.stream st := w.rws.stream
if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately // If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail. // so writes after SetWriteDeadline returns will fail.
st.onWriteTimeout() st.onWriteTimeout()
@ -2866,9 +2857,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
if deadline.IsZero() { if deadline.IsZero() {
st.writeDeadline = nil st.writeDeadline = nil
} else if st.writeDeadline == nil { } else if st.writeDeadline == nil {
st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
} else { } else {
st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) st.writeDeadline.Reset(deadline.Sub(time.Now()))
} }
}) })
return nil return nil
@ -3147,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
method: opts.Method, method: opts.Method,
url: u, url: u,
header: cloneHeader(opts.Header), header: cloneHeader(opts.Header),
done: errChanPool.Get().(chan error), done: sc.srv.state.getErrChan(),
} }
select { select {
@ -3164,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
case <-st.cw: case <-st.cw:
return errStreamClosed return errStreamClosed
case err := <-msg.done: case err := <-msg.done:
errChanPool.Put(msg.done) sc.srv.state.putErrChan(msg.done)
return err return err
} }
} }

View File

@ -1,20 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import "time"
// A timer is a time.Timer, as an interface which can be replaced in tests.
type timer = interface {
C() <-chan time.Time
Reset(d time.Duration) bool
Stop() bool
}
// timeTimer adapts a time.Timer to the timer interface.
type timeTimer struct {
*time.Timer
}
func (t timeTimer) C() <-chan time.Time { return t.Timer.C }

View File

@ -9,6 +9,7 @@ package http2
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"compress/flate"
"compress/gzip" "compress/gzip"
"context" "context"
"crypto/rand" "crypto/rand"
@ -193,50 +194,6 @@ type Transport struct {
type transportTestHooks struct { type transportTestHooks struct {
newclientconn func(*ClientConn) newclientconn func(*ClientConn)
group synctestGroupInterface
}
func (t *Transport) markNewGoroutine() {
if t != nil && t.transportTestHooks != nil {
t.transportTestHooks.group.Join()
}
}
func (t *Transport) now() time.Time {
if t != nil && t.transportTestHooks != nil {
return t.transportTestHooks.group.Now()
}
return time.Now()
}
func (t *Transport) timeSince(when time.Time) time.Duration {
if t != nil && t.transportTestHooks != nil {
return t.now().Sub(when)
}
return time.Since(when)
}
// newTimer creates a new time.Timer, or a synthetic timer in tests.
func (t *Transport) newTimer(d time.Duration) timer {
if t.transportTestHooks != nil {
return t.transportTestHooks.group.NewTimer(d)
}
return timeTimer{time.NewTimer(d)}
}
// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
func (t *Transport) afterFunc(d time.Duration, f func()) timer {
if t.transportTestHooks != nil {
return t.transportTestHooks.group.AfterFunc(d, f)
}
return timeTimer{time.AfterFunc(d, f)}
}
func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
if t.transportTestHooks != nil {
return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
}
return context.WithTimeout(ctx, d)
} }
func (t *Transport) maxHeaderListSize() uint32 { func (t *Transport) maxHeaderListSize() uint32 {
@ -366,7 +323,7 @@ type ClientConn struct {
readerErr error // set before readerDone is closed readerErr error // set before readerDone is closed
idleTimeout time.Duration // or 0 for never idleTimeout time.Duration // or 0 for never
idleTimer timer idleTimer *time.Timer
mu sync.Mutex // guards following mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes cond *sync.Cond // hold mu; broadcast on flow/closed changes
@ -399,6 +356,7 @@ type ClientConn struct {
readIdleTimeout time.Duration readIdleTimeout time.Duration
pingTimeout time.Duration pingTimeout time.Duration
extendedConnectAllowed bool extendedConnectAllowed bool
strictMaxConcurrentStreams bool
// rstStreamPingsBlocked works around an unfortunate gRPC behavior. // rstStreamPingsBlocked works around an unfortunate gRPC behavior.
// gRPC strictly limits the number of PING frames that it will receive. // gRPC strictly limits the number of PING frames that it will receive.
@ -418,11 +376,24 @@ type ClientConn struct {
// completely unresponsive connection. // completely unresponsive connection.
pendingResets int pendingResets int
// readBeforeStreamID is the smallest stream ID that has not been followed by
// a frame read from the peer. We use this to determine when a request may
// have been sent to a completely unresponsive connection:
// If the request ID is less than readBeforeStreamID, then we have had some
// indication of life on the connection since sending the request.
readBeforeStreamID uint32
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock. // Write to reqHeaderMu to lock it, read from it to unlock.
// Lock reqmu BEFORE mu or wmu. // Lock reqmu BEFORE mu or wmu.
reqHeaderMu chan struct{} reqHeaderMu chan struct{}
// internalStateHook reports state changes back to the net/http.ClientConn.
// Note that this is different from the user state hook registered by
// net/http.ClientConn.SetStateHook: The internal hook calls ClientConn,
// which calls the user hook.
internalStateHook func()
// wmu is held while writing. // wmu is held while writing.
// Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes.
// Only acquire both at the same time when changing peer settings. // Only acquire both at the same time when changing peer settings.
@ -534,14 +505,12 @@ func (cs *clientStream) closeReqBodyLocked() {
cs.reqBodyClosed = make(chan struct{}) cs.reqBodyClosed = make(chan struct{})
reqBodyClosed := cs.reqBodyClosed reqBodyClosed := cs.reqBodyClosed
go func() { go func() {
cs.cc.t.markNewGoroutine()
cs.reqBody.Close() cs.reqBody.Close()
close(reqBodyClosed) close(reqBodyClosed)
}() }()
} }
type stickyErrWriter struct { type stickyErrWriter struct {
group synctestGroupInterface
conn net.Conn conn net.Conn
timeout time.Duration timeout time.Duration
err *error err *error
@ -551,7 +520,7 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil { if *sew.err != nil {
return 0, *sew.err return 0, *sew.err
} }
n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) n, err = writeWithByteTimeout(sew.conn, sew.timeout, p)
*sew.err = err *sew.err = err
return n, err return n, err
} }
@ -650,9 +619,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
backoff := float64(uint(1) << (uint(retry) - 1)) backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64()) backoff += backoff * (0.1 * mathrand.Float64())
d := time.Second * time.Duration(backoff) d := time.Second * time.Duration(backoff)
tm := t.newTimer(d) tm := time.NewTimer(d)
select { select {
case <-tm.C(): case <-tm.C:
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue continue
case <-req.Context().Done(): case <-req.Context().Done():
@ -699,6 +668,7 @@ var (
errClientConnUnusable = errors.New("http2: client conn not usable") errClientConnUnusable = errors.New("http2: client conn not usable")
errClientConnNotEstablished = errors.New("http2: client conn could not be established") errClientConnNotEstablished = errors.New("http2: client conn could not be established")
errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close")
) )
// shouldRetryRequest is called by RoundTrip when a request fails to get // shouldRetryRequest is called by RoundTrip when a request fails to get
@ -753,7 +723,7 @@ func canRetryError(err error) bool {
func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
if t.transportTestHooks != nil { if t.transportTestHooks != nil {
return t.newClientConn(nil, singleUse) return t.newClientConn(nil, singleUse, nil)
} }
host, _, err := net.SplitHostPort(addr) host, _, err := net.SplitHostPort(addr)
if err != nil { if err != nil {
@ -763,7 +733,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
if err != nil { if err != nil {
return nil, err return nil, err
} }
return t.newClientConn(tconn, singleUse) return t.newClientConn(tconn, singleUse, nil)
} }
func (t *Transport) newTLSConfig(host string) *tls.Config { func (t *Transport) newTLSConfig(host string) *tls.Config {
@ -815,10 +785,10 @@ func (t *Transport) expectContinueTimeout() time.Duration {
} }
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
return t.newClientConn(c, t.disableKeepAlives()) return t.newClientConn(c, t.disableKeepAlives(), nil)
} }
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { func (t *Transport) newClientConn(c net.Conn, singleUse bool, internalStateHook func()) (*ClientConn, error) {
conf := configFromTransport(t) conf := configFromTransport(t)
cc := &ClientConn{ cc := &ClientConn{
t: t, t: t,
@ -829,6 +799,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
initialWindowSize: 65535, // spec default initialWindowSize: 65535, // spec default
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests,
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*clientStream), streams: make(map[uint32]*clientStream),
singleUse: singleUse, singleUse: singleUse,
@ -838,14 +809,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
pingTimeout: conf.PingTimeout, pingTimeout: conf.PingTimeout,
pings: make(map[[8]byte]chan struct{}), pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1), reqHeaderMu: make(chan struct{}, 1),
lastActive: t.now(), lastActive: time.Now(),
internalStateHook: internalStateHook,
} }
var group synctestGroupInterface
if t.transportTestHooks != nil { if t.transportTestHooks != nil {
t.markNewGoroutine()
t.transportTestHooks.newclientconn(cc) t.transportTestHooks.newclientconn(cc)
c = cc.tconn c = cc.tconn
group = t.group
} }
if VerboseLogs { if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@ -857,7 +826,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// TODO: adjust this writer size to account for frame size + // TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding. // MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(stickyErrWriter{ cc.bw = bufio.NewWriter(stickyErrWriter{
group: group,
conn: c, conn: c,
timeout: conf.WriteByteTimeout, timeout: conf.WriteByteTimeout,
err: &cc.werr, err: &cc.werr,
@ -906,7 +874,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// Start the idle timer after the connection is fully initialized. // Start the idle timer after the connection is fully initialized.
if d := t.idleConnTimeout(); d != 0 { if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d cc.idleTimeout = d
cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
} }
go cc.readLoop() go cc.readLoop()
@ -917,7 +885,7 @@ func (cc *ClientConn) healthCheck() {
pingTimeout := cc.pingTimeout pingTimeout := cc.pingTimeout
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will // We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received. // trigger the healthCheck again if there is no frame received.
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
defer cancel() defer cancel()
cc.vlogf("http2: Transport sending health check") cc.vlogf("http2: Transport sending health check")
err := cc.Ping(ctx) err := cc.Ping(ctx)
@ -1067,7 +1035,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
return return
} }
var maxConcurrentOkay bool var maxConcurrentOkay bool
if cc.t.StrictMaxConcurrentStreams { if cc.strictMaxConcurrentStreams {
// We'll tell the caller we can take a new request to // We'll tell the caller we can take a new request to
// prevent the caller from dialing a new TCP // prevent the caller from dialing a new TCP
// connection, but then we'll block later before // connection, but then we'll block later before
@ -1083,10 +1051,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams)
} }
st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && st.canTakeNewRequest = maxConcurrentOkay && cc.isUsableLocked()
!cc.doNotReuse &&
int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
!cc.tooIdleLocked()
// If this connection has never been used for a request and is closed, // If this connection has never been used for a request and is closed,
// then let it take a request (which will fail). // then let it take a request (which will fail).
@ -1102,6 +1067,31 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
return return
} }
func (cc *ClientConn) isUsableLocked() bool {
return cc.goAway == nil &&
!cc.closed &&
!cc.closing &&
!cc.doNotReuse &&
int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
!cc.tooIdleLocked()
}
// canReserveLocked reports whether a net/http.ClientConn can reserve a slot on this conn.
//
// This follows slightly different rules than clientConnIdleState.canTakeNewRequest.
// We only permit reservations up to the conn's concurrency limit.
// This differs from ClientConn.ReserveNewRequest, which permits reservations
// past the limit when StrictMaxConcurrentStreams is set.
func (cc *ClientConn) canReserveLocked() bool {
if cc.currentRequestCountLocked() >= int(cc.maxConcurrentStreams) {
return false
}
if !cc.isUsableLocked() {
return false
}
return true
}
// currentRequestCountLocked reports the number of concurrency slots currently in use, // currentRequestCountLocked reports the number of concurrency slots currently in use,
// including active streams, reserved slots, and reset streams waiting for acknowledgement. // including active streams, reserved slots, and reset streams waiting for acknowledgement.
func (cc *ClientConn) currentRequestCountLocked() int { func (cc *ClientConn) currentRequestCountLocked() int {
@ -1113,6 +1103,14 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
return st.canTakeNewRequest return st.canTakeNewRequest
} }
// availableLocked reports the number of concurrency slots available.
func (cc *ClientConn) availableLocked() int {
if !cc.canTakeNewRequestLocked() {
return 0
}
return max(0, int(cc.maxConcurrentStreams)-cc.currentRequestCountLocked())
}
// tooIdleLocked reports whether this connection has been been sitting idle // tooIdleLocked reports whether this connection has been been sitting idle
// for too much wall time. // for too much wall time.
func (cc *ClientConn) tooIdleLocked() bool { func (cc *ClientConn) tooIdleLocked() bool {
@ -1120,7 +1118,7 @@ func (cc *ClientConn) tooIdleLocked() bool {
// times are compared based on their wall time. We don't want // times are compared based on their wall time. We don't want
// to reuse a connection that's been sitting idle during // to reuse a connection that's been sitting idle during
// VM/laptop suspend if monotonic time was also frozen. // VM/laptop suspend if monotonic time was also frozen.
return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
} }
// onIdleTimeout is called from a time.AfterFunc goroutine. It will // onIdleTimeout is called from a time.AfterFunc goroutine. It will
@ -1137,6 +1135,7 @@ func (cc *ClientConn) closeConn() {
t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn)
defer t.Stop() defer t.Stop()
cc.tconn.Close() cc.tconn.Close()
cc.maybeCallStateHook()
} }
// A tls.Conn.Close can hang for a long time if the peer is unresponsive. // A tls.Conn.Close can hang for a long time if the peer is unresponsive.
@ -1186,7 +1185,6 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
done := make(chan struct{}) done := make(chan struct{})
cancelled := false // guarded by cc.mu cancelled := false // guarded by cc.mu
go func() { go func() {
cc.t.markNewGoroutine()
cc.mu.Lock() cc.mu.Lock()
defer cc.mu.Unlock() defer cc.mu.Unlock()
for { for {
@ -1257,8 +1255,7 @@ func (cc *ClientConn) closeForError(err error) {
// //
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
func (cc *ClientConn) Close() error { func (cc *ClientConn) Close() error {
err := errors.New("http2: client connection force closed via ClientConn.Close") cc.closeForError(errClientConnForceClosed)
cc.closeForError(err)
return nil return nil
} }
@ -1427,7 +1424,6 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
// //
// It sends the request and performs post-request cleanup (closing Request.Body, etc.). // It sends the request and performs post-request cleanup (closing Request.Body, etc.).
func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
cs.cc.t.markNewGoroutine()
err := cs.writeRequest(req, streamf) err := cs.writeRequest(req, streamf)
cs.cleanupWriteRequest(err) cs.cleanupWriteRequest(err)
} }
@ -1558,9 +1554,9 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
var respHeaderTimer <-chan time.Time var respHeaderTimer <-chan time.Time
var respHeaderRecv chan struct{} var respHeaderRecv chan struct{}
if d := cc.responseHeaderTimeout(); d != 0 { if d := cc.responseHeaderTimeout(); d != 0 {
timer := cc.t.newTimer(d) timer := time.NewTimer(d)
defer timer.Stop() defer timer.Stop()
respHeaderTimer = timer.C() respHeaderTimer = timer.C
respHeaderRecv = cs.respHeaderRecv respHeaderRecv = cs.respHeaderRecv
} }
// Wait until the peer half-closes its end of the stream, // Wait until the peer half-closes its end of the stream,
@ -1665,6 +1661,8 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
} }
bodyClosed := cs.reqBodyClosed bodyClosed := cs.reqBodyClosed
closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
// Have we read any frames from the connection since sending this request?
readSinceStream := cc.readBeforeStreamID > cs.ID
cc.mu.Unlock() cc.mu.Unlock()
if mustCloseBody { if mustCloseBody {
cs.reqBody.Close() cs.reqBody.Close()
@ -1696,8 +1694,10 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
// //
// This could be due to the server becoming unresponsive. // This could be due to the server becoming unresponsive.
// To avoid sending too many requests on a dead connection, // To avoid sending too many requests on a dead connection,
// we let the request continue to consume a concurrency slot // if we haven't read any frames from the connection since
// until we can confirm the server is still responding. // sending this request, we let it continue to consume
// a concurrency slot until we can confirm the server is
// still responding.
// We do this by sending a PING frame along with the RST_STREAM // We do this by sending a PING frame along with the RST_STREAM
// (unless a ping is already in flight). // (unless a ping is already in flight).
// //
@ -1708,7 +1708,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
// because it's short lived and will probably be closed before // because it's short lived and will probably be closed before
// we get the ping response. // we get the ping response.
ping := false ping := false
if !closeOnIdle { if !closeOnIdle && !readSinceStream {
cc.mu.Lock() cc.mu.Lock()
// rstStreamPingsBlocked works around a gRPC behavior: // rstStreamPingsBlocked works around a gRPC behavior:
// see comment on the field for details. // see comment on the field for details.
@ -1742,6 +1742,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
} }
close(cs.donec) close(cs.donec)
cc.maybeCallStateHook()
} }
// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams. // awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams.
@ -1753,7 +1754,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
// Return a fatal error which aborts the retry loop. // Return a fatal error which aborts the retry loop.
return errClientConnNotEstablished return errClientConnNotEstablished
} }
cc.lastActive = cc.t.now() cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() { if cc.closed || !cc.canTakeNewRequestLocked() {
return errClientConnUnusable return errClientConnUnusable
} }
@ -2092,10 +2093,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
if len(cc.streams) != slen-1 { if len(cc.streams) != slen-1 {
panic("forgetting unknown stream id") panic("forgetting unknown stream id")
} }
cc.lastActive = cc.t.now() cc.lastActive = time.Now()
if len(cc.streams) == 0 && cc.idleTimer != nil { if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout) cc.idleTimer.Reset(cc.idleTimeout)
cc.lastIdle = cc.t.now() cc.lastIdle = time.Now()
} }
// Wake up writeRequestBody via clientStream.awaitFlowControl and // Wake up writeRequestBody via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request. // wake up RoundTrip if there is a pending request.
@ -2121,7 +2122,6 @@ type clientConnReadLoop struct {
// readLoop runs in its own goroutine and reads and dispatches frames. // readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() { func (cc *ClientConn) readLoop() {
cc.t.markNewGoroutine()
rl := &clientConnReadLoop{cc: cc} rl := &clientConnReadLoop{cc: cc}
defer rl.cleanup() defer rl.cleanup()
cc.readerErr = rl.run() cc.readerErr = rl.run()
@ -2188,9 +2188,9 @@ func (rl *clientConnReadLoop) cleanup() {
if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
unusedWaitTime = cc.idleTimeout unusedWaitTime = cc.idleTimeout
} }
idleTime := cc.t.now().Sub(cc.lastActive) idleTime := time.Now().Sub(cc.lastActive)
if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() {
cc.t.connPool().MarkDead(cc) cc.t.connPool().MarkDead(cc)
}) })
} else { } else {
@ -2250,9 +2250,9 @@ func (rl *clientConnReadLoop) run() error {
cc := rl.cc cc := rl.cc
gotSettings := false gotSettings := false
readIdleTimeout := cc.readIdleTimeout readIdleTimeout := cc.readIdleTimeout
var t timer var t *time.Timer
if readIdleTimeout != 0 { if readIdleTimeout != 0 {
t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
} }
for { for {
f, err := cc.fr.ReadFrame() f, err := cc.fr.ReadFrame()
@ -2795,6 +2795,7 @@ func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientSt
// See comment on ClientConn.rstStreamPingsBlocked for details. // See comment on ClientConn.rstStreamPingsBlocked for details.
rl.cc.rstStreamPingsBlocked = false rl.cc.rstStreamPingsBlocked = false
} }
rl.cc.readBeforeStreamID = rl.cc.nextStreamID
cs := rl.cc.streams[id] cs := rl.cc.streams[id]
if cs != nil && !cs.readAborted { if cs != nil && !cs.readAborted {
return cs return cs
@ -2845,6 +2846,7 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
cc := rl.cc cc := rl.cc
defer cc.maybeCallStateHook()
cc.mu.Lock() cc.mu.Lock()
defer cc.mu.Unlock() defer cc.mu.Unlock()
@ -2998,7 +3000,6 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
var pingError error var pingError error
errc := make(chan struct{}) errc := make(chan struct{})
go func() { go func() {
cc.t.markNewGoroutine()
cc.wmu.Lock() cc.wmu.Lock()
defer cc.wmu.Unlock() defer cc.wmu.Unlock()
if pingError = cc.fr.WritePing(false, p); pingError != nil { if pingError = cc.fr.WritePing(false, p); pingError != nil {
@ -3026,6 +3027,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
func (rl *clientConnReadLoop) processPing(f *PingFrame) error { func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
if f.IsAck() { if f.IsAck() {
cc := rl.cc cc := rl.cc
defer cc.maybeCallStateHook()
cc.mu.Lock() cc.mu.Lock()
defer cc.mu.Unlock() defer cc.mu.Unlock()
// If ack, notify listener if any // If ack, notify listener if any
@ -3128,35 +3130,102 @@ type erringRoundTripper struct{ err error }
func (rt erringRoundTripper) RoundTripErr() error { return rt.err } func (rt erringRoundTripper) RoundTripErr() error { return rt.err }
func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body")
// gzipReader wraps a response body so it can lazily // gzipReader wraps a response body so it can lazily
// call gzip.NewReader on the first call to Read // get gzip.Reader from the pool on the first call to Read.
// After Close is called it puts gzip.Reader to the pool immediately
// if there is no Read in progress or later when Read completes.
type gzipReader struct { type gzipReader struct {
_ incomparable _ incomparable
body io.ReadCloser // underlying Response.Body body io.ReadCloser // underlying Response.Body
zr *gzip.Reader // lazily-initialized gzip reader mu sync.Mutex // guards zr and zerr
zerr error // sticky error zr *gzip.Reader // stores gzip reader from the pool between reads
zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close
}
type eofReader struct{}
func (eofReader) Read([]byte) (int, error) { return 0, io.EOF }
func (eofReader) ReadByte() (byte, error) { return 0, io.EOF }
var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }}
// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r.
func gzipPoolGet(r io.Reader) (*gzip.Reader, error) {
zr := gzipPool.Get().(*gzip.Reader)
if err := zr.Reset(r); err != nil {
gzipPoolPut(zr)
return nil, err
}
return zr, nil
}
// gzipPoolPut puts a gzip.Reader back into the pool.
func gzipPoolPut(zr *gzip.Reader) {
// Reset will allocate bufio.Reader if we pass it anything
// other than a flate.Reader, so ensure that it's getting one.
var r flate.Reader = eofReader{}
zr.Reset(r)
gzipPool.Put(zr)
}
// acquire returns a gzip.Reader for reading response body.
// The reader must be released after use.
func (gz *gzipReader) acquire() (*gzip.Reader, error) {
gz.mu.Lock()
defer gz.mu.Unlock()
if gz.zerr != nil {
return nil, gz.zerr
}
if gz.zr == nil {
gz.zr, gz.zerr = gzipPoolGet(gz.body)
if gz.zerr != nil {
return nil, gz.zerr
}
}
ret := gz.zr
gz.zr, gz.zerr = nil, errConcurrentReadOnResBody
return ret, nil
}
// release returns the gzip.Reader to the pool if Close was called during Read.
func (gz *gzipReader) release(zr *gzip.Reader) {
gz.mu.Lock()
defer gz.mu.Unlock()
if gz.zerr == errConcurrentReadOnResBody {
gz.zr, gz.zerr = zr, nil
} else { // fs.ErrClosed
gzipPoolPut(zr)
}
}
// close returns the gzip.Reader to the pool immediately or
// signals release to do so after Read completes.
func (gz *gzipReader) close() {
gz.mu.Lock()
defer gz.mu.Unlock()
if gz.zerr == nil && gz.zr != nil {
gzipPoolPut(gz.zr)
gz.zr = nil
}
gz.zerr = fs.ErrClosed
} }
func (gz *gzipReader) Read(p []byte) (n int, err error) { func (gz *gzipReader) Read(p []byte) (n int, err error) {
if gz.zerr != nil { zr, err := gz.acquire()
return 0, gz.zerr
}
if gz.zr == nil {
gz.zr, err = gzip.NewReader(gz.body)
if err != nil { if err != nil {
gz.zerr = err
return 0, err return 0, err
} }
} defer gz.release(zr)
return gz.zr.Read(p)
return zr.Read(p)
} }
func (gz *gzipReader) Close() error { func (gz *gzipReader) Close() error {
if err := gz.body.Close(); err != nil { gz.close()
return err
} return gz.body.Close()
gz.zerr = fs.ErrClosed
return nil
} }
type errorReader struct{ err error } type errorReader struct{ err error }
@ -3182,9 +3251,13 @@ func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err erro
} }
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request // noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there's already has a cached connection to the host. // if there's already a cached connection to the host.
// (The field is exported so it can be accessed via reflect from net/http; tested // (The field is exported so it can be accessed via reflect from net/http; tested
// by TestNoDialH2RoundTripperType) // by TestNoDialH2RoundTripperType)
//
// A noDialH2RoundTripper is registered with http1.Transport.RegisterProtocol,
// and the http1.Transport can use type assertions to call non-RoundTrip methods on it.
// This lets us expose, for example, NewClientConn to net/http.
type noDialH2RoundTripper struct{ *Transport } type noDialH2RoundTripper struct{ *Transport }
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
@ -3195,6 +3268,85 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err
return res, err return res, err
} }
func (rt noDialH2RoundTripper) NewClientConn(conn net.Conn, internalStateHook func()) (http.RoundTripper, error) {
tr := rt.Transport
cc, err := tr.newClientConn(conn, tr.disableKeepAlives(), internalStateHook)
if err != nil {
return nil, err
}
// RoundTrip should block when the conn is at its concurrency limit,
// not return an error. Setting strictMaxConcurrentStreams enables this.
cc.strictMaxConcurrentStreams = true
return netHTTPClientConn{cc}, nil
}
// netHTTPClientConn wraps ClientConn and implements the interface net/http expects from
// the RoundTripper returned by NewClientConn.
type netHTTPClientConn struct {
cc *ClientConn
}
func (cc netHTTPClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return cc.cc.RoundTrip(req)
}
func (cc netHTTPClientConn) Close() error {
return cc.cc.Close()
}
func (cc netHTTPClientConn) Err() error {
cc.cc.mu.Lock()
defer cc.cc.mu.Unlock()
if cc.cc.closed {
return errors.New("connection closed")
}
return nil
}
func (cc netHTTPClientConn) Reserve() error {
defer cc.cc.maybeCallStateHook()
cc.cc.mu.Lock()
defer cc.cc.mu.Unlock()
if !cc.cc.canReserveLocked() {
return errors.New("connection is unavailable")
}
cc.cc.streamsReserved++
return nil
}
func (cc netHTTPClientConn) Release() {
defer cc.cc.maybeCallStateHook()
cc.cc.mu.Lock()
defer cc.cc.mu.Unlock()
// We don't complain if streamsReserved is 0.
//
// This is consistent with RoundTrip: both Release and RoundTrip will
// consume a reservation iff one exists.
if cc.cc.streamsReserved > 0 {
cc.cc.streamsReserved--
}
}
func (cc netHTTPClientConn) Available() int {
cc.cc.mu.Lock()
defer cc.cc.mu.Unlock()
return cc.cc.availableLocked()
}
func (cc netHTTPClientConn) InFlight() int {
cc.cc.mu.Lock()
defer cc.cc.mu.Unlock()
return cc.cc.currentRequestCountLocked()
}
func (cc *ClientConn) maybeCallStateHook() {
if cc.internalStateHook != nil {
cc.internalStateHook()
}
}
func (t *Transport) idleConnTimeout() time.Duration { func (t *Transport) idleConnTimeout() time.Duration {
// to keep things backwards compatible, we use non-zero values of // to keep things backwards compatible, we use non-zero values of
// IdleConnTimeout, followed by using the IdleConnTimeout on the underlying // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying
@ -3228,7 +3380,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
cc.mu.Lock() cc.mu.Lock()
ci.WasIdle = len(cc.streams) == 0 && reused ci.WasIdle = len(cc.streams) == 0 && reused
if ci.WasIdle && !cc.lastActive.IsZero() { if ci.WasIdle && !cc.lastActive.IsZero() {
ci.IdleTime = cc.t.timeSince(cc.lastActive) ci.IdleTime = time.Since(cc.lastActive)
} }
cc.mu.Unlock() cc.mu.Unlock()

View File

@ -42,6 +42,8 @@ type OpenStreamOptions struct {
// PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID is zero if the stream was initiated by the client. Otherwise,
// PusherID names the stream that pushed the newly opened stream. // PusherID names the stream that pushed the newly opened stream.
PusherID uint32 PusherID uint32
// priority is used to set the priority of the newly opened stream.
priority PriorityParam
} }
// FrameWriteRequest is a request to write a frame. // FrameWriteRequest is a request to write a frame.
@ -183,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) {
} }
// writeQueue is used by implementations of WriteScheduler. // writeQueue is used by implementations of WriteScheduler.
//
// Each writeQueue contains a queue of FrameWriteRequests, meant to store all
// FrameWriteRequests associated with a given stream. This is implemented as a
// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done
// by incrementing currPos of currQueue. Adding an item is done by appending it
// to the nextQueue. If currQueue is empty when trying to remove an item, we
// can swap currQueue and nextQueue to remedy the situation.
// This two-stage queue is analogous to the use of two lists in Okasaki's
// purely functional queue but without the overhead of reversing the list when
// swapping stages.
//
// writeQueue also contains prev and next, this can be used by implementations
// of WriteScheduler to construct data structures that represent the order of
// writing between different streams (e.g. circular linked list).
type writeQueue struct { type writeQueue struct {
s []FrameWriteRequest currQueue []FrameWriteRequest
nextQueue []FrameWriteRequest
currPos int
prev, next *writeQueue prev, next *writeQueue
} }
func (q *writeQueue) empty() bool { return len(q.s) == 0 } func (q *writeQueue) empty() bool {
return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0
}
func (q *writeQueue) push(wr FrameWriteRequest) { func (q *writeQueue) push(wr FrameWriteRequest) {
q.s = append(q.s, wr) q.nextQueue = append(q.nextQueue, wr)
} }
func (q *writeQueue) shift() FrameWriteRequest { func (q *writeQueue) shift() FrameWriteRequest {
if len(q.s) == 0 { if q.empty() {
panic("invalid use of queue") panic("invalid use of queue")
} }
wr := q.s[0] if q.currPos >= len(q.currQueue) {
// TODO: less copy-happy queue. q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0]
copy(q.s, q.s[1:]) }
q.s[len(q.s)-1] = FrameWriteRequest{} wr := q.currQueue[q.currPos]
q.s = q.s[:len(q.s)-1] q.currQueue[q.currPos] = FrameWriteRequest{}
q.currPos++
return wr return wr
} }
// peek returns a pointer to the frame at the front of the queue, or nil
// if the queue holds no frames. The front lives in currQueue at currPos
// when that stage is non-empty; otherwise it is the first element of
// nextQueue.
func (q *writeQueue) peek() *FrameWriteRequest {
	switch {
	case q.currPos < len(q.currQueue):
		return &q.currQueue[q.currPos]
	case len(q.nextQueue) != 0:
		return &q.nextQueue[0]
	default:
		return nil
	}
}
// consume consumes up to n bytes from q.s[0]. If the frame is // consume consumes up to n bytes from q.s[0]. If the frame is
// entirely consumed, it is removed from the queue. If the frame // entirely consumed, it is removed from the queue. If the frame
// is partially consumed, the frame is kept with the consumed // is partially consumed, the frame is kept with the consumed
// bytes removed. Returns true iff any bytes were consumed. // bytes removed. Returns true iff any bytes were consumed.
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
if len(q.s) == 0 { if q.empty() {
return FrameWriteRequest{}, false return FrameWriteRequest{}, false
} }
consumed, rest, numresult := q.s[0].Consume(n) consumed, rest, numresult := q.peek().Consume(n)
switch numresult { switch numresult {
case 0: case 0:
return FrameWriteRequest{}, false return FrameWriteRequest{}, false
case 1: case 1:
q.shift() q.shift()
case 2: case 2:
q.s[0] = rest *q.peek() = rest
} }
return consumed, true return consumed, true
} }
@ -230,10 +262,15 @@ type writeQueuePool []*writeQueue
// put inserts an unused writeQueue into the pool. // put inserts an unused writeQueue into the pool.
func (p *writeQueuePool) put(q *writeQueue) { func (p *writeQueuePool) put(q *writeQueue) {
for i := range q.s { for i := range q.currQueue {
q.s[i] = FrameWriteRequest{} q.currQueue[i] = FrameWriteRequest{}
} }
q.s = q.s[:0] for i := range q.nextQueue {
q.nextQueue[i] = FrameWriteRequest{}
}
q.currQueue = q.currQueue[:0]
q.nextQueue = q.nextQueue[:0]
q.currPos = 0
*p = append(*p, q) *p = append(*p, q)
} }

View File

@ -11,7 +11,7 @@ import (
) )
// RFC 7540, Section 5.3.5: the default weight is 16. // RFC 7540, Section 5.3.5: the default weight is 16.
const priorityDefaultWeight = 15 // 16 = 15 + 1 const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. // PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
type PriorityWriteSchedulerConfig struct { type PriorityWriteSchedulerConfig struct {
@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
} }
} }
ws := &priorityWriteScheduler{ ws := &priorityWriteSchedulerRFC7540{
nodes: make(map[uint32]*priorityNode), nodes: make(map[uint32]*priorityNodeRFC7540),
maxClosedNodesInTree: cfg.MaxClosedNodesInTree, maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
maxIdleNodesInTree: cfg.MaxIdleNodesInTree, maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
return ws return ws
} }
type priorityNodeState int type priorityNodeStateRFC7540 int
const ( const (
priorityNodeOpen priorityNodeState = iota priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota
priorityNodeClosed priorityNodeClosedRFC7540
priorityNodeIdle priorityNodeIdleRFC7540
) )
// priorityNode is a node in an HTTP/2 priority tree. // priorityNodeRFC7540 is a node in an HTTP/2 priority tree.
// Each node is associated with a single stream ID. // Each node is associated with a single stream ID.
// See RFC 7540, Section 5.3. // See RFC 7540, Section 5.3.
type priorityNode struct { type priorityNodeRFC7540 struct {
q writeQueue // queue of pending frames to write q writeQueue // queue of pending frames to write
id uint32 // id of the stream, or 0 for the root of the tree id uint32 // id of the stream, or 0 for the root of the tree
weight uint8 // the actual weight is weight+1, so the value is in [1,256] weight uint8 // the actual weight is weight+1, so the value is in [1,256]
state priorityNodeState // open | closed | idle state priorityNodeStateRFC7540 // open | closed | idle
bytes int64 // number of bytes written by this node, or 0 if closed bytes int64 // number of bytes written by this node, or 0 if closed
subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
// These links form the priority tree. // These links form the priority tree.
parent *priorityNode parent *priorityNodeRFC7540
kids *priorityNode // start of the kids list kids *priorityNodeRFC7540 // start of the kids list
prev, next *priorityNode // doubly-linked list of siblings prev, next *priorityNodeRFC7540 // doubly-linked list of siblings
} }
func (n *priorityNode) setParent(parent *priorityNode) { func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) {
if n == parent { if n == parent {
panic("setParent to self") panic("setParent to self")
} }
@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) {
} }
} }
func (n *priorityNode) addBytes(b int64) { func (n *priorityNodeRFC7540) addBytes(b int64) {
n.bytes += b n.bytes += b
for ; n != nil; n = n.parent { for ; n != nil; n = n.parent {
n.subtreeBytes += b n.subtreeBytes += b
@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) {
// //
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node). // if any ancestor p of n is still open (ignoring the root node).
func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool {
if !n.q.empty() && f(n, openParent) { if !n.q.empty() && f(n, openParent) {
return true return true
} }
@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
// Don't consider the root "open" when updating openParent since // Don't consider the root "open" when updating openParent since
// we can't send data frames on the root stream (only control frames). // we can't send data frames on the root stream (only control frames).
if n.id != 0 { if n.id != 0 {
openParent = openParent || (n.state == priorityNodeOpen) openParent = openParent || (n.state == priorityNodeOpenRFC7540)
} }
// Common case: only one kid or all kids have the same weight. // Common case: only one kid or all kids have the same weight.
@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
*tmp = append(*tmp, n.kids) *tmp = append(*tmp, n.kids)
n.kids.setParent(nil) n.kids.setParent(nil)
} }
sort.Sort(sortPriorityNodeSiblings(*tmp)) sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp))
for i := len(*tmp) - 1; i >= 0; i-- { for i := len(*tmp) - 1; i >= 0; i-- {
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
} }
@ -207,15 +207,15 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
return false return false
} }
type sortPriorityNodeSiblings []*priorityNode type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540
func (z sortPriorityNodeSiblings) Len() int { return len(z) } func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) }
func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
func (z sortPriorityNodeSiblings) Less(i, k int) bool { func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
// Prefer the subtree that has sent fewer bytes relative to its weight. // Prefer the subtree that has sent fewer bytes relative to its weight.
// See sections 5.3.2 and 5.3.4. // See sections 5.3.2 and 5.3.4.
wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes)
wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes)
if bi == 0 && bk == 0 { if bi == 0 && bk == 0 {
return wi >= wk return wi >= wk
} }
@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool {
return bi/bk <= wi/wk return bi/bk <= wi/wk
} }
type priorityWriteScheduler struct { type priorityWriteSchedulerRFC7540 struct {
// root is the root of the priority tree, where root.id = 0. // root is the root of the priority tree, where root.id = 0.
// The root queues control frames that are not associated with any stream. // The root queues control frames that are not associated with any stream.
root priorityNode root priorityNodeRFC7540
// nodes maps stream ids to priority tree nodes. // nodes maps stream ids to priority tree nodes.
nodes map[uint32]*priorityNode nodes map[uint32]*priorityNodeRFC7540
// maxID is the maximum stream id in nodes. // maxID is the maximum stream id in nodes.
maxID uint32 maxID uint32
@ -239,7 +239,7 @@ type priorityWriteScheduler struct {
// lists of nodes that have been closed or are idle, but are kept in // lists of nodes that have been closed or are idle, but are kept in
// the tree for improved prioritization. When the lengths exceed either // the tree for improved prioritization. When the lengths exceed either
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
closedNodes, idleNodes []*priorityNode closedNodes, idleNodes []*priorityNodeRFC7540
// From the config. // From the config.
maxClosedNodesInTree int maxClosedNodesInTree int
@ -248,19 +248,19 @@ type priorityWriteScheduler struct {
enableWriteThrottle bool enableWriteThrottle bool
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
tmp []*priorityNode tmp []*priorityNodeRFC7540
// pool of empty queues for reuse. // pool of empty queues for reuse.
queuePool writeQueuePool queuePool writeQueuePool
} }
func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) {
// The stream may be currently idle but cannot be opened or closed. // The stream may be currently idle but cannot be opened or closed.
if curr := ws.nodes[streamID]; curr != nil { if curr := ws.nodes[streamID]; curr != nil {
if curr.state != priorityNodeIdle { if curr.state != priorityNodeIdleRFC7540 {
panic(fmt.Sprintf("stream %d already opened", streamID)) panic(fmt.Sprintf("stream %d already opened", streamID))
} }
curr.state = priorityNodeOpen curr.state = priorityNodeOpenRFC7540
return return
} }
@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
if parent == nil { if parent == nil {
parent = &ws.root parent = &ws.root
} }
n := &priorityNode{ n := &priorityNodeRFC7540{
q: *ws.queuePool.get(), q: *ws.queuePool.get(),
id: streamID, id: streamID,
weight: priorityDefaultWeight, weight: priorityDefaultWeightRFC7540,
state: priorityNodeOpen, state: priorityNodeOpenRFC7540,
} }
n.setParent(parent) n.setParent(parent)
ws.nodes[streamID] = n ws.nodes[streamID] = n
@ -285,24 +285,23 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
} }
} }
func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
if streamID == 0 { if streamID == 0 {
panic("violation of WriteScheduler interface: cannot close stream 0") panic("violation of WriteScheduler interface: cannot close stream 0")
} }
if ws.nodes[streamID] == nil { if ws.nodes[streamID] == nil {
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
} }
if ws.nodes[streamID].state != priorityNodeOpen { if ws.nodes[streamID].state != priorityNodeOpenRFC7540 {
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
} }
n := ws.nodes[streamID] n := ws.nodes[streamID]
n.state = priorityNodeClosed n.state = priorityNodeClosedRFC7540
n.addBytes(-n.bytes) n.addBytes(-n.bytes)
q := n.q q := n.q
ws.queuePool.put(&q) ws.queuePool.put(&q)
n.q.s = nil
if ws.maxClosedNodesInTree > 0 { if ws.maxClosedNodesInTree > 0 {
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
} else { } else {
@ -310,7 +309,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
} }
} }
func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) {
if streamID == 0 { if streamID == 0 {
panic("adjustPriority on root") panic("adjustPriority on root")
} }
@ -324,11 +323,11 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
return return
} }
ws.maxID = streamID ws.maxID = streamID
n = &priorityNode{ n = &priorityNodeRFC7540{
q: *ws.queuePool.get(), q: *ws.queuePool.get(),
id: streamID, id: streamID,
weight: priorityDefaultWeight, weight: priorityDefaultWeightRFC7540,
state: priorityNodeIdle, state: priorityNodeIdleRFC7540,
} }
n.setParent(&ws.root) n.setParent(&ws.root)
ws.nodes[streamID] = n ws.nodes[streamID] = n
@ -340,7 +339,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
parent := ws.nodes[priority.StreamDep] parent := ws.nodes[priority.StreamDep]
if parent == nil { if parent == nil {
n.setParent(&ws.root) n.setParent(&ws.root)
n.weight = priorityDefaultWeight n.weight = priorityDefaultWeightRFC7540
return return
} }
@ -381,8 +380,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
n.weight = priority.Weight n.weight = priority.Weight
} }
func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
var n *priorityNode var n *priorityNodeRFC7540
if wr.isControl() { if wr.isControl() {
n = &ws.root n = &ws.root
} else { } else {
@ -401,8 +400,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
n.q.push(wr) n.q.push(wr)
} }
func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
limit := int32(math.MaxInt32) limit := int32(math.MaxInt32)
if openParent { if openParent {
limit = ws.writeThrottleLimit limit = ws.writeThrottleLimit
@ -428,7 +427,7 @@ func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
return wr, ok return wr, ok
} }
func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
if maxSize == 0 { if maxSize == 0 {
return return
} }
@ -442,7 +441,7 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max
*list = append(*list, n) *list = append(*list, n)
} }
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
for n.kids != nil { for n.kids != nil {
n.kids.setParent(n.parent) n.kids.setParent(n.parent)
} }

View File

@ -0,0 +1,224 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"fmt"
"math"
)
// streamMetadata records, for each open stream, where its write queue
// currently lives and the RFC 9218 priority last applied to it, so the
// scheduler can locate and move the queue when the priority changes.
type streamMetadata struct {
	location *writeQueue   // the stream's queue, linked into one of ws.heads' rings
	priority PriorityParam // urgency/incremental parameters currently in effect
}
// priorityWriteSchedulerRFC9218 is a WriteScheduler implementing the
// Extensible Prioritization Scheme of RFC 9218: each stream has an
// urgency u in [0,7] (lower is more urgent) and an incremental flag i,
// and all streams sharing (u, i) are kept in a circular list of write
// queues.
type priorityWriteSchedulerRFC9218 struct {
	// control contains control frames (SETTINGS, PING, etc.).
	control writeQueue

	// heads contain the head of a circular list of streams.
	// We put these heads within a nested array that represents urgency and
	// incremental, as defined in
	// https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
	// 8 represents u=0 up to u=7, and 2 represents i=false and i=true.
	heads [8][2]*writeQueue

	// streams contains a mapping between each stream ID and their metadata, so
	// we can quickly locate them when needing to, for example, adjust their
	// priority.
	streams map[uint32]streamMetadata

	// queuePool are empty queues for reuse.
	queuePool writeQueuePool

	// prioritizeIncremental is used to determine whether we should prioritize
	// incremental streams or not, when urgency is the same in a given Pop()
	// call.
	prioritizeIncremental bool

	// priorityUpdateBuf is used to buffer the most recent PRIORITY_UPDATE we
	// receive per https://www.rfc-editor.org/rfc/rfc9218.html#name-the-priority_update-frame.
	priorityUpdateBuf struct {
		// streamID being 0 means that the buffer is empty. This is a safe
		// assumption as PRIORITY_UPDATE for stream 0 is a PROTOCOL_ERROR.
		streamID uint32
		priority PriorityParam
	}
}
// newPriorityWriteSchedulerRFC9218 returns a WriteScheduler that
// prioritizes streams according to the urgency and incremental
// parameters of RFC 9218.
func newPriorityWriteSchedulerRFC9218() WriteScheduler {
	return &priorityWriteSchedulerRFC9218{
		streams: make(map[uint32]streamMetadata),
	}
}
// OpenStream registers a newly opened stream, allocating it a write
// queue from the pool and linking that queue into the circular list
// matching its (urgency, incremental) priority. If a PRIORITY_UPDATE
// for this stream was buffered before the stream opened, that buffered
// priority overrides opt.priority. Panics if the stream is already open.
func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) {
	if ws.streams[streamID].location != nil {
		panic(fmt.Errorf("stream %d already opened", streamID))
	}
	// Apply (and clear) a buffered PRIORITY_UPDATE received before the
	// stream opened; the buffer holds at most one entry.
	if streamID == ws.priorityUpdateBuf.streamID {
		ws.priorityUpdateBuf.streamID = 0
		opt.priority = ws.priorityUpdateBuf.priority
	}
	q := ws.queuePool.get()
	ws.streams[streamID] = streamMetadata{
		location: q,
		priority: opt.priority,
	}

	u, i := opt.priority.urgency, opt.priority.incremental
	if ws.heads[u][i] == nil {
		// First stream at this (u, i) level: form a one-element ring.
		ws.heads[u][i] = q
		q.next = q
		q.prev = q
	} else {
		// Queues are stored in a ring.
		// Insert the new stream before ws.head, putting it at the end of the list.
		q.prev = ws.heads[u][i].prev
		q.next = ws.heads[u][i]
		q.prev.next = q
		q.next.prev = q
	}
}
// CloseStream removes a stream from the scheduler: its queue is
// unlinked from the priority ring it belongs to and returned to the
// pool for reuse. Closing an unknown (or already closed) stream is a
// no-op.
func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) {
	metadata := ws.streams[streamID]
	q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
	if q == nil {
		return
	}
	if q.next == q {
		// This was the only open stream.
		ws.heads[u][i] = nil
	} else {
		// Unlink q from the doubly-linked ring, advancing the head
		// pointer if it referenced q.
		q.prev.next = q.next
		q.next.prev = q.prev
		if ws.heads[u][i] == q {
			ws.heads[u][i] = q.next
		}
	}
	delete(ws.streams, streamID)
	ws.queuePool.put(q)
}
// AdjustStream changes a stream's priority, moving its queue from its
// current (urgency, incremental) ring into the ring for the new
// priority. If the stream is not open yet, the update is buffered
// (overwriting any previously buffered update) and applied when
// OpenStream is called for that stream.
func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) {
	metadata := ws.streams[streamID]
	q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
	if q == nil {
		// Stream not open yet: remember only the most recent update.
		ws.priorityUpdateBuf.streamID = streamID
		ws.priorityUpdateBuf.priority = priority
		return
	}

	// Remove stream from current location.
	if q.next == q {
		// This was the only open stream.
		ws.heads[u][i] = nil
	} else {
		// Unlink q from its ring, advancing the head if it pointed at q.
		q.prev.next = q.next
		q.next.prev = q.prev
		if ws.heads[u][i] == q {
			ws.heads[u][i] = q.next
		}
	}

	// Insert stream to the new queue.
	u, i = priority.urgency, priority.incremental
	if ws.heads[u][i] == nil {
		ws.heads[u][i] = q
		q.next = q
		q.prev = q
	} else {
		// Queues are stored in a ring.
		// Insert the new stream before ws.head, putting it at the end of the list.
		q.prev = ws.heads[u][i].prev
		q.next = ws.heads[u][i]
		q.prev.next = q
		q.next.prev = q
	}

	// Update the metadata.
	ws.streams[streamID] = streamMetadata{
		location: q,
		priority: priority,
	}
}
// Push queues a frame for writing. Control frames go on the control
// queue; frames for streams no longer tracked by the scheduler (closed
// streams) are also routed to the control queue, except that a DATA
// frame on a non-open stream is a caller bug and panics.
func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) {
	if wr.isControl() {
		ws.control.push(wr)
		return
	}
	meta := ws.streams[wr.StreamID()]
	if meta.location == nil {
		// The stream has been closed; only non-DATA frames (e.g.
		// RST_STREAM) may still be written, via the control queue.
		if wr.DataSize() > 0 {
			panic("add DATA on non-open stream")
		}
		ws.control.push(wr)
		return
	}
	meta.location.push(wr)
}
// Pop dequeues the next frame to write. Control frames are always
// served first; otherwise streams are served from the lowest u (highest
// urgency) level that has a consumable frame, alternating across Pop()
// calls between the incremental and non-incremental queues at a given
// urgency. Returns ok=false when nothing is ready to write.
func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) {
	// Control and RST_STREAM frames first.
	if !ws.control.empty() {
		return ws.control.shift(), true
	}

	// On the next Pop(), we want to prioritize incremental if we prioritized
	// non-incremental request of the same urgency this time. Vice-versa.
	// i.e. when there are incremental and non-incremental requests at the same
	// priority, we give 50% of our bandwidth to the incremental ones in
	// aggregate and 50% to the first non-incremental one (since
	// non-incremental streams do not use round-robin writes).
	ws.prioritizeIncremental = !ws.prioritizeIncremental

	// Always prioritize lowest u (i.e. highest urgency level).
	for u := range ws.heads {
		for i := range ws.heads[u] {
			// When we want to prioritize incremental, we try to pop i=true
			// first before i=false when u is the same.
			// (Reassigning i flips this iteration's visit order to 1 then
			// 0; the range still runs exactly twice.)
			if ws.prioritizeIncremental {
				i = (i + 1) % 2
			}
			q := ws.heads[u][i]
			if q == nil {
				continue
			}
			// Walk the ring from the head until some queue yields a
			// frame, or we arrive back at the head empty-handed.
			for {
				if wr, ok := q.consume(math.MaxInt32); ok {
					if i == 1 {
						// For incremental streams, we update head to q.next so
						// we can round-robin between multiple streams that can
						// immediately benefit from partial writes.
						ws.heads[u][i] = q.next
					} else {
						// For non-incremental streams, we try to finish one to
						// completion rather than doing round-robin. However,
						// we update head here so that if q.consume() is !ok
						// (e.g. the stream has no more frame to consume), head
						// is updated to the next q that has frames to consume
						// on future iterations. This way, we do not prioritize
						// writing to unavailable stream on next Pop() calls,
						// preventing head-of-line blocking.
						ws.heads[u][i] = q
					}
					return wr, true
				}
				q = q.next
				if q == ws.heads[u][i] {
					break
				}
			}
		}
	}
	return FrameWriteRequest{}, false
}

View File

@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct {
} }
// newRoundRobinWriteScheduler constructs a new write scheduler. // newRoundRobinWriteScheduler constructs a new write scheduler.
// The round robin scheduler priorizes control frames // The round robin scheduler prioritizes control frames
// like SETTINGS and PING over DATA frames. // like SETTINGS and PING over DATA frames.
// When there are no control frames to send, it performs a round-robin // When there are no control frames to send, it performs a round-robin
// selection from the ready streams. // selection from the ready streams.

View File

@ -51,7 +51,7 @@ type EncodeHeadersParam struct {
DefaultUserAgent string DefaultUserAgent string
} }
// EncodeHeadersParam is the result of EncodeHeaders. // EncodeHeadersResult is the result of EncodeHeaders.
type EncodeHeadersResult struct { type EncodeHeadersResult struct {
HasBody bool HasBody bool
HasTrailers bool HasTrailers bool
@ -399,7 +399,7 @@ type ServerRequestResult struct {
// If the request should be rejected, this is a short string suitable for passing // If the request should be rejected, this is a short string suitable for passing
// to the http2 package's CountError function. // to the http2 package's CountError function.
// It might be a bit odd to return errors this way rather than returing an error, // It might be a bit odd to return errors this way rather than returning an error,
// but this ensures we don't forget to include a CountError reason. // but this ensures we don't forget to include a CountError reason.
InvalidReason string InvalidReason string
} }

View File

@ -297,7 +297,7 @@ func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter,
b = append(b, up.Username...) b = append(b, up.Username...)
b = append(b, byte(len(up.Password))) b = append(b, byte(len(up.Password)))
b = append(b, up.Password...) b = append(b, up.Password...)
// TODO(mikio): handle IO deadlines and cancelation if // TODO(mikio): handle IO deadlines and cancellation if
// necessary // necessary
if _, err := rw.Write(b); err != nil { if _, err := rw.Write(b); err != nil {
return err return err

View File

@ -142,7 +142,7 @@ func testPingPong(t *testing.T, c1, c2 net.Conn) {
} }
// testRacyRead tests that it is safe to mutate the input Read buffer // testRacyRead tests that it is safe to mutate the input Read buffer
// immediately after cancelation has occurred. // immediately after cancellation has occurred.
func testRacyRead(t *testing.T, c1, c2 net.Conn) { func testRacyRead(t *testing.T, c1, c2 net.Conn) {
go chunkedCopy(c2, rand.New(rand.NewSource(0))) go chunkedCopy(c2, rand.New(rand.NewSource(0)))
@ -170,7 +170,7 @@ func testRacyRead(t *testing.T, c1, c2 net.Conn) {
} }
// testRacyWrite tests that it is safe to mutate the input Write buffer // testRacyWrite tests that it is safe to mutate the input Write buffer
// immediately after cancelation has occurred. // immediately after cancellation has occurred.
func testRacyWrite(t *testing.T, c1, c2 net.Conn) { func testRacyWrite(t *testing.T, c1, c2 net.Conn) {
go chunkedCopy(io.Discard, c2) go chunkedCopy(io.Discard, c2)
@ -318,7 +318,7 @@ func testCloseTimeout(t *testing.T, c1, c2 net.Conn) {
defer wg.Wait() defer wg.Wait()
wg.Add(3) wg.Add(3)
// Test for cancelation upon connection closure. // Test for cancellation upon connection closure.
c1.SetDeadline(neverTimeout) c1.SetDeadline(neverTimeout)
go func() { go func() {
defer wg.Done() defer wg.Done()

View File

@ -58,8 +58,8 @@ func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
Buckets: buckets, Buckets: buckets,
} }
data.Families = make([]string, 0, len(families))
famMu.RLock() famMu.RLock()
data.Families = make([]string, 0, len(families))
for name := range families { for name := range families {
data.Families = append(data.Families, name) data.Families = append(data.Families, name)
} }
@ -508,7 +508,7 @@ const eventsHTML = `
<tr class="first"> <tr class="first">
<td class="when">{{$el.When}}</td> <td class="when">{{$el.When}}</td>
<td class="elapsed">{{$el.ElapsedTime}}</td> <td class="elapsed">{{$el.ElapsedTime}}</td>
<td>{{$el.Title}} <td>{{$el.Title}}</td>
</tr> </tr>
{{if $.Expanded}} {{if $.Expanded}}
<tr> <tr>

View File

@ -440,6 +440,7 @@ func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (er
if err != nil { if err != nil {
return err return err
} }
defer resp.Body.Close()
if resp.StatusCode != 101 { if resp.StatusCode != 101 {
return ErrBadStatus return ErrBadStatus
} }

View File

@ -3,7 +3,7 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Package errgroup provides synchronization, error propagation, and Context // Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task. // cancellation for groups of goroutines working on subtasks of a common task.
// //
// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks // [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
// returning errors. // returning errors.
@ -12,8 +12,6 @@ package errgroup
import ( import (
"context" "context"
"fmt" "fmt"
"runtime"
"runtime/debug"
"sync" "sync"
) )
@ -33,10 +31,6 @@ type Group struct {
errOnce sync.Once errOnce sync.Once
err error err error
mu sync.Mutex
panicValue any // = PanicError | PanicValue; non-nil if some Group.Go goroutine panicked.
abnormal bool // some Group.Go goroutine terminated abnormally (panic or goexit).
} }
func (g *Group) done() { func (g *Group) done() {
@ -56,80 +50,47 @@ func WithContext(ctx context.Context) (*Group, context.Context) {
return &Group{cancel: cancel}, ctx return &Group{cancel: cancel}, ctx
} }
// Wait blocks until all function calls from the Go method have returned // Wait blocks until all function calls from the Go method have returned, then
// normally, then returns the first non-nil error (if any) from them. // returns the first non-nil error (if any) from them.
//
// If any of the calls panics, Wait panics with a [PanicValue];
// and if any of them calls [runtime.Goexit], Wait calls runtime.Goexit.
func (g *Group) Wait() error { func (g *Group) Wait() error {
g.wg.Wait() g.wg.Wait()
if g.cancel != nil { if g.cancel != nil {
g.cancel(g.err) g.cancel(g.err)
} }
if g.panicValue != nil {
panic(g.panicValue)
}
if g.abnormal {
runtime.Goexit()
}
return g.err return g.err
} }
// Go calls the given function in a new goroutine. // Go calls the given function in a new goroutine.
// The first call to Go must happen before a Wait.
// It blocks until the new goroutine can be added without the number of
// active goroutines in the group exceeding the configured limit.
// //
// The first call to Go must happen before a Wait.
// It blocks until the new goroutine can be added without the number of // It blocks until the new goroutine can be added without the number of
// goroutines in the group exceeding the configured limit. // goroutines in the group exceeding the configured limit.
// //
// The first goroutine in the group that returns a non-nil error, panics, or // The first goroutine in the group that returns a non-nil error will
// invokes [runtime.Goexit] will cancel the associated Context, if any. // cancel the associated Context, if any. The error will be returned
// by Wait.
func (g *Group) Go(f func() error) { func (g *Group) Go(f func() error) {
if g.sem != nil { if g.sem != nil {
g.sem <- token{} g.sem <- token{}
} }
g.add(f)
}
func (g *Group) add(f func() error) {
g.wg.Add(1) g.wg.Add(1)
go func() { go func() {
defer g.done() defer g.done()
normalReturn := false
defer func() {
if normalReturn {
return
}
v := recover()
g.mu.Lock()
defer g.mu.Unlock()
if !g.abnormal {
if g.cancel != nil {
g.cancel(g.err)
}
g.abnormal = true
}
if v != nil && g.panicValue == nil {
switch v := v.(type) {
case error:
g.panicValue = PanicError{
Recovered: v,
Stack: debug.Stack(),
}
default:
g.panicValue = PanicValue{
Recovered: v,
Stack: debug.Stack(),
}
}
}
}()
err := f() // It is tempting to propagate panics from f()
normalReturn = true // up to the goroutine that calls Wait, but
if err != nil { // it creates more problems than it solves:
// - it delays panics arbitrarily,
// making bugs harder to detect;
// - it turns f's panic stack into a mere value,
// hiding it from crash-monitoring tools;
// - it risks deadlocks that hide the panic entirely,
// if f's panic leaves the program in a state
// that prevents the Wait call from being reached.
// See #53757, #74275, #74304, #74306.
if err := f(); err != nil {
g.errOnce.Do(func() { g.errOnce.Do(func() {
g.err = err g.err = err
if g.cancel != nil { if g.cancel != nil {
@ -154,7 +115,19 @@ func (g *Group) TryGo(f func() error) bool {
} }
} }
g.add(f) g.wg.Add(1)
go func() {
defer g.done()
if err := f(); err != nil {
g.errOnce.Do(func() {
g.err = err
if g.cancel != nil {
g.cancel(g.err)
}
})
}
}()
return true return true
} }
@ -171,38 +144,8 @@ func (g *Group) SetLimit(n int) {
g.sem = nil g.sem = nil
return return
} }
if len(g.sem) != 0 { if active := len(g.sem); active != 0 {
panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", active))
} }
g.sem = make(chan token, n) g.sem = make(chan token, n)
} }
// PanicError wraps an error recovered from an unhandled panic
// when calling a function passed to Go or TryGo.
type PanicError struct {
Recovered error
Stack []byte // result of call to [debug.Stack]
}
func (p PanicError) Error() string {
// A Go Error method conventionally does not include a stack dump, so omit it
// here. (Callers who care can extract it from the Stack field.)
return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered)
}
func (p PanicError) Unwrap() error { return p.Recovered }
// PanicValue wraps a value that does not implement the error interface,
// recovered from an unhandled panic when calling a function passed to Go or
// TryGo.
type PanicValue struct {
Recovered any
Stack []byte // result of call to [debug.Stack]
}
func (p PanicValue) String() string {
if len(p.Stack) > 0 {
return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack)
}
return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered)
}

View File

@ -47,7 +47,7 @@ func archInit() {
switch runtime.GOOS { switch runtime.GOOS {
case "freebsd": case "freebsd":
readARM64Registers() readARM64Registers()
case "linux", "netbsd", "openbsd": case "linux", "netbsd", "openbsd", "windows":
doinit() doinit()
default: default:
// Many platforms don't seem to allow reading these registers. // Many platforms don't seem to allow reading these registers.

View File

@ -9,31 +9,27 @@
// func getisar0() uint64 // func getisar0() uint64
TEXT ·getisar0(SB),NOSPLIT,$0-8 TEXT ·getisar0(SB),NOSPLIT,$0-8
// get Instruction Set Attributes 0 into x0 // get Instruction Set Attributes 0 into x0
// mrs x0, ID_AA64ISAR0_EL1 = d5380600 MRS ID_AA64ISAR0_EL1, R0
WORD $0xd5380600
MOVD R0, ret+0(FP) MOVD R0, ret+0(FP)
RET RET
// func getisar1() uint64 // func getisar1() uint64
TEXT ·getisar1(SB),NOSPLIT,$0-8 TEXT ·getisar1(SB),NOSPLIT,$0-8
// get Instruction Set Attributes 1 into x0 // get Instruction Set Attributes 1 into x0
// mrs x0, ID_AA64ISAR1_EL1 = d5380620 MRS ID_AA64ISAR1_EL1, R0
WORD $0xd5380620
MOVD R0, ret+0(FP) MOVD R0, ret+0(FP)
RET RET
// func getpfr0() uint64 // func getpfr0() uint64
TEXT ·getpfr0(SB),NOSPLIT,$0-8 TEXT ·getpfr0(SB),NOSPLIT,$0-8
// get Processor Feature Register 0 into x0 // get Processor Feature Register 0 into x0
// mrs x0, ID_AA64PFR0_EL1 = d5380400 MRS ID_AA64PFR0_EL1, R0
WORD $0xd5380400
MOVD R0, ret+0(FP) MOVD R0, ret+0(FP)
RET RET
// func getzfr0() uint64 // func getzfr0() uint64
TEXT ·getzfr0(SB),NOSPLIT,$0-8 TEXT ·getzfr0(SB),NOSPLIT,$0-8
// get SVE Feature Register 0 into x0 // get SVE Feature Register 0 into x0
// mrs x0, ID_AA64ZFR0_EL1 = d5380480 MRS ID_AA64ZFR0_EL1, R0
WORD $0xd5380480
MOVD R0, ret+0(FP) MOVD R0, ret+0(FP)
RET RET

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build !linux && !netbsd && !openbsd && arm64 //go:build !linux && !netbsd && !openbsd && !windows && arm64
package cpu package cpu

42
vendor/golang.org/x/sys/cpu/cpu_windows_arm64.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
// Copyright 2026 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import (
"golang.org/x/sys/windows"
)
func doinit() {
// set HasASIMD and HasFP to true as per
// https://learn.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-170#base-requirements
//
// The ARM64 version of Windows always presupposes that it's running on an ARMv8 or later architecture.
// Both floating-point and NEON support are presumed to be present in hardware.
//
ARM64.HasASIMD = true
ARM64.HasFP = true
if windows.IsProcessorFeaturePresent(windows.PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) {
ARM64.HasAES = true
ARM64.HasPMULL = true
ARM64.HasSHA1 = true
ARM64.HasSHA2 = true
}
ARM64.HasSHA3 = windows.IsProcessorFeaturePresent(windows.PF_ARM_SHA3_INSTRUCTIONS_AVAILABLE)
ARM64.HasCRC32 = windows.IsProcessorFeaturePresent(windows.PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE)
ARM64.HasSHA512 = windows.IsProcessorFeaturePresent(windows.PF_ARM_SHA512_INSTRUCTIONS_AVAILABLE)
ARM64.HasATOMICS = windows.IsProcessorFeaturePresent(windows.PF_ARM_V81_ATOMIC_INSTRUCTIONS_AVAILABLE)
if windows.IsProcessorFeaturePresent(windows.PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE) {
ARM64.HasASIMDDP = true
ARM64.HasASIMDRDM = true
}
if windows.IsProcessorFeaturePresent(windows.PF_ARM_V83_LRCPC_INSTRUCTIONS_AVAILABLE) {
ARM64.HasLRCPC = true
ARM64.HasSM3 = true
}
ARM64.HasSVE = windows.IsProcessorFeaturePresent(windows.PF_ARM_SVE_INSTRUCTIONS_AVAILABLE)
ARM64.HasSVE2 = windows.IsProcessorFeaturePresent(windows.PF_ARM_SVE2_INSTRUCTIONS_AVAILABLE)
ARM64.HasJSCVT = windows.IsProcessorFeaturePresent(windows.PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE)
}

View File

@ -64,6 +64,80 @@ func initOptions() {
func archInit() { func archInit() {
// From internal/cpu
const (
// eax bits
cpuid_AVXVNNI = 1 << 4
// ecx bits
cpuid_SSE3 = 1 << 0
cpuid_PCLMULQDQ = 1 << 1
cpuid_AVX512VBMI = 1 << 1
cpuid_AVX512VBMI2 = 1 << 6
cpuid_SSSE3 = 1 << 9
cpuid_AVX512GFNI = 1 << 8
cpuid_AVX512VAES = 1 << 9
cpuid_AVX512VNNI = 1 << 11
cpuid_AVX512BITALG = 1 << 12
cpuid_FMA = 1 << 12
cpuid_AVX512VPOPCNTDQ = 1 << 14
cpuid_SSE41 = 1 << 19
cpuid_SSE42 = 1 << 20
cpuid_POPCNT = 1 << 23
cpuid_AES = 1 << 25
cpuid_OSXSAVE = 1 << 27
cpuid_AVX = 1 << 28
// "Extended Feature Flag" bits returned in EBX for CPUID EAX=0x7 ECX=0x0
cpuid_BMI1 = 1 << 3
cpuid_AVX2 = 1 << 5
cpuid_BMI2 = 1 << 8
cpuid_ERMS = 1 << 9
cpuid_AVX512F = 1 << 16
cpuid_AVX512DQ = 1 << 17
cpuid_ADX = 1 << 19
cpuid_AVX512CD = 1 << 28
cpuid_SHA = 1 << 29
cpuid_AVX512BW = 1 << 30
cpuid_AVX512VL = 1 << 31
// "Extended Feature Flag" bits returned in ECX for CPUID EAX=0x7 ECX=0x0
cpuid_AVX512_VBMI = 1 << 1
cpuid_AVX512_VBMI2 = 1 << 6
cpuid_GFNI = 1 << 8
cpuid_AVX512VPCLMULQDQ = 1 << 10
cpuid_AVX512_BITALG = 1 << 12
// edx bits
cpuid_FSRM = 1 << 4
// edx bits for CPUID 0x80000001
cpuid_RDTSCP = 1 << 27
)
// Additional constants not in internal/cpu
const (
// eax=1: edx
cpuid_SSE2 = 1 << 26
// eax=1: ecx
cpuid_CX16 = 1 << 13
cpuid_RDRAND = 1 << 30
// eax=7,ecx=0: ebx
cpuid_RDSEED = 1 << 18
cpuid_AVX512IFMA = 1 << 21
cpuid_AVX512PF = 1 << 26
cpuid_AVX512ER = 1 << 27
// eax=7,ecx=0: edx
cpuid_AVX5124VNNIW = 1 << 2
cpuid_AVX5124FMAPS = 1 << 3
cpuid_AMXBF16 = 1 << 22
cpuid_AMXTile = 1 << 24
cpuid_AMXInt8 = 1 << 25
// eax=7,ecx=1: eax
cpuid_AVX512BF16 = 1 << 5
cpuid_AVXIFMA = 1 << 23
// eax=7,ecx=1: edx
cpuid_AVXVNNIInt8 = 1 << 4
)
Initialized = true Initialized = true
maxID, _, _, _ := cpuid(0, 0) maxID, _, _, _ := cpuid(0, 0)
@ -73,90 +147,90 @@ func archInit() {
} }
_, _, ecx1, edx1 := cpuid(1, 0) _, _, ecx1, edx1 := cpuid(1, 0)
X86.HasSSE2 = isSet(26, edx1) X86.HasSSE2 = isSet(edx1, cpuid_SSE2)
X86.HasSSE3 = isSet(0, ecx1) X86.HasSSE3 = isSet(ecx1, cpuid_SSE3)
X86.HasPCLMULQDQ = isSet(1, ecx1) X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ)
X86.HasSSSE3 = isSet(9, ecx1) X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3)
X86.HasFMA = isSet(12, ecx1) X86.HasFMA = isSet(ecx1, cpuid_FMA)
X86.HasCX16 = isSet(13, ecx1) X86.HasCX16 = isSet(ecx1, cpuid_CX16)
X86.HasSSE41 = isSet(19, ecx1) X86.HasSSE41 = isSet(ecx1, cpuid_SSE41)
X86.HasSSE42 = isSet(20, ecx1) X86.HasSSE42 = isSet(ecx1, cpuid_SSE42)
X86.HasPOPCNT = isSet(23, ecx1) X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT)
X86.HasAES = isSet(25, ecx1) X86.HasAES = isSet(ecx1, cpuid_AES)
X86.HasOSXSAVE = isSet(27, ecx1) X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE)
X86.HasRDRAND = isSet(30, ecx1) X86.HasRDRAND = isSet(ecx1, cpuid_RDRAND)
var osSupportsAVX, osSupportsAVX512 bool var osSupportsAVX, osSupportsAVX512 bool
// For XGETBV, OSXSAVE bit is required and sufficient. // For XGETBV, OSXSAVE bit is required and sufficient.
if X86.HasOSXSAVE { if X86.HasOSXSAVE {
eax, _ := xgetbv() eax, _ := xgetbv()
// Check if XMM and YMM registers have OS support. // Check if XMM and YMM registers have OS support.
osSupportsAVX = isSet(1, eax) && isSet(2, eax) osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2)
if runtime.GOOS == "darwin" { if runtime.GOOS == "darwin" {
// Darwin requires special AVX512 checks, see cpu_darwin_x86.go // Darwin requires special AVX512 checks, see cpu_darwin_x86.go
osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
} else { } else {
// Check if OPMASK and ZMM registers have OS support. // Check if OPMASK and ZMM registers have OS support.
osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) osSupportsAVX512 = osSupportsAVX && isSet(eax, 1<<5) && isSet(eax, 1<<6) && isSet(eax, 1<<7)
} }
} }
X86.HasAVX = isSet(28, ecx1) && osSupportsAVX X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX
if maxID < 7 { if maxID < 7 {
return return
} }
eax7, ebx7, ecx7, edx7 := cpuid(7, 0) eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
X86.HasBMI1 = isSet(3, ebx7) X86.HasBMI1 = isSet(ebx7, cpuid_BMI1)
X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX
X86.HasBMI2 = isSet(8, ebx7) X86.HasBMI2 = isSet(ebx7, cpuid_BMI2)
X86.HasERMS = isSet(9, ebx7) X86.HasERMS = isSet(ebx7, cpuid_ERMS)
X86.HasRDSEED = isSet(18, ebx7) X86.HasRDSEED = isSet(ebx7, cpuid_RDSEED)
X86.HasADX = isSet(19, ebx7) X86.HasADX = isSet(ebx7, cpuid_ADX)
X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension X86.HasAVX512 = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
if X86.HasAVX512 { if X86.HasAVX512 {
X86.HasAVX512F = true X86.HasAVX512F = true
X86.HasAVX512CD = isSet(28, ebx7) X86.HasAVX512CD = isSet(ebx7, cpuid_AVX512CD)
X86.HasAVX512ER = isSet(27, ebx7) X86.HasAVX512ER = isSet(ebx7, cpuid_AVX512ER)
X86.HasAVX512PF = isSet(26, ebx7) X86.HasAVX512PF = isSet(ebx7, cpuid_AVX512PF)
X86.HasAVX512VL = isSet(31, ebx7) X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL)
X86.HasAVX512BW = isSet(30, ebx7) X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW)
X86.HasAVX512DQ = isSet(17, ebx7) X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ)
X86.HasAVX512IFMA = isSet(21, ebx7) X86.HasAVX512IFMA = isSet(ebx7, cpuid_AVX512IFMA)
X86.HasAVX512VBMI = isSet(1, ecx7) X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512_VBMI)
X86.HasAVX5124VNNIW = isSet(2, edx7) X86.HasAVX5124VNNIW = isSet(edx7, cpuid_AVX5124VNNIW)
X86.HasAVX5124FMAPS = isSet(3, edx7) X86.HasAVX5124FMAPS = isSet(edx7, cpuid_AVX5124FMAPS)
X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) X86.HasAVX512VPOPCNTDQ = isSet(ecx7, cpuid_AVX512VPOPCNTDQ)
X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) X86.HasAVX512VPCLMULQDQ = isSet(ecx7, cpuid_AVX512VPCLMULQDQ)
X86.HasAVX512VNNI = isSet(11, ecx7) X86.HasAVX512VNNI = isSet(ecx7, cpuid_AVX512VNNI)
X86.HasAVX512GFNI = isSet(8, ecx7) X86.HasAVX512GFNI = isSet(ecx7, cpuid_AVX512GFNI)
X86.HasAVX512VAES = isSet(9, ecx7) X86.HasAVX512VAES = isSet(ecx7, cpuid_AVX512VAES)
X86.HasAVX512VBMI2 = isSet(6, ecx7) X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512VBMI2)
X86.HasAVX512BITALG = isSet(12, ecx7) X86.HasAVX512BITALG = isSet(ecx7, cpuid_AVX512BITALG)
} }
X86.HasAMXTile = isSet(24, edx7) X86.HasAMXTile = isSet(edx7, cpuid_AMXTile)
X86.HasAMXInt8 = isSet(25, edx7) X86.HasAMXInt8 = isSet(edx7, cpuid_AMXInt8)
X86.HasAMXBF16 = isSet(22, edx7) X86.HasAMXBF16 = isSet(edx7, cpuid_AMXBF16)
// These features depend on the second level of extended features. // These features depend on the second level of extended features.
if eax7 >= 1 { if eax7 >= 1 {
eax71, _, _, edx71 := cpuid(7, 1) eax71, _, _, edx71 := cpuid(7, 1)
if X86.HasAVX512 { if X86.HasAVX512 {
X86.HasAVX512BF16 = isSet(5, eax71) X86.HasAVX512BF16 = isSet(eax71, cpuid_AVX512BF16)
} }
if X86.HasAVX { if X86.HasAVX {
X86.HasAVXIFMA = isSet(23, eax71) X86.HasAVXIFMA = isSet(eax71, cpuid_AVXIFMA)
X86.HasAVXVNNI = isSet(4, eax71) X86.HasAVXVNNI = isSet(eax71, cpuid_AVXVNNI)
X86.HasAVXVNNIInt8 = isSet(4, edx71) X86.HasAVXVNNIInt8 = isSet(edx71, cpuid_AVXVNNIInt8)
} }
} }
} }
func isSet(bitpos uint, value uint32) bool { func isSet(hwc uint32, value uint32) bool {
return value&(1<<bitpos) != 0 return hwc&value != 0
} }

View File

@ -1,21 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.5
package plan9
import "syscall"
func fixwd() {
syscall.Fixwd()
}
func Getwd() (wd string, err error) {
return syscall.Getwd()
}
func Chdir(path string) error {
return syscall.Chdir(path)
}

View File

@ -2,22 +2,18 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build !go1.5
package plan9 package plan9
import "syscall"
func fixwd() { func fixwd() {
syscall.Fixwd()
} }
func Getwd() (wd string, err error) { func Getwd() (wd string, err error) {
fd, err := open(".", O_RDONLY) return syscall.Getwd()
if err != nil {
return "", err
}
defer Close(fd)
return Fd2path(fd)
} }
func Chdir(path string) error { func Chdir(path string) error {
return chdir(path) return syscall.Chdir(path)
} }

View File

@ -38,8 +38,15 @@ func SchedSetaffinity(pid int, set *CPUSet) error {
// Zero clears the set s, so that it contains no CPUs. // Zero clears the set s, so that it contains no CPUs.
func (s *CPUSet) Zero() { func (s *CPUSet) Zero() {
clear(s[:])
}
// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity]
// will silently ignore any invalid CPU bits in [CPUSet] so this is an
// efficient way of resetting the CPU affinity of a process.
func (s *CPUSet) Fill() {
for i := range s { for i := range s {
s[i] = 0 s[i] = ^cpuMask(0)
} }
} }

View File

@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool {
// Zero clears the set fds. // Zero clears the set fds.
func (fds *FdSet) Zero() { func (fds *FdSet) Zero() {
for i := range fds.Bits { clear(fds.Bits[:])
fds.Bits[i] = 0
}
} }

View File

@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) {
// clear zeroes the ifreq's union field to prevent trailing garbage data from // clear zeroes the ifreq's union field to prevent trailing garbage data from
// being sent to the kernel if an ifreq is reused. // being sent to the kernel if an ifreq is reused.
func (ifr *Ifreq) clear() { func (ifr *Ifreq) clear() {
for i := range ifr.raw.Ifru { clear(ifr.raw.Ifru[:])
ifr.raw.Ifru[i] = 0
}
} }
// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as // TODO(mdlayher): export as IfreqData? For now we can provide helpers such as

View File

@ -6,9 +6,7 @@
package unix package unix
import ( import "unsafe"
"unsafe"
)
// ioctl itself should not be exposed directly, but additional get/set // ioctl itself should not be exposed directly, but additional get/set
// functions for specific types are permissible. // functions for specific types are permissible.
@ -28,6 +26,13 @@ func IoctlSetPointerInt(fd int, req int, value int) error {
return ioctlPtr(fd, req, unsafe.Pointer(&v)) return ioctlPtr(fd, req, unsafe.Pointer(&v))
} }
// IoctlSetString performs an ioctl operation which sets a string value
// on fd, using the specified request number.
func IoctlSetString(fd int, req int, value string) error {
bs := append([]byte(value), 0)
return ioctlPtr(fd, req, unsafe.Pointer(&bs[0]))
}
// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
// //
// To change fd's window size, the req argument should be TIOCSWINSZ. // To change fd's window size, the req argument should be TIOCSWINSZ.

View File

@ -6,9 +6,7 @@
package unix package unix
import ( import "unsafe"
"unsafe"
)
// ioctl itself should not be exposed directly, but additional get/set // ioctl itself should not be exposed directly, but additional get/set
// functions for specific types are permissible. // functions for specific types are permissible.
@ -28,6 +26,13 @@ func IoctlSetPointerInt(fd int, req uint, value int) error {
return ioctlPtr(fd, req, unsafe.Pointer(&v)) return ioctlPtr(fd, req, unsafe.Pointer(&v))
} }
// IoctlSetString performs an ioctl operation which sets a string value
// on fd, using the specified request number.
func IoctlSetString(fd int, req uint, value string) error {
bs := append([]byte(value), 0)
return ioctlPtr(fd, req, unsafe.Pointer(&bs[0]))
}
// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
// //
// To change fd's window size, the req argument should be TIOCSWINSZ. // To change fd's window size, the req argument should be TIOCSWINSZ.

View File

@ -49,6 +49,7 @@ esac
if [[ "$GOOS" = "linux" ]]; then if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system # Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run) # Files generated through docker (use $cmd so you can Ctl-C the build or run)
set -e
$cmd docker build --tag generate:$GOOS $GOOS $cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
exit exit

View File

@ -226,6 +226,7 @@ struct ltchars {
#include <linux/cryptouser.h> #include <linux/cryptouser.h>
#include <linux/devlink.h> #include <linux/devlink.h>
#include <linux/dm-ioctl.h> #include <linux/dm-ioctl.h>
#include <linux/elf.h>
#include <linux/errqueue.h> #include <linux/errqueue.h>
#include <linux/ethtool_netlink.h> #include <linux/ethtool_netlink.h>
#include <linux/falloc.h> #include <linux/falloc.h>
@ -255,6 +256,7 @@ struct ltchars {
#include <linux/loop.h> #include <linux/loop.h>
#include <linux/lwtunnel.h> #include <linux/lwtunnel.h>
#include <linux/magic.h> #include <linux/magic.h>
#include <linux/mei.h>
#include <linux/memfd.h> #include <linux/memfd.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mount.h> #include <linux/mount.h>
@ -349,6 +351,9 @@ struct ltchars {
#define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN)
#define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN)
// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info")
#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME
#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION
' '
includes_NetBSD=' includes_NetBSD='
@ -526,6 +531,7 @@ ccflags="$@"
$2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
$2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ ||
$2 ~ /^O?XTABS$/ || $2 ~ /^O?XTABS$/ ||
$2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^TC[IO](ON|OFF)$/ ||
$2 ~ /^IN_/ || $2 ~ /^IN_/ ||
@ -608,7 +614,7 @@ ccflags="$@"
$2 !~ /IOC_MAGIC/ && $2 !~ /IOC_MAGIC/ &&
$2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ ||
$2 ~ /^(VM|VMADDR)_/ || $2 ~ /^(VM|VMADDR)_/ ||
$2 ~ /^IOCTL_VM_SOCKETS_/ || $2 ~ /^(IOCTL_VM_SOCKETS_|IOCTL_MEI_)/ ||
$2 ~ /^(TASKSTATS|TS)_/ || $2 ~ /^(TASKSTATS|TS)_/ ||
$2 ~ /^CGROUPSTATS_/ || $2 ~ /^CGROUPSTATS_/ ||
$2 ~ /^GENL_/ || $2 ~ /^GENL_/ ||

View File

@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI
return return
} }
// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
const minIovec = 8 const minIovec = 8
func Readv(fd int, iovs [][]byte) (n int, err error) { func Readv(fd int, iovs [][]byte) (n int, err error) {
if !darwinKernelVersionMin(11, 0, 0) {
return 0, ENOSYS
}
iovecs := make([]Iovec, 0, minIovec) iovecs := make([]Iovec, 0, minIovec)
iovecs = appendBytes(iovecs, iovs) iovecs = appendBytes(iovecs, iovs)
n, err = readv(fd, iovecs) n, err = readv(fd, iovecs)
@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) {
} }
func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
if !darwinKernelVersionMin(11, 0, 0) {
return 0, ENOSYS
}
iovecs := make([]Iovec, 0, minIovec) iovecs := make([]Iovec, 0, minIovec)
iovecs = appendBytes(iovecs, iovs) iovecs = appendBytes(iovecs, iovs)
n, err = preadv(fd, iovecs, offset) n, err = preadv(fd, iovecs, offset)
@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
} }
func Writev(fd int, iovs [][]byte) (n int, err error) { func Writev(fd int, iovs [][]byte) (n int, err error) {
if !darwinKernelVersionMin(11, 0, 0) {
return 0, ENOSYS
}
iovecs := make([]Iovec, 0, minIovec) iovecs := make([]Iovec, 0, minIovec)
iovecs = appendBytes(iovecs, iovs) iovecs = appendBytes(iovecs, iovs)
if raceenabled { if raceenabled {
@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) {
} }
func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
if !darwinKernelVersionMin(11, 0, 0) {
return 0, ENOSYS
}
iovecs := make([]Iovec, 0, minIovec) iovecs := make([]Iovec, 0, minIovec)
iovecs = appendBytes(iovecs, iovs) iovecs = appendBytes(iovecs, iovs)
if raceenabled { if raceenabled {
@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) {
} }
} }
func darwinMajorMinPatch() (maj, min, patch int, err error) { //sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
var un Utsname
err = Uname(&un)
if err != nil {
return
}
var mmp [3]int
c := 0
Loop:
for _, b := range un.Release[:] {
switch {
case b >= '0' && b <= '9':
mmp[c] = 10*mmp[c] + int(b-'0')
case b == '.':
c++
if c > 2 {
return 0, 0, 0, ENOTSUP
}
case b == 0:
break Loop
default:
return 0, 0, 0, ENOTSUP
}
}
if c != 2 {
return 0, 0, 0, ENOTSUP
}
return mmp[0], mmp[1], mmp[2], nil
}
func darwinKernelVersionMin(maj, min, patch int) bool {
actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch()
if err != nil {
return false
}
return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch)
}
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)

View File

@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) {
// one. The kernel expects SID to be in network byte order. // one. The kernel expects SID to be in network byte order.
binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID) binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
copy(sa.raw[8:14], sa.Remote) copy(sa.raw[8:14], sa.Remote)
for i := 14; i < 14+IFNAMSIZ; i++ { clear(sa.raw[14 : 14+IFNAMSIZ])
sa.raw[i] = 0
}
copy(sa.raw[14:], sa.Dev) copy(sa.raw[14:], sa.Dev)
return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
} }
@ -2645,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) {
//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error)
//sys Mseal(b []byte, flags uint) (err error) //sys Mseal(b []byte, flags uint) (err error)
//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY
func SetMemPolicy(mode int, mask *CPUSet) error {
return setMemPolicy(mode, mask, _CPU_SETSIZE)
}

View File

@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
return Statvfs1(path, buf, ST_WAIT) return Statvfs1(path, buf, ST_WAIT)
} }
func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) {
var (
_p0 unsafe.Pointer
bufsize uintptr
)
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf))
}
r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
n = int(r0)
if e1 != 0 {
err = e1
}
return
}
/* /*
* Exposed directly * Exposed directly
*/ */

View File

@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Kill(pid int, signum syscall.Signal) (err error) //sys Kill(pid int, signum syscall.Signal) (err error)
//sys Lchown(path string, uid int, gid int) (err error) //sys Lchown(path string, uid int, gid int) (err error)
//sys Link(path string, link string) (err error) //sys Link(path string, link string) (err error)
//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten //sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen
//sys Lstat(path string, stat *Stat_t) (err error) //sys Lstat(path string, stat *Stat_t) (err error)
//sys Madvise(b []byte, advice int) (err error) //sys Madvise(b []byte, advice int) (err error)
//sys Mkdir(path string, mode uint32) (err error) //sys Mkdir(path string, mode uint32) (err error)
@ -1052,14 +1052,6 @@ func IoctlSetIntRetInt(fd int, req int, arg int) (int, error) {
return ioctlRet(fd, req, uintptr(arg)) return ioctlRet(fd, req, uintptr(arg))
} }
func IoctlSetString(fd int, req int, val string) error {
bs := make([]byte, len(val)+1)
copy(bs[:len(bs)-1], val)
err := ioctlPtr(fd, req, unsafe.Pointer(&bs[0]))
runtime.KeepAlive(&bs[0])
return err
}
// Lifreq Helpers // Lifreq Helpers
func (l *Lifreq) SetName(name string) error { func (l *Lifreq) SetName(name string) error {

View File

@ -367,7 +367,9 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
iov[0].SetLen(len(p)) iov[0].SetLen(len(p))
} }
var rsa RawSockaddrAny var rsa RawSockaddrAny
n, oobn, recvflags, err = recvmsgRaw(fd, iov[:], oob, flags, &rsa) if n, oobn, recvflags, err = recvmsgRaw(fd, iov[:], oob, flags, &rsa); err != nil {
return
}
// source address is only specified if the socket is unconnected // source address is only specified if the socket is unconnected
if rsa.Addr.Family != AF_UNSPEC { if rsa.Addr.Family != AF_UNSPEC {
from, err = anyToSockaddr(fd, &rsa) from, err = anyToSockaddr(fd, &rsa)
@ -389,8 +391,10 @@ func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn in
} }
} }
var rsa RawSockaddrAny var rsa RawSockaddrAny
n, oobn, recvflags, err = recvmsgRaw(fd, iov, oob, flags, &rsa) if n, oobn, recvflags, err = recvmsgRaw(fd, iov, oob, flags, &rsa); err != nil {
if err == nil && rsa.Addr.Family != AF_UNSPEC { return
}
if rsa.Addr.Family != AF_UNSPEC {
from, err = anyToSockaddr(fd, &rsa) from, err = anyToSockaddr(fd, &rsa)
} }
return return

View File

@ -319,6 +319,7 @@ const (
AUDIT_INTEGRITY_POLICY_RULE = 0x70f AUDIT_INTEGRITY_POLICY_RULE = 0x70f
AUDIT_INTEGRITY_RULE = 0x70d AUDIT_INTEGRITY_RULE = 0x70d
AUDIT_INTEGRITY_STATUS = 0x70a AUDIT_INTEGRITY_STATUS = 0x70a
AUDIT_INTEGRITY_USERSPACE = 0x710
AUDIT_IPC = 0x517 AUDIT_IPC = 0x517
AUDIT_IPC_SET_PERM = 0x51f AUDIT_IPC_SET_PERM = 0x51f
AUDIT_IPE_ACCESS = 0x58c AUDIT_IPE_ACCESS = 0x58c
@ -327,6 +328,8 @@ const (
AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL = 0x7d0
AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERNEL_OTHER = 0x524
AUDIT_KERN_MODULE = 0x532 AUDIT_KERN_MODULE = 0x532
AUDIT_LANDLOCK_ACCESS = 0x58f
AUDIT_LANDLOCK_DOMAIN = 0x590
AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_FEATURE = 0x1
AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_KERN_ANOM_MSG = 0x707
AUDIT_LAST_USER_MSG = 0x4af AUDIT_LAST_USER_MSG = 0x4af
@ -491,6 +494,7 @@ const (
BPF_F_BEFORE = 0x8 BPF_F_BEFORE = 0x8
BPF_F_ID = 0x20 BPF_F_ID = 0x20
BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_NETFILTER_IP_DEFRAG = 0x1
BPF_F_PREORDER = 0x40
BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1
BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REDIRECT_FLAGS = 0x19
BPF_F_REPLACE = 0x4 BPF_F_REPLACE = 0x4
@ -527,6 +531,7 @@ const (
BPF_LDX = 0x1 BPF_LDX = 0x1
BPF_LEN = 0x80 BPF_LEN = 0x80
BPF_LL_OFF = -0x200000 BPF_LL_OFF = -0x200000
BPF_LOAD_ACQ = 0x100
BPF_LSH = 0x60 BPF_LSH = 0x60
BPF_MAJOR_VERSION = 0x1 BPF_MAJOR_VERSION = 0x1
BPF_MAXINSNS = 0x1000 BPF_MAXINSNS = 0x1000
@ -554,6 +559,7 @@ const (
BPF_RET = 0x6 BPF_RET = 0x6
BPF_RSH = 0x70 BPF_RSH = 0x70
BPF_ST = 0x2 BPF_ST = 0x2
BPF_STORE_REL = 0x110
BPF_STX = 0x3 BPF_STX = 0x3
BPF_SUB = 0x10 BPF_SUB = 0x10
BPF_TAG_SIZE = 0x8 BPF_TAG_SIZE = 0x8
@ -843,24 +849,90 @@ const (
DM_UUID_FLAG = 0x4000 DM_UUID_FLAG = 0x4000
DM_UUID_LEN = 0x81 DM_UUID_LEN = 0x81
DM_VERSION = 0xc138fd00 DM_VERSION = 0xc138fd00
DM_VERSION_EXTRA = "-ioctl (2023-03-01)" DM_VERSION_EXTRA = "-ioctl (2025-04-28)"
DM_VERSION_MAJOR = 0x4 DM_VERSION_MAJOR = 0x4
DM_VERSION_MINOR = 0x30 DM_VERSION_MINOR = 0x32
DM_VERSION_PATCHLEVEL = 0x0 DM_VERSION_PATCHLEVEL = 0x0
DT_ADDRRNGHI = 0x6ffffeff
DT_ADDRRNGLO = 0x6ffffe00
DT_BLK = 0x6 DT_BLK = 0x6
DT_CHR = 0x2 DT_CHR = 0x2
DT_DEBUG = 0x15
DT_DIR = 0x4 DT_DIR = 0x4
DT_ENCODING = 0x20
DT_FIFO = 0x1 DT_FIFO = 0x1
DT_FINI = 0xd
DT_FLAGS_1 = 0x6ffffffb
DT_GNU_HASH = 0x6ffffef5
DT_HASH = 0x4
DT_HIOS = 0x6ffff000
DT_HIPROC = 0x7fffffff
DT_INIT = 0xc
DT_JMPREL = 0x17
DT_LNK = 0xa DT_LNK = 0xa
DT_LOOS = 0x6000000d
DT_LOPROC = 0x70000000
DT_NEEDED = 0x1
DT_NULL = 0x0
DT_PLTGOT = 0x3
DT_PLTREL = 0x14
DT_PLTRELSZ = 0x2
DT_REG = 0x8 DT_REG = 0x8
DT_REL = 0x11
DT_RELA = 0x7
DT_RELACOUNT = 0x6ffffff9
DT_RELAENT = 0x9
DT_RELASZ = 0x8
DT_RELCOUNT = 0x6ffffffa
DT_RELENT = 0x13
DT_RELSZ = 0x12
DT_RPATH = 0xf
DT_SOCK = 0xc DT_SOCK = 0xc
DT_SONAME = 0xe
DT_STRSZ = 0xa
DT_STRTAB = 0x5
DT_SYMBOLIC = 0x10
DT_SYMENT = 0xb
DT_SYMTAB = 0x6
DT_TEXTREL = 0x16
DT_UNKNOWN = 0x0 DT_UNKNOWN = 0x0
DT_VALRNGHI = 0x6ffffdff
DT_VALRNGLO = 0x6ffffd00
DT_VERDEF = 0x6ffffffc
DT_VERDEFNUM = 0x6ffffffd
DT_VERNEED = 0x6ffffffe
DT_VERNEEDNUM = 0x6fffffff
DT_VERSYM = 0x6ffffff0
DT_WHT = 0xe DT_WHT = 0xe
ECHO = 0x8 ECHO = 0x8
ECRYPTFS_SUPER_MAGIC = 0xf15f ECRYPTFS_SUPER_MAGIC = 0xf15f
EFD_SEMAPHORE = 0x1 EFD_SEMAPHORE = 0x1
EFIVARFS_MAGIC = 0xde5e81e4 EFIVARFS_MAGIC = 0xde5e81e4
EFS_SUPER_MAGIC = 0x414a53 EFS_SUPER_MAGIC = 0x414a53
EI_CLASS = 0x4
EI_DATA = 0x5
EI_MAG0 = 0x0
EI_MAG1 = 0x1
EI_MAG2 = 0x2
EI_MAG3 = 0x3
EI_NIDENT = 0x10
EI_OSABI = 0x7
EI_PAD = 0x8
EI_VERSION = 0x6
ELFCLASS32 = 0x1
ELFCLASS64 = 0x2
ELFCLASSNONE = 0x0
ELFCLASSNUM = 0x3
ELFDATA2LSB = 0x1
ELFDATA2MSB = 0x2
ELFDATANONE = 0x0
ELFMAG = "\177ELF"
ELFMAG0 = 0x7f
ELFMAG1 = 'E'
ELFMAG2 = 'L'
ELFMAG3 = 'F'
ELFOSABI_LINUX = 0x3
ELFOSABI_NONE = 0x0
EM_386 = 0x3 EM_386 = 0x3
EM_486 = 0x6 EM_486 = 0x6
EM_68K = 0x4 EM_68K = 0x4
@ -936,11 +1008,10 @@ const (
EPOLL_CTL_MOD = 0x3 EPOLL_CTL_MOD = 0x3
EPOLL_IOC_TYPE = 0x8a EPOLL_IOC_TYPE = 0x8a
EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2
ESP_V4_FLOW = 0xa
ESP_V6_FLOW = 0xc
ETHER_FLOW = 0x12
ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_BUSINFO_LEN = 0x20
ETHTOOL_EROMVERS_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20
ETHTOOL_FAMILY_NAME = "ethtool"
ETHTOOL_FAMILY_VERSION = 0x1
ETHTOOL_FEC_AUTO = 0x2 ETHTOOL_FEC_AUTO = 0x2
ETHTOOL_FEC_BASER = 0x10 ETHTOOL_FEC_BASER = 0x10
ETHTOOL_FEC_LLRS = 0x20 ETHTOOL_FEC_LLRS = 0x20
@ -1147,14 +1218,24 @@ const (
ETH_P_WCCP = 0x883e ETH_P_WCCP = 0x883e
ETH_P_X25 = 0x805 ETH_P_X25 = 0x805
ETH_P_XDSA = 0xf8 ETH_P_XDSA = 0xf8
ET_CORE = 0x4
ET_DYN = 0x3
ET_EXEC = 0x2
ET_HIPROC = 0xffff
ET_LOPROC = 0xff00
ET_NONE = 0x0
ET_REL = 0x1
EV_ABS = 0x3 EV_ABS = 0x3
EV_CNT = 0x20 EV_CNT = 0x20
EV_CURRENT = 0x1
EV_FF = 0x15 EV_FF = 0x15
EV_FF_STATUS = 0x17 EV_FF_STATUS = 0x17
EV_KEY = 0x1 EV_KEY = 0x1
EV_LED = 0x11 EV_LED = 0x11
EV_MAX = 0x1f EV_MAX = 0x1f
EV_MSC = 0x4 EV_MSC = 0x4
EV_NONE = 0x0
EV_NUM = 0x2
EV_PWR = 0x16 EV_PWR = 0x16
EV_REL = 0x2 EV_REL = 0x2
EV_REP = 0x14 EV_REP = 0x14
@ -1203,13 +1284,18 @@ const (
FAN_DENY = 0x2 FAN_DENY = 0x2
FAN_ENABLE_AUDIT = 0x40 FAN_ENABLE_AUDIT = 0x40
FAN_EPIDFD = -0x2 FAN_EPIDFD = -0x2
FAN_ERRNO_BITS = 0x8
FAN_ERRNO_MASK = 0xff
FAN_ERRNO_SHIFT = 0x18
FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID = 0x3
FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2
FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_ERROR = 0x5
FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_INFO_TYPE_FID = 0x1
FAN_EVENT_INFO_TYPE_MNT = 0x7
FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc
FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa
FAN_EVENT_INFO_TYPE_PIDFD = 0x4 FAN_EVENT_INFO_TYPE_PIDFD = 0x4
FAN_EVENT_INFO_TYPE_RANGE = 0x6
FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_METADATA_LEN = 0x18
FAN_EVENT_ON_CHILD = 0x8000000 FAN_EVENT_ON_CHILD = 0x8000000
FAN_FS_ERROR = 0x8000 FAN_FS_ERROR = 0x8000
@ -1224,9 +1310,12 @@ const (
FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORED_SURV_MODIFY = 0x40
FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_IGNORE_SURV = 0x440
FAN_MARK_INODE = 0x0 FAN_MARK_INODE = 0x0
FAN_MARK_MNTNS = 0x110
FAN_MARK_MOUNT = 0x10 FAN_MARK_MOUNT = 0x10
FAN_MARK_ONLYDIR = 0x8 FAN_MARK_ONLYDIR = 0x8
FAN_MARK_REMOVE = 0x2 FAN_MARK_REMOVE = 0x2
FAN_MNT_ATTACH = 0x1000000
FAN_MNT_DETACH = 0x2000000
FAN_MODIFY = 0x2 FAN_MODIFY = 0x2
FAN_MOVE = 0xc0 FAN_MOVE = 0xc0
FAN_MOVED_FROM = 0x40 FAN_MOVED_FROM = 0x40
@ -1240,6 +1329,7 @@ const (
FAN_OPEN_EXEC = 0x1000 FAN_OPEN_EXEC = 0x1000
FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_EXEC_PERM = 0x40000
FAN_OPEN_PERM = 0x10000 FAN_OPEN_PERM = 0x10000
FAN_PRE_ACCESS = 0x100000
FAN_Q_OVERFLOW = 0x4000 FAN_Q_OVERFLOW = 0x4000
FAN_RENAME = 0x10000000 FAN_RENAME = 0x10000000
FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME = 0xc00
@ -1247,6 +1337,7 @@ const (
FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_DIR_FID = 0x400
FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FD_ERROR = 0x2000
FAN_REPORT_FID = 0x200 FAN_REPORT_FID = 0x200
FAN_REPORT_MNT = 0x4000
FAN_REPORT_NAME = 0x800 FAN_REPORT_NAME = 0x800
FAN_REPORT_PIDFD = 0x80 FAN_REPORT_PIDFD = 0x80
FAN_REPORT_TARGET_FID = 0x1000 FAN_REPORT_TARGET_FID = 0x1000
@ -1266,6 +1357,7 @@ const (
FIB_RULE_PERMANENT = 0x1 FIB_RULE_PERMANENT = 0x1
FIB_RULE_UNRESOLVED = 0x4 FIB_RULE_UNRESOLVED = 0x4
FIDEDUPERANGE = 0xc0189436 FIDEDUPERANGE = 0xc0189436
FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1
FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8
FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX = "fscrypt:"
FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8
@ -1523,6 +1615,8 @@ const (
IN_OPEN = 0x20 IN_OPEN = 0x20
IN_Q_OVERFLOW = 0x4000 IN_Q_OVERFLOW = 0x4000
IN_UNMOUNT = 0x2000 IN_UNMOUNT = 0x2000
IOCTL_MEI_CONNECT_CLIENT = 0xc0104801
IOCTL_MEI_CONNECT_CLIENT_VTAG = 0xc0144804
IPPROTO_AH = 0x33 IPPROTO_AH = 0x33
IPPROTO_BEETPH = 0x5e IPPROTO_BEETPH = 0x5e
IPPROTO_COMP = 0x6c IPPROTO_COMP = 0x6c
@ -1574,7 +1668,6 @@ const (
IPV6_DONTFRAG = 0x3e IPV6_DONTFRAG = 0x3e
IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DROP_MEMBERSHIP = 0x15
IPV6_DSTOPTS = 0x3b IPV6_DSTOPTS = 0x3b
IPV6_FLOW = 0x11
IPV6_FREEBIND = 0x4e IPV6_FREEBIND = 0x4e
IPV6_HDRINCL = 0x24 IPV6_HDRINCL = 0x24
IPV6_HOPLIMIT = 0x34 IPV6_HOPLIMIT = 0x34
@ -1625,7 +1718,6 @@ const (
IPV6_TRANSPARENT = 0x4b IPV6_TRANSPARENT = 0x4b
IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_HOPS = 0x10
IPV6_UNICAST_IF = 0x4c IPV6_UNICAST_IF = 0x4c
IPV6_USER_FLOW = 0xe
IPV6_V6ONLY = 0x1a IPV6_V6ONLY = 0x1a
IPV6_VERSION = 0x60 IPV6_VERSION = 0x60
IPV6_VERSION_MASK = 0xf0 IPV6_VERSION_MASK = 0xf0
@ -1687,7 +1779,6 @@ const (
IP_TTL = 0x2 IP_TTL = 0x2
IP_UNBLOCK_SOURCE = 0x25 IP_UNBLOCK_SOURCE = 0x25
IP_UNICAST_IF = 0x32 IP_UNICAST_IF = 0x32
IP_USER_FLOW = 0xd
IP_XFRM_POLICY = 0x11 IP_XFRM_POLICY = 0x11
ISOFS_SUPER_MAGIC = 0x9660 ISOFS_SUPER_MAGIC = 0x9660
ISTRIP = 0x20 ISTRIP = 0x20
@ -1809,7 +1900,11 @@ const (
LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2
LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1
LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2
LANDLOCK_CREATE_RULESET_ERRATA = 0x2
LANDLOCK_CREATE_RULESET_VERSION = 0x1 LANDLOCK_CREATE_RULESET_VERSION = 0x1
LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2
LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1
LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4
LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1
LANDLOCK_SCOPE_SIGNAL = 0x2 LANDLOCK_SCOPE_SIGNAL = 0x2
LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_OFF = 0x0
@ -2259,7 +2354,167 @@ const (
NLM_F_REPLACE = 0x100 NLM_F_REPLACE = 0x100
NLM_F_REQUEST = 0x1 NLM_F_REQUEST = 0x1
NLM_F_ROOT = 0x100 NLM_F_ROOT = 0x100
NN_386_IOPERM = "LINUX"
NN_386_TLS = "LINUX"
NN_ARC_V2 = "LINUX"
NN_ARM_FPMR = "LINUX"
NN_ARM_GCS = "LINUX"
NN_ARM_HW_BREAK = "LINUX"
NN_ARM_HW_WATCH = "LINUX"
NN_ARM_PACA_KEYS = "LINUX"
NN_ARM_PACG_KEYS = "LINUX"
NN_ARM_PAC_ENABLED_KEYS = "LINUX"
NN_ARM_PAC_MASK = "LINUX"
NN_ARM_POE = "LINUX"
NN_ARM_SSVE = "LINUX"
NN_ARM_SVE = "LINUX"
NN_ARM_SYSTEM_CALL = "LINUX"
NN_ARM_TAGGED_ADDR_CTRL = "LINUX"
NN_ARM_TLS = "LINUX"
NN_ARM_VFP = "LINUX"
NN_ARM_ZA = "LINUX"
NN_ARM_ZT = "LINUX"
NN_AUXV = "CORE"
NN_FILE = "CORE"
NN_GNU_PROPERTY_TYPE_0 = "GNU"
NN_LOONGARCH_CPUCFG = "LINUX"
NN_LOONGARCH_CSR = "LINUX"
NN_LOONGARCH_HW_BREAK = "LINUX"
NN_LOONGARCH_HW_WATCH = "LINUX"
NN_LOONGARCH_LASX = "LINUX"
NN_LOONGARCH_LBT = "LINUX"
NN_LOONGARCH_LSX = "LINUX"
NN_MIPS_DSP = "LINUX"
NN_MIPS_FP_MODE = "LINUX"
NN_MIPS_MSA = "LINUX"
NN_PPC_DEXCR = "LINUX"
NN_PPC_DSCR = "LINUX"
NN_PPC_EBB = "LINUX"
NN_PPC_HASHKEYR = "LINUX"
NN_PPC_PKEY = "LINUX"
NN_PPC_PMU = "LINUX"
NN_PPC_PPR = "LINUX"
NN_PPC_SPE = "LINUX"
NN_PPC_TAR = "LINUX"
NN_PPC_TM_CDSCR = "LINUX"
NN_PPC_TM_CFPR = "LINUX"
NN_PPC_TM_CGPR = "LINUX"
NN_PPC_TM_CPPR = "LINUX"
NN_PPC_TM_CTAR = "LINUX"
NN_PPC_TM_CVMX = "LINUX"
NN_PPC_TM_CVSX = "LINUX"
NN_PPC_TM_SPR = "LINUX"
NN_PPC_VMX = "LINUX"
NN_PPC_VSX = "LINUX"
NN_PRFPREG = "CORE"
NN_PRPSINFO = "CORE"
NN_PRSTATUS = "CORE"
NN_PRXFPREG = "LINUX"
NN_RISCV_CSR = "LINUX"
NN_RISCV_TAGGED_ADDR_CTRL = "LINUX"
NN_RISCV_VECTOR = "LINUX"
NN_S390_CTRS = "LINUX"
NN_S390_GS_BC = "LINUX"
NN_S390_GS_CB = "LINUX"
NN_S390_HIGH_GPRS = "LINUX"
NN_S390_LAST_BREAK = "LINUX"
NN_S390_PREFIX = "LINUX"
NN_S390_PV_CPU_DATA = "LINUX"
NN_S390_RI_CB = "LINUX"
NN_S390_SYSTEM_CALL = "LINUX"
NN_S390_TDB = "LINUX"
NN_S390_TIMER = "LINUX"
NN_S390_TODCMP = "LINUX"
NN_S390_TODPREG = "LINUX"
NN_S390_VXRS_HIGH = "LINUX"
NN_S390_VXRS_LOW = "LINUX"
NN_SIGINFO = "CORE"
NN_TASKSTRUCT = "CORE"
NN_VMCOREDD = "LINUX"
NN_X86_SHSTK = "LINUX"
NN_X86_XSAVE_LAYOUT = "LINUX"
NN_X86_XSTATE = "LINUX"
NSFS_MAGIC = 0x6e736673 NSFS_MAGIC = 0x6e736673
NT_386_IOPERM = 0x201
NT_386_TLS = 0x200
NT_ARC_V2 = 0x600
NT_ARM_FPMR = 0x40e
NT_ARM_GCS = 0x410
NT_ARM_HW_BREAK = 0x402
NT_ARM_HW_WATCH = 0x403
NT_ARM_PACA_KEYS = 0x407
NT_ARM_PACG_KEYS = 0x408
NT_ARM_PAC_ENABLED_KEYS = 0x40a
NT_ARM_PAC_MASK = 0x406
NT_ARM_POE = 0x40f
NT_ARM_SSVE = 0x40b
NT_ARM_SVE = 0x405
NT_ARM_SYSTEM_CALL = 0x404
NT_ARM_TAGGED_ADDR_CTRL = 0x409
NT_ARM_TLS = 0x401
NT_ARM_VFP = 0x400
NT_ARM_ZA = 0x40c
NT_ARM_ZT = 0x40d
NT_AUXV = 0x6
NT_FILE = 0x46494c45
NT_GNU_PROPERTY_TYPE_0 = 0x5
NT_LOONGARCH_CPUCFG = 0xa00
NT_LOONGARCH_CSR = 0xa01
NT_LOONGARCH_HW_BREAK = 0xa05
NT_LOONGARCH_HW_WATCH = 0xa06
NT_LOONGARCH_LASX = 0xa03
NT_LOONGARCH_LBT = 0xa04
NT_LOONGARCH_LSX = 0xa02
NT_MIPS_DSP = 0x800
NT_MIPS_FP_MODE = 0x801
NT_MIPS_MSA = 0x802
NT_PPC_DEXCR = 0x111
NT_PPC_DSCR = 0x105
NT_PPC_EBB = 0x106
NT_PPC_HASHKEYR = 0x112
NT_PPC_PKEY = 0x110
NT_PPC_PMU = 0x107
NT_PPC_PPR = 0x104
NT_PPC_SPE = 0x101
NT_PPC_TAR = 0x103
NT_PPC_TM_CDSCR = 0x10f
NT_PPC_TM_CFPR = 0x109
NT_PPC_TM_CGPR = 0x108
NT_PPC_TM_CPPR = 0x10e
NT_PPC_TM_CTAR = 0x10d
NT_PPC_TM_CVMX = 0x10a
NT_PPC_TM_CVSX = 0x10b
NT_PPC_TM_SPR = 0x10c
NT_PPC_VMX = 0x100
NT_PPC_VSX = 0x102
NT_PRFPREG = 0x2
NT_PRPSINFO = 0x3
NT_PRSTATUS = 0x1
NT_PRXFPREG = 0x46e62b7f
NT_RISCV_CSR = 0x900
NT_RISCV_TAGGED_ADDR_CTRL = 0x902
NT_RISCV_VECTOR = 0x901
NT_S390_CTRS = 0x304
NT_S390_GS_BC = 0x30c
NT_S390_GS_CB = 0x30b
NT_S390_HIGH_GPRS = 0x300
NT_S390_LAST_BREAK = 0x306
NT_S390_PREFIX = 0x305
NT_S390_PV_CPU_DATA = 0x30e
NT_S390_RI_CB = 0x30d
NT_S390_SYSTEM_CALL = 0x307
NT_S390_TDB = 0x308
NT_S390_TIMER = 0x301
NT_S390_TODCMP = 0x302
NT_S390_TODPREG = 0x303
NT_S390_VXRS_HIGH = 0x30a
NT_S390_VXRS_LOW = 0x309
NT_SIGINFO = 0x53494749
NT_TASKSTRUCT = 0x4
NT_VMCOREDD = 0x700
NT_X86_SHSTK = 0x204
NT_X86_XSAVE_LAYOUT = 0x205
NT_X86_XSTATE = 0x202
OCFS2_SUPER_MAGIC = 0x7461636f OCFS2_SUPER_MAGIC = 0x7461636f
OCRNL = 0x8 OCRNL = 0x8
OFDEL = 0x80 OFDEL = 0x80
@ -2446,6 +2701,59 @@ const (
PERF_RECORD_MISC_USER = 0x2 PERF_RECORD_MISC_USER = 0x2
PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7
PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000
PF_ALG = 0x26
PF_APPLETALK = 0x5
PF_ASH = 0x12
PF_ATMPVC = 0x8
PF_ATMSVC = 0x14
PF_AX25 = 0x3
PF_BLUETOOTH = 0x1f
PF_BRIDGE = 0x7
PF_CAIF = 0x25
PF_CAN = 0x1d
PF_DECnet = 0xc
PF_ECONET = 0x13
PF_FILE = 0x1
PF_IB = 0x1b
PF_IEEE802154 = 0x24
PF_INET = 0x2
PF_INET6 = 0xa
PF_IPX = 0x4
PF_IRDA = 0x17
PF_ISDN = 0x22
PF_IUCV = 0x20
PF_KCM = 0x29
PF_KEY = 0xf
PF_LLC = 0x1a
PF_LOCAL = 0x1
PF_MAX = 0x2e
PF_MCTP = 0x2d
PF_MPLS = 0x1c
PF_NETBEUI = 0xd
PF_NETLINK = 0x10
PF_NETROM = 0x6
PF_NFC = 0x27
PF_PACKET = 0x11
PF_PHONET = 0x23
PF_PPPOX = 0x18
PF_QIPCRTR = 0x2a
PF_R = 0x4
PF_RDS = 0x15
PF_ROSE = 0xb
PF_ROUTE = 0x10
PF_RXRPC = 0x21
PF_SECURITY = 0xe
PF_SMC = 0x2b
PF_SNA = 0x16
PF_TIPC = 0x1e
PF_UNIX = 0x1
PF_UNSPEC = 0x0
PF_VSOCK = 0x28
PF_W = 0x2
PF_WANPIPE = 0x19
PF_X = 0x1
PF_X25 = 0x9
PF_XDP = 0x2c
PID_FS_MAGIC = 0x50494446 PID_FS_MAGIC = 0x50494446
PIPEFS_MAGIC = 0x50495045 PIPEFS_MAGIC = 0x50495045
PPPIOCGNPMODE = 0xc008744c PPPIOCGNPMODE = 0xc008744c
@ -2485,6 +2793,10 @@ const (
PR_FP_EXC_UND = 0x40000 PR_FP_EXC_UND = 0x40000
PR_FP_MODE_FR = 0x1 PR_FP_MODE_FR = 0x1
PR_FP_MODE_FRE = 0x2 PR_FP_MODE_FRE = 0x2
PR_FUTEX_HASH = 0x4e
PR_FUTEX_HASH_GET_IMMUTABLE = 0x3
PR_FUTEX_HASH_GET_SLOTS = 0x2
PR_FUTEX_HASH_SET_SLOTS = 0x1
PR_GET_AUXV = 0x41555856 PR_GET_AUXV = 0x41555856
PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_CHILD_SUBREAPER = 0x25
PR_GET_DUMPABLE = 0x3 PR_GET_DUMPABLE = 0x3
@ -2644,6 +2956,10 @@ const (
PR_TAGGED_ADDR_ENABLE = 0x1 PR_TAGGED_ADDR_ENABLE = 0x1
PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_DISABLE = 0x1f
PR_TASK_PERF_EVENTS_ENABLE = 0x20 PR_TASK_PERF_EVENTS_ENABLE = 0x20
PR_TIMER_CREATE_RESTORE_IDS = 0x4d
PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2
PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0
PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1
PR_TIMING_STATISTICAL = 0x0 PR_TIMING_STATISTICAL = 0x0
PR_TIMING_TIMESTAMP = 0x1 PR_TIMING_TIMESTAMP = 0x1
PR_TSC_ENABLE = 0x1 PR_TSC_ENABLE = 0x1
@ -2724,6 +3040,7 @@ const (
PTRACE_SETREGSET = 0x4205 PTRACE_SETREGSET = 0x4205
PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGINFO = 0x4203
PTRACE_SETSIGMASK = 0x420b PTRACE_SETSIGMASK = 0x420b
PTRACE_SET_SYSCALL_INFO = 0x4212
PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210
PTRACE_SINGLESTEP = 0x9 PTRACE_SINGLESTEP = 0x9
PTRACE_SYSCALL = 0x18 PTRACE_SYSCALL = 0x18
@ -2732,6 +3049,23 @@ const (
PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_NONE = 0x0
PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSCALL_INFO_SECCOMP = 0x3
PTRACE_TRACEME = 0x0 PTRACE_TRACEME = 0x0
PT_AARCH64_MEMTAG_MTE = 0x70000002
PT_DYNAMIC = 0x2
PT_GNU_EH_FRAME = 0x6474e550
PT_GNU_PROPERTY = 0x6474e553
PT_GNU_RELRO = 0x6474e552
PT_GNU_STACK = 0x6474e551
PT_HIOS = 0x6fffffff
PT_HIPROC = 0x7fffffff
PT_INTERP = 0x3
PT_LOAD = 0x1
PT_LOOS = 0x60000000
PT_LOPROC = 0x70000000
PT_NOTE = 0x4
PT_NULL = 0x0
PT_PHDR = 0x6
PT_SHLIB = 0x5
PT_TLS = 0x7
P_ALL = 0x0 P_ALL = 0x0
P_PGID = 0x2 P_PGID = 0x2
P_PID = 0x1 P_PID = 0x1
@ -2787,7 +3121,7 @@ const (
RTAX_UNSPEC = 0x0 RTAX_UNSPEC = 0x0
RTAX_WINDOW = 0x3 RTAX_WINDOW = 0x3
RTA_ALIGNTO = 0x4 RTA_ALIGNTO = 0x4
RTA_MAX = 0x1e RTA_MAX = 0x1f
RTCF_DIRECTSRC = 0x4000000 RTCF_DIRECTSRC = 0x4000000
RTCF_DOREDIRECT = 0x1000000 RTCF_DOREDIRECT = 0x1000000
RTCF_LOG = 0x2000000 RTCF_LOG = 0x2000000
@ -2864,10 +3198,12 @@ const (
RTM_DELACTION = 0x31 RTM_DELACTION = 0x31
RTM_DELADDR = 0x15 RTM_DELADDR = 0x15
RTM_DELADDRLABEL = 0x49 RTM_DELADDRLABEL = 0x49
RTM_DELANYCAST = 0x3d
RTM_DELCHAIN = 0x65 RTM_DELCHAIN = 0x65
RTM_DELLINK = 0x11 RTM_DELLINK = 0x11
RTM_DELLINKPROP = 0x6d RTM_DELLINKPROP = 0x6d
RTM_DELMDB = 0x55 RTM_DELMDB = 0x55
RTM_DELMULTICAST = 0x39
RTM_DELNEIGH = 0x1d RTM_DELNEIGH = 0x1d
RTM_DELNETCONF = 0x51 RTM_DELNETCONF = 0x51
RTM_DELNEXTHOP = 0x69 RTM_DELNEXTHOP = 0x69
@ -2917,11 +3253,13 @@ const (
RTM_NEWACTION = 0x30 RTM_NEWACTION = 0x30
RTM_NEWADDR = 0x14 RTM_NEWADDR = 0x14
RTM_NEWADDRLABEL = 0x48 RTM_NEWADDRLABEL = 0x48
RTM_NEWANYCAST = 0x3c
RTM_NEWCACHEREPORT = 0x60 RTM_NEWCACHEREPORT = 0x60
RTM_NEWCHAIN = 0x64 RTM_NEWCHAIN = 0x64
RTM_NEWLINK = 0x10 RTM_NEWLINK = 0x10
RTM_NEWLINKPROP = 0x6c RTM_NEWLINKPROP = 0x6c
RTM_NEWMDB = 0x54 RTM_NEWMDB = 0x54
RTM_NEWMULTICAST = 0x38
RTM_NEWNDUSEROPT = 0x44 RTM_NEWNDUSEROPT = 0x44
RTM_NEWNEIGH = 0x1c RTM_NEWNEIGH = 0x1c
RTM_NEWNEIGHTBL = 0x40 RTM_NEWNEIGHTBL = 0x40
@ -2970,6 +3308,7 @@ const (
RTPROT_NTK = 0xf RTPROT_NTK = 0xf
RTPROT_OPENR = 0x63 RTPROT_OPENR = 0x63
RTPROT_OSPF = 0xbc RTPROT_OSPF = 0xbc
RTPROT_OVN = 0x54
RTPROT_RA = 0x9 RTPROT_RA = 0x9
RTPROT_REDIRECT = 0x1 RTPROT_REDIRECT = 0x1
RTPROT_RIP = 0xbd RTPROT_RIP = 0xbd
@ -2987,11 +3326,12 @@ const (
RUSAGE_THREAD = 0x1 RUSAGE_THREAD = 0x1
RWF_APPEND = 0x10 RWF_APPEND = 0x10
RWF_ATOMIC = 0x40 RWF_ATOMIC = 0x40
RWF_DONTCACHE = 0x80
RWF_DSYNC = 0x2 RWF_DSYNC = 0x2
RWF_HIPRI = 0x1 RWF_HIPRI = 0x1
RWF_NOAPPEND = 0x20 RWF_NOAPPEND = 0x20
RWF_NOWAIT = 0x8 RWF_NOWAIT = 0x8
RWF_SUPPORTED = 0x7f RWF_SUPPORTED = 0xff
RWF_SYNC = 0x4 RWF_SYNC = 0x4
RWF_WRITE_LIFE_NOT_SET = 0x0 RWF_WRITE_LIFE_NOT_SET = 0x0
SCHED_BATCH = 0x3 SCHED_BATCH = 0x3
@ -3059,6 +3399,47 @@ const (
SEEK_MAX = 0x4 SEEK_MAX = 0x4
SEEK_SET = 0x0 SEEK_SET = 0x0
SELINUX_MAGIC = 0xf97cff8c SELINUX_MAGIC = 0xf97cff8c
SHF_ALLOC = 0x2
SHF_EXCLUDE = 0x8000000
SHF_EXECINSTR = 0x4
SHF_GROUP = 0x200
SHF_INFO_LINK = 0x40
SHF_LINK_ORDER = 0x80
SHF_MASKOS = 0xff00000
SHF_MASKPROC = 0xf0000000
SHF_MERGE = 0x10
SHF_ORDERED = 0x4000000
SHF_OS_NONCONFORMING = 0x100
SHF_RELA_LIVEPATCH = 0x100000
SHF_RO_AFTER_INIT = 0x200000
SHF_STRINGS = 0x20
SHF_TLS = 0x400
SHF_WRITE = 0x1
SHN_ABS = 0xfff1
SHN_COMMON = 0xfff2
SHN_HIPROC = 0xff1f
SHN_HIRESERVE = 0xffff
SHN_LIVEPATCH = 0xff20
SHN_LOPROC = 0xff00
SHN_LORESERVE = 0xff00
SHN_UNDEF = 0x0
SHT_DYNAMIC = 0x6
SHT_DYNSYM = 0xb
SHT_HASH = 0x5
SHT_HIPROC = 0x7fffffff
SHT_HIUSER = 0xffffffff
SHT_LOPROC = 0x70000000
SHT_LOUSER = 0x80000000
SHT_NOBITS = 0x8
SHT_NOTE = 0x7
SHT_NULL = 0x0
SHT_NUM = 0xc
SHT_PROGBITS = 0x1
SHT_REL = 0x9
SHT_RELA = 0x4
SHT_SHLIB = 0xa
SHT_STRTAB = 0x3
SHT_SYMTAB = 0x2
SHUT_RD = 0x0 SHUT_RD = 0x0
SHUT_RDWR = 0x2 SHUT_RDWR = 0x2
SHUT_WR = 0x1 SHUT_WR = 0x1
@ -3271,6 +3652,7 @@ const (
STATX_BTIME = 0x800 STATX_BTIME = 0x800
STATX_CTIME = 0x80 STATX_CTIME = 0x80
STATX_DIOALIGN = 0x2000 STATX_DIOALIGN = 0x2000
STATX_DIO_READ_ALIGN = 0x20000
STATX_GID = 0x10 STATX_GID = 0x10
STATX_INO = 0x100 STATX_INO = 0x100
STATX_MNT_ID = 0x1000 STATX_MNT_ID = 0x1000
@ -3284,6 +3666,16 @@ const (
STATX_UID = 0x8 STATX_UID = 0x8
STATX_WRITE_ATOMIC = 0x10000 STATX_WRITE_ATOMIC = 0x10000
STATX__RESERVED = 0x80000000 STATX__RESERVED = 0x80000000
STB_GLOBAL = 0x1
STB_LOCAL = 0x0
STB_WEAK = 0x2
STT_COMMON = 0x5
STT_FILE = 0x4
STT_FUNC = 0x2
STT_NOTYPE = 0x0
STT_OBJECT = 0x1
STT_SECTION = 0x3
STT_TLS = 0x6
SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_AFTER = 0x4
SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1
SYNC_FILE_RANGE_WRITE = 0x2 SYNC_FILE_RANGE_WRITE = 0x2
@ -3322,7 +3714,7 @@ const (
TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_NAME = "TASKSTATS"
TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_GENL_VERSION = 0x1
TASKSTATS_TYPE_MAX = 0x6 TASKSTATS_TYPE_MAX = 0x6
TASKSTATS_VERSION = 0xe TASKSTATS_VERSION = 0x10
TCIFLUSH = 0x0 TCIFLUSH = 0x0
TCIOFF = 0x2 TCIOFF = 0x2
TCIOFLUSH = 0x2 TCIOFLUSH = 0x2
@ -3392,8 +3784,6 @@ const (
TCP_TX_DELAY = 0x25 TCP_TX_DELAY = 0x25
TCP_ULP = 0x1f TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12 TCP_USER_TIMEOUT = 0x12
TCP_V4_FLOW = 0x1
TCP_V6_FLOW = 0x5
TCP_WINDOW_CLAMP = 0xa TCP_WINDOW_CLAMP = 0xa
TCP_ZEROCOPY_RECEIVE = 0x23 TCP_ZEROCOPY_RECEIVE = 0x23
TFD_TIMER_ABSTIME = 0x1 TFD_TIMER_ABSTIME = 0x1
@ -3503,6 +3893,7 @@ const (
TP_STATUS_WRONG_FORMAT = 0x4 TP_STATUS_WRONG_FORMAT = 0x4
TRACEFS_MAGIC = 0x74726163 TRACEFS_MAGIC = 0x74726163
TS_COMM_LEN = 0x20 TS_COMM_LEN = 0x20
UBI_IOCECNFO = 0xc01c6f06
UDF_SUPER_MAGIC = 0x15013346 UDF_SUPER_MAGIC = 0x15013346
UDP_CORK = 0x1 UDP_CORK = 0x1
UDP_ENCAP = 0x64 UDP_ENCAP = 0x64
@ -3515,14 +3906,14 @@ const (
UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_RX = 0x66
UDP_NO_CHECK6_TX = 0x65 UDP_NO_CHECK6_TX = 0x65
UDP_SEGMENT = 0x67 UDP_SEGMENT = 0x67
UDP_V4_FLOW = 0x2
UDP_V6_FLOW = 0x6
UMOUNT_NOFOLLOW = 0x8 UMOUNT_NOFOLLOW = 0x8
USBDEVICE_SUPER_MAGIC = 0x9fa2 USBDEVICE_SUPER_MAGIC = 0x9fa2
UTIME_NOW = 0x3fffffff UTIME_NOW = 0x3fffffff
UTIME_OMIT = 0x3ffffffe UTIME_OMIT = 0x3ffffffe
V9FS_MAGIC = 0x1021997 V9FS_MAGIC = 0x1021997
VERASE = 0x2 VERASE = 0x2
VER_FLG_BASE = 0x1
VER_FLG_WEAK = 0x2
VINTR = 0x0 VINTR = 0x0
VKILL = 0x3 VKILL = 0x3
VLNEXT = 0xf VLNEXT = 0xf
@ -3559,7 +3950,7 @@ const (
WDIOS_TEMPPANIC = 0x4 WDIOS_TEMPPANIC = 0x4
WDIOS_UNKNOWN = -0x1 WDIOS_UNKNOWN = -0x1
WEXITED = 0x4 WEXITED = 0x4
WGALLOWEDIP_A_MAX = 0x3 WGALLOWEDIP_A_MAX = 0x4
WGDEVICE_A_MAX = 0x8 WGDEVICE_A_MAX = 0x8
WGPEER_A_MAX = 0xa WGPEER_A_MAX = 0xa
WG_CMD_MAX = 0x1 WG_CMD_MAX = 0x1
@ -3673,6 +4064,7 @@ const (
XDP_SHARED_UMEM = 0x1 XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7 XDP_STATISTICS = 0x7
XDP_TXMD_FLAGS_CHECKSUM = 0x2 XDP_TXMD_FLAGS_CHECKSUM = 0x2
XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4
XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TXMD_FLAGS_TIMESTAMP = 0x1
XDP_TX_METADATA = 0x2 XDP_TX_METADATA = 0x2
XDP_TX_RING = 0x3 XDP_TX_RING = 0x3

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x30 CS8 = 0x30
CSIZE = 0x30 CSIZE = 0x30
CSTOPB = 0x40 CSTOPB = 0x40
DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11 ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12 ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200 ECHOCTL = 0x200
@ -115,6 +116,8 @@ const (
IEXTEN = 0x8000 IEXTEN = 0x8000
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800 IN_NONBLOCK = 0x800
IOCTL_MEI_NOTIFY_GET = 0x80044803
IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWLABEL_MASK = 0xffff0f00
@ -360,6 +363,7 @@ const (
SO_OOBINLINE = 0xa SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10 SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11 SO_PEERCRED = 0x11
@ -372,6 +376,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12 SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14 SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14 SO_RCVTIMEO_OLD = 0x14

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x30 CS8 = 0x30
CSIZE = 0x30 CSIZE = 0x30
CSTOPB = 0x40 CSTOPB = 0x40
DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11 ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12 ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200 ECHOCTL = 0x200
@ -115,6 +116,8 @@ const (
IEXTEN = 0x8000 IEXTEN = 0x8000
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800 IN_NONBLOCK = 0x800
IOCTL_MEI_NOTIFY_GET = 0x80044803
IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWLABEL_MASK = 0xffff0f00
@ -361,6 +364,7 @@ const (
SO_OOBINLINE = 0xa SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10 SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11 SO_PEERCRED = 0x11
@ -373,6 +377,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12 SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14 SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14 SO_RCVTIMEO_OLD = 0x14

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x30 CS8 = 0x30
CSIZE = 0x30 CSIZE = 0x30
CSTOPB = 0x40 CSTOPB = 0x40
DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11 ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12 ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200 ECHOCTL = 0x200
@ -114,6 +115,8 @@ const (
IEXTEN = 0x8000 IEXTEN = 0x8000
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800 IN_NONBLOCK = 0x800
IOCTL_MEI_NOTIFY_GET = 0x80044803
IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWLABEL_MASK = 0xffff0f00
@ -366,6 +369,7 @@ const (
SO_OOBINLINE = 0xa SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10 SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11 SO_PEERCRED = 0x11
@ -378,6 +382,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12 SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14 SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14 SO_RCVTIMEO_OLD = 0x14

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x30 CS8 = 0x30
CSIZE = 0x30 CSIZE = 0x30
CSTOPB = 0x40 CSTOPB = 0x40
DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11 ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12 ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200 ECHOCTL = 0x200
@ -119,6 +120,8 @@ const (
IEXTEN = 0x8000 IEXTEN = 0x8000
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800 IN_NONBLOCK = 0x800
IOCTL_MEI_NOTIFY_GET = 0x80044803
IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWLABEL_MASK = 0xffff0f00
@ -359,6 +362,7 @@ const (
SO_OOBINLINE = 0xa SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10 SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11 SO_PEERCRED = 0x11
@ -371,6 +375,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12 SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14 SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14 SO_RCVTIMEO_OLD = 0x14

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x30 CS8 = 0x30
CSIZE = 0x30 CSIZE = 0x30
CSTOPB = 0x40 CSTOPB = 0x40
DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11 ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12 ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200 ECHOCTL = 0x200
@ -115,6 +116,8 @@ const (
IEXTEN = 0x8000 IEXTEN = 0x8000
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800 IN_NONBLOCK = 0x800
IOCTL_MEI_NOTIFY_GET = 0x80044803
IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWLABEL_MASK = 0xffff0f00
@ -353,6 +356,7 @@ const (
SO_OOBINLINE = 0xa SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10 SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11 SO_PEERCRED = 0x11
@ -365,6 +369,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12 SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14 SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14 SO_RCVTIMEO_OLD = 0x14

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x30 CS8 = 0x30
CSIZE = 0x30 CSIZE = 0x30
CSTOPB = 0x40 CSTOPB = 0x40
DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11 ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12 ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200 ECHOCTL = 0x200
@ -114,6 +115,8 @@ const (
IEXTEN = 0x100 IEXTEN = 0x100
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80 IN_NONBLOCK = 0x80
IOCTL_MEI_NOTIFY_GET = 0x40044803
IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff IPV6_FLOWLABEL_MASK = 0xfffff
@ -359,6 +362,7 @@ const (
SO_OOBINLINE = 0x100 SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11 SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12 SO_PEERCRED = 0x12
@ -371,6 +375,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004 SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006 SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006 SO_RCVTIMEO_OLD = 0x1006

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x30 CS8 = 0x30
CSIZE = 0x30 CSIZE = 0x30
CSTOPB = 0x40 CSTOPB = 0x40
DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11 ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12 ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200 ECHOCTL = 0x200
@ -114,6 +115,8 @@ const (
IEXTEN = 0x100 IEXTEN = 0x100
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80 IN_NONBLOCK = 0x80
IOCTL_MEI_NOTIFY_GET = 0x40044803
IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff IPV6_FLOWLABEL_MASK = 0xfffff
@ -359,6 +362,7 @@ const (
SO_OOBINLINE = 0x100 SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11 SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12 SO_PEERCRED = 0x12
@ -371,6 +375,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004 SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006 SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006 SO_RCVTIMEO_OLD = 0x1006

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x30 CS8 = 0x30
CSIZE = 0x30 CSIZE = 0x30
CSTOPB = 0x40 CSTOPB = 0x40
DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11 ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12 ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200 ECHOCTL = 0x200
@ -114,6 +115,8 @@ const (
IEXTEN = 0x100 IEXTEN = 0x100
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80 IN_NONBLOCK = 0x80
IOCTL_MEI_NOTIFY_GET = 0x40044803
IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWLABEL_MASK = 0xffff0f00
@ -359,6 +362,7 @@ const (
SO_OOBINLINE = 0x100 SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11 SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12 SO_PEERCRED = 0x12
@ -371,6 +375,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004 SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006 SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006 SO_RCVTIMEO_OLD = 0x1006

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x30 CS8 = 0x30
CSIZE = 0x30 CSIZE = 0x30
CSTOPB = 0x40 CSTOPB = 0x40
DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11 ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12 ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200 ECHOCTL = 0x200
@ -114,6 +115,8 @@ const (
IEXTEN = 0x100 IEXTEN = 0x100
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80 IN_NONBLOCK = 0x80
IOCTL_MEI_NOTIFY_GET = 0x40044803
IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWLABEL_MASK = 0xffff0f00
@ -359,6 +362,7 @@ const (
SO_OOBINLINE = 0x100 SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11 SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12 SO_PEERCRED = 0x12
@ -371,6 +375,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004 SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006 SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006 SO_RCVTIMEO_OLD = 0x1006

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x300 CS8 = 0x300
CSIZE = 0x300 CSIZE = 0x300
CSTOPB = 0x400 CSTOPB = 0x400
DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11 ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12 ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40 ECHOCTL = 0x40
@ -114,6 +115,8 @@ const (
IEXTEN = 0x400 IEXTEN = 0x400
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800 IN_NONBLOCK = 0x800
IOCTL_MEI_NOTIFY_GET = 0x40044803
IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff IPV6_FLOWLABEL_MASK = 0xfffff
@ -414,6 +417,7 @@ const (
SO_OOBINLINE = 0xa SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14 SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15 SO_PEERCRED = 0x15
@ -426,6 +430,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10 SO_RCVLOWAT = 0x10
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x12 SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12 SO_RCVTIMEO_OLD = 0x12

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x300 CS8 = 0x300
CSIZE = 0x300 CSIZE = 0x300
CSTOPB = 0x400 CSTOPB = 0x400
DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11 ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12 ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40 ECHOCTL = 0x40
@ -114,6 +115,8 @@ const (
IEXTEN = 0x400 IEXTEN = 0x400
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800 IN_NONBLOCK = 0x800
IOCTL_MEI_NOTIFY_GET = 0x40044803
IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff IPV6_FLOWLABEL_MASK = 0xfffff
@ -418,6 +421,7 @@ const (
SO_OOBINLINE = 0xa SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14 SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15 SO_PEERCRED = 0x15
@ -430,6 +434,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10 SO_RCVLOWAT = 0x10
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x12 SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12 SO_RCVTIMEO_OLD = 0x12

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x300 CS8 = 0x300
CSIZE = 0x300 CSIZE = 0x300
CSTOPB = 0x400 CSTOPB = 0x400
DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11 ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12 ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40 ECHOCTL = 0x40
@ -114,6 +115,8 @@ const (
IEXTEN = 0x400 IEXTEN = 0x400
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800 IN_NONBLOCK = 0x800
IOCTL_MEI_NOTIFY_GET = 0x40044803
IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWLABEL_MASK = 0xffff0f00
@ -418,6 +421,7 @@ const (
SO_OOBINLINE = 0xa SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14 SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15 SO_PEERCRED = 0x15
@ -430,6 +434,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10 SO_RCVLOWAT = 0x10
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x12 SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12 SO_RCVTIMEO_OLD = 0x12

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x30 CS8 = 0x30
CSIZE = 0x30 CSIZE = 0x30
CSTOPB = 0x40 CSTOPB = 0x40
DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11 ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12 ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200 ECHOCTL = 0x200
@ -114,6 +115,8 @@ const (
IEXTEN = 0x8000 IEXTEN = 0x8000
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800 IN_NONBLOCK = 0x800
IOCTL_MEI_NOTIFY_GET = 0x80044803
IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWLABEL_MASK = 0xffff0f00
@ -350,6 +353,7 @@ const (
SO_OOBINLINE = 0xa SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10 SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11 SO_PEERCRED = 0x11
@ -362,6 +366,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12 SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14 SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14 SO_RCVTIMEO_OLD = 0x14

View File

@ -68,6 +68,7 @@ const (
CS8 = 0x30 CS8 = 0x30
CSIZE = 0x30 CSIZE = 0x30
CSTOPB = 0x40 CSTOPB = 0x40
DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11 ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12 ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200 ECHOCTL = 0x200
@ -114,6 +115,8 @@ const (
IEXTEN = 0x8000 IEXTEN = 0x8000
IN_CLOEXEC = 0x80000 IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800 IN_NONBLOCK = 0x800
IOCTL_MEI_NOTIFY_GET = 0x80044803
IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff IPV6_FLOWLABEL_MASK = 0xfffff
@ -422,6 +425,7 @@ const (
SO_OOBINLINE = 0xa SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10 SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c SO_PASSPIDFD = 0x4c
SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22 SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11 SO_PEERCRED = 0x11
@ -434,6 +438,7 @@ const (
SO_RCVBUFFORCE = 0x21 SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12 SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b SO_RCVMARK = 0x4b
SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14 SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14 SO_RCVTIMEO_OLD = 0x14

View File

@ -71,6 +71,7 @@ const (
CS8 = 0x30 CS8 = 0x30
CSIZE = 0x30 CSIZE = 0x30
CSTOPB = 0x40 CSTOPB = 0x40
DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11 ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12 ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200 ECHOCTL = 0x200
@ -118,6 +119,8 @@ const (
IEXTEN = 0x8000 IEXTEN = 0x8000
IN_CLOEXEC = 0x400000 IN_CLOEXEC = 0x400000
IN_NONBLOCK = 0x4000 IN_NONBLOCK = 0x4000
IOCTL_MEI_NOTIFY_GET = 0x40044803
IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff IPV6_FLOWLABEL_MASK = 0xfffff
@ -461,6 +464,7 @@ const (
SO_OOBINLINE = 0x100 SO_OOBINLINE = 0x100
SO_PASSCRED = 0x2 SO_PASSCRED = 0x2
SO_PASSPIDFD = 0x55 SO_PASSPIDFD = 0x55
SO_PASSRIGHTS = 0x5c
SO_PASSSEC = 0x1f SO_PASSSEC = 0x1f
SO_PEEK_OFF = 0x26 SO_PEEK_OFF = 0x26
SO_PEERCRED = 0x40 SO_PEERCRED = 0x40
@ -473,6 +477,7 @@ const (
SO_RCVBUFFORCE = 0x100b SO_RCVBUFFORCE = 0x100b
SO_RCVLOWAT = 0x800 SO_RCVLOWAT = 0x800
SO_RCVMARK = 0x54 SO_RCVMARK = 0x54
SO_RCVPRIORITY = 0x5b
SO_RCVTIMEO = 0x2000 SO_RCVTIMEO = 0x2000
SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_NEW = 0x44
SO_RCVTIMEO_OLD = 0x2000 SO_RCVTIMEO_OLD = 0x2000

View File

@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) {
} }
return return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setMemPolicy(mode int, mask *CPUSet, size int) (err error) {
_, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size))
if e1 != 0 {
err = errnoErr(e1)
}
return
}

View File

@ -72,7 +72,7 @@ import (
//go:cgo_import_dynamic libc_kill kill "libc.so" //go:cgo_import_dynamic libc_kill kill "libc.so"
//go:cgo_import_dynamic libc_lchown lchown "libc.so" //go:cgo_import_dynamic libc_lchown lchown "libc.so"
//go:cgo_import_dynamic libc_link link "libc.so" //go:cgo_import_dynamic libc_link link "libc.so"
//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so" //go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so"
//go:cgo_import_dynamic libc_lstat lstat "libc.so" //go:cgo_import_dynamic libc_lstat lstat "libc.so"
//go:cgo_import_dynamic libc_madvise madvise "libc.so" //go:cgo_import_dynamic libc_madvise madvise "libc.so"
//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" //go:cgo_import_dynamic libc_mkdir mkdir "libc.so"
@ -221,7 +221,7 @@ import (
//go:linkname procKill libc_kill //go:linkname procKill libc_kill
//go:linkname procLchown libc_lchown //go:linkname procLchown libc_lchown
//go:linkname procLink libc_link //go:linkname procLink libc_link
//go:linkname proc__xnet_llisten libc___xnet_llisten //go:linkname proc__xnet_listen libc___xnet_listen
//go:linkname procLstat libc_lstat //go:linkname procLstat libc_lstat
//go:linkname procMadvise libc_madvise //go:linkname procMadvise libc_madvise
//go:linkname procMkdir libc_mkdir //go:linkname procMkdir libc_mkdir
@ -371,7 +371,7 @@ var (
procKill, procKill,
procLchown, procLchown,
procLink, procLink,
proc__xnet_llisten, proc__xnet_listen,
procLstat, procLstat,
procMadvise, procMadvise,
procMkdir, procMkdir,
@ -1178,7 +1178,7 @@ func Link(path string, link string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, backlog int) (err error) { func Listen(s int, backlog int) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
if e1 != 0 { if e1 != 0 {
err = errnoErr(e1) err = errnoErr(e1)
} }

View File

@ -462,4 +462,5 @@ const (
SYS_GETXATTRAT = 464 SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465 SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466 SYS_REMOVEXATTRAT = 466
SYS_OPEN_TREE_ATTR = 467
) )

View File

@ -385,4 +385,5 @@ const (
SYS_GETXATTRAT = 464 SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465 SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466 SYS_REMOVEXATTRAT = 466
SYS_OPEN_TREE_ATTR = 467
) )

View File

@ -426,4 +426,5 @@ const (
SYS_GETXATTRAT = 464 SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465 SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466 SYS_REMOVEXATTRAT = 466
SYS_OPEN_TREE_ATTR = 467
) )

View File

@ -329,4 +329,5 @@ const (
SYS_GETXATTRAT = 464 SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465 SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466 SYS_REMOVEXATTRAT = 466
SYS_OPEN_TREE_ATTR = 467
) )

Some files were not shown because too many files have changed in this diff Show More