TUN-5995: Update prometheus to 1.12.1 to avoid vulnerabilities

Nuno Diegues 2022-04-06 09:32:05 +01:00
parent a0f6eb9d5e
commit b12272529f
67 changed files with 2675 additions and 477 deletions

go.mod (8 changes)

@ -22,7 +22,7 @@ require (
github.com/miekg/dns v1.1.45
github.com/mitchellh/go-homedir v1.1.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/client_golang v1.12.1
github.com/prometheus/client_model v0.2.0
github.com/rivo/tview v0.0.0-20200712113419-c65badfc3d92
github.com/rs/zerolog v1.20.0
@ -32,7 +32,7 @@ require (
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
gopkg.in/coreos/go-oidc.v2 v2.2.1
gopkg.in/natefinch/lumberjack.v2 v2.0.0
@ -46,7 +46,7 @@ require (
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cheekybits/genny v1.0.0 // indirect
github.com/coredns/caddy v1.1.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
@ -79,7 +79,7 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rivo/uniseg v0.1.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
golang.org/x/mod v0.4.2 // indirect

go.sum (14 changes)

@ -103,9 +103,11 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 h1:JLaf/iINcLyjwbtTsCJjc6rtlASgHeIJPrB6QmwURnA=
github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
github.com/chungthuang/quic-go v0.24.1-0.20220110095058-981dc498cb62 h1:PLTB4iA6sOgAItzQY642tYdcGKfG/7i2gu93JQGgUcM=
@ -477,8 +479,9 @@ github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -494,8 +497,9 @@ github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@ -508,6 +512,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs=
github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@ -796,8 +801,9 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=


@ -1,8 +0,0 @@
language: go
go:
- "1.x"
- master
env:
- TAGS=""
- TAGS="-tags purego"
script: go test $TAGS -v ./...


@ -1,7 +1,7 @@
# xxhash
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
@ -64,4 +64,6 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes'
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)


@ -193,7 +193,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
b, d.v4 = consumeUint64(b)
b, d.total = consumeUint64(b)
copy(d.mem[:], b)
b = b[len(d.mem):]
d.n = int(d.total % uint64(len(d.mem)))
return nil
}


@ -6,7 +6,7 @@
// Register allocation:
// AX h
// CX pointer to advance through b
// SI pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
@ -16,39 +16,39 @@
// R12 tmp
// R13 prime1v
// R14 prime2v
// R15 prime4v
// DI prime4v
// round reads from and advances the buffer pointer in CX.
// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
MOVQ (CX), R12 \
ADDQ $8, CX \
MOVQ (SI), R12 \
ADDQ $8, SI \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
ADDQ R15, acc
ADDQ DI, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
MOVQ ·prime4v(SB), R15
MOVQ ·prime4v(SB), DI
// Load slice.
MOVQ b_base+0(FP), CX
MOVQ b_base+0(FP), SI
MOVQ b_len+8(FP), DX
LEAQ (CX)(DX*1), BX
LEAQ (SI)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
XORQ R11, R11
SUBQ R13, R11
// Loop until CX > BX.
// Loop until SI > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ CX, BX
CMPQ SI, BX
JLE blockLoop
MOVQ R8, AX
@ -100,16 +100,16 @@ noBlocks:
afterBlocks:
ADDQ DX, AX
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
ADDQ $24, BX
CMPQ CX, BX
CMPQ SI, BX
JG fourByte
wordLoop:
// Calculate k1.
MOVQ (CX), R8
ADDQ $8, CX
MOVQ (SI), R8
ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
@ -117,18 +117,18 @@ wordLoop:
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
ADDQ R15, AX
ADDQ DI, AX
CMPQ CX, BX
CMPQ SI, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
CMPQ CX, BX
CMPQ SI, BX
JG singles
MOVL (CX), R8
ADDQ $4, CX
MOVL (SI), R8
ADDQ $4, SI
IMULQ R13, R8
XORQ R8, AX
@ -138,19 +138,19 @@ fourByte:
singles:
ADDQ $4, BX
CMPQ CX, BX
CMPQ SI, BX
JGE finalize
singlesLoop:
MOVBQZX (CX), R12
ADDQ $1, CX
MOVBQZX (SI), R12
ADDQ $1, SI
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
CMPQ CX, BX
CMPQ SI, BX
JL singlesLoop
finalize:
@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
MOVQ ·prime2v(SB), R14
// Load slice.
MOVQ b_base+8(FP), CX
MOVQ b_base+8(FP), SI
MOVQ b_len+16(FP), DX
LEAQ (CX)(DX*1), BX
LEAQ (SI)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
@ -199,7 +199,7 @@ blockLoop:
round(R10)
round(R11)
CMPQ CX, BX
CMPQ SI, BX
JLE blockLoop
// Copy vN back to d.
@ -208,8 +208,8 @@ blockLoop:
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
// The number of bytes written is CX minus the old base pointer.
SUBQ b_base+8(FP), CX
MOVQ CX, ret+32(FP)
// The number of bytes written is SI minus the old base pointer.
SUBQ b_base+8(FP), SI
MOVQ SI, ret+32(FP)
RET


@ -6,41 +6,52 @@
package xxhash
import (
"reflect"
"unsafe"
)
// Notes:
//
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about these unsafe conversions.
//
// In the future it's possible that compiler optimizations will make these
// unsafe operations unnecessary: https://golang.org/issue/2205.
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
//
// Both of these wrapper functions still incur function call overhead since they
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
// for strings to squeeze out a bit more speed. Mid-stack inlining should
// eventually fix this.
// var b []byte
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
// bh.Len = len(s)
// bh.Cap = len(s)
//
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
// weight to this sequence of expressions that any function that uses it will
// not be inlined. Instead, the functions below use a different unsafe
// conversion designed to minimize the inliner weight and allow both to be
// inlined. There is also a test (TestInlining) which verifies that these are
// inlined.
//
// See https://github.com/golang/go/issues/42739 for discussion.
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
var b []byte
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
bh.Len = len(s)
bh.Cap = len(s)
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
return Sum64(b)
}
// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
var b []byte
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
bh.Len = len(s)
bh.Cap = len(s)
return d.Write(b)
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
// d.Write always returns len(s), nil.
// Ignoring the return output and returning these fixed values buys a
// savings of 6 in the inliner's cost model.
return len(s), nil
}
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
s string
cap int
}

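For context on the change above: Sum64String and Digest.WriteString are the exported entry points for this zero-copy path. A minimal usage sketch against the vendored github.com/cespare/xxhash/v2 API (the input strings are illustrative only):

package main

import (
    "fmt"

    "github.com/cespare/xxhash/v2"
)

func main() {
    // One-shot hash of a string without first copying it into a []byte.
    fmt.Printf("%016x\n", xxhash.Sum64String("hello, world"))

    // Streaming variant; WriteString always returns len(s), nil.
    d := xxhash.New()
    d.WriteString("hello, ")
    d.WriteString("world")
    fmt.Printf("%016x\n", d.Sum64())
}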

@ -1 +1 @@
See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).


@ -0,0 +1,38 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import "runtime/debug"
// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewBuildInfoCollector instead.
func NewBuildInfoCollector() Collector {
path, version, sum := "unknown", "unknown", "unknown"
if bi, ok := debug.ReadBuildInfo(); ok {
path = bi.Main.Path
version = bi.Main.Version
sum = bi.Main.Sum
}
c := &selfCollector{MustNewConstMetric(
NewDesc(
"go_build_info",
"Build information about the main Go module.",
nil, Labels{"path": path, "version": version, "checksum": sum},
),
GaugeValue, 1)}
c.init(c.self)
return c
}

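The function above is kept only for backwards compatibility; its deprecation notice points at the collectors package. A minimal sketch of the recommended registration path (the registry setup is illustrative):

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
    reg := prometheus.NewRegistry()
    // Exposes go_build_info{checksum=..., path=..., version=...} 1 for
    // binaries built with module support, as constructed above.
    reg.MustRegister(collectors.NewBuildInfoCollector())

    mfs, err := reg.Gather()
    if err != nil {
        panic(err)
    }
    for _, mf := range mfs {
        fmt.Println(mf.GetName()) // go_build_info
    }
}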

@ -118,3 +118,11 @@ func (c *selfCollector) Describe(ch chan<- *Desc) {
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
// collectorMetric is a metric that is also a collector.
// Because of selfCollector, most (if not all) Metrics in
// this package are also collectors.
type collectorMetric interface {
Metric
Collector
}


@ -133,10 +133,14 @@ func (c *counter) Inc() {
atomic.AddUint64(&c.valInt, 1)
}
func (c *counter) Write(out *dto.Metric) error {
func (c *counter) get() float64 {
fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
ival := atomic.LoadUint64(&c.valInt)
val := fval + float64(ival)
return fval + float64(ival)
}
func (c *counter) Write(out *dto.Metric) error {
val := c.get()
var exemplar *dto.Exemplar
if e := c.exemplar.Load(); e != nil {


@ -16,53 +16,11 @@ package prometheus
import (
"runtime"
"runtime/debug"
"sync"
"time"
)
type goCollector struct {
goroutinesDesc *Desc
threadsDesc *Desc
gcDesc *Desc
goInfoDesc *Desc
// ms... are memstats related.
msLast *runtime.MemStats // Previously collected memstats.
msLastTimestamp time.Time
msMtx sync.Mutex // Protects msLast and msLastTimestamp.
msMetrics memStatsMetrics
msRead func(*runtime.MemStats) // For mocking in tests.
msMaxWait time.Duration // Wait time for fresh memstats.
msMaxAge time.Duration // Maximum allowed age of old memstats.
}
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
return &goCollector{
goroutinesDesc: NewDesc(
"go_goroutines",
"Number of goroutines that currently exist.",
nil, nil),
threadsDesc: NewDesc(
"go_threads",
"Number of OS threads created.",
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the pause duration of garbage collection cycles.",
nil, nil),
goInfoDesc: NewDesc(
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
msLast: &runtime.MemStats{},
msRead: runtime.ReadMemStats,
msMaxWait: time.Second,
msMaxAge: 5 * time.Minute,
msMetrics: memStatsMetrics{
func goRuntimeMemStats() memStatsMetrics {
return memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
@ -239,14 +197,6 @@ func NewGoCollector() Collector {
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("last_gc_time_seconds"),
"Number of seconds since 1970 of last garbage collection.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
@ -256,41 +206,53 @@ func NewGoCollector() Collector {
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
},
},
}
}
func memstatNamespace(s string) string {
return "go_memstats_" + s
type baseGoCollector struct {
goroutinesDesc *Desc
threadsDesc *Desc
gcDesc *Desc
gcLastTimeDesc *Desc
goInfoDesc *Desc
}
func newBaseGoCollector() baseGoCollector {
return baseGoCollector{
goroutinesDesc: NewDesc(
"go_goroutines",
"Number of goroutines that currently exist.",
nil, nil),
threadsDesc: NewDesc(
"go_threads",
"Number of OS threads created.",
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the pause duration of garbage collection cycles.",
nil, nil),
gcLastTimeDesc: NewDesc(
memstatNamespace("last_gc_time_seconds"),
"Number of seconds since 1970 of last garbage collection.",
nil, nil),
goInfoDesc: NewDesc(
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
}
}
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
func (c *baseGoCollector) Describe(ch chan<- *Desc) {
ch <- c.goroutinesDesc
ch <- c.threadsDesc
ch <- c.gcDesc
ch <- c.gcLastTimeDesc
ch <- c.goInfoDesc
for _, i := range c.msMetrics {
ch <- i.desc
}
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
var (
ms = &runtime.MemStats{}
done = make(chan struct{})
)
// Start reading memstats first as it might take a while.
go func() {
c.msRead(ms)
c.msMtx.Lock()
c.msLast = ms
c.msLastTimestamp = time.Now()
c.msMtx.Unlock()
close(done)
}()
func (c *baseGoCollector) Collect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
n, _ := runtime.ThreadCreateProfile(nil)
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
@ -305,63 +267,19 @@ func (c *goCollector) Collect(ch chan<- Metric) {
}
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
timer := time.NewTimer(c.msMaxWait)
select {
case <-done: // Our own ReadMemStats succeeded in time. Use it.
timer.Stop() // Important for high collection frequencies to not pile up timers.
c.msCollect(ch, ms)
return
case <-timer.C: // Time out, use last memstats if possible. Continue below.
}
c.msMtx.Lock()
if time.Since(c.msLastTimestamp) < c.msMaxAge {
// Last memstats are recent enough. Collect from them under the lock.
c.msCollect(ch, c.msLast)
c.msMtx.Unlock()
return
}
// If we are here, the last memstats are too old or don't exist. We have
// to wait until our own ReadMemStats finally completes. For that to
// happen, we have to release the lock.
c.msMtx.Unlock()
<-done
c.msCollect(ch, ms)
}
func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
}
func memstatNamespace(s string) string {
return "go_memstats_" + s
}
// memStatsMetrics provide description, value, and value type for memstat metrics.
// memStatsMetrics provide description, evaluator, runtime/metrics name, and
// value type for memstat metrics.
type memStatsMetrics []struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}
// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewBuildInfoCollector instead.
func NewBuildInfoCollector() Collector {
path, version, sum := "unknown", "unknown", "unknown"
if bi, ok := debug.ReadBuildInfo(); ok {
path = bi.Main.Path
version = bi.Main.Version
sum = bi.Main.Sum
}
c := &selfCollector{MustNewConstMetric(
NewDesc(
"go_build_info",
"Build information about the main Go module.",
nil, Labels{"path": path, "version": version, "checksum": sum},
),
GaugeValue, 1)}
c.init(c.self)
return c
}


@ -0,0 +1,107 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !go1.17
// +build !go1.17
package prometheus
import (
"runtime"
"sync"
"time"
)
type goCollector struct {
base baseGoCollector
// ms... are memstats related.
msLast *runtime.MemStats // Previously collected memstats.
msLastTimestamp time.Time
msMtx sync.Mutex // Protects msLast and msLastTimestamp.
msMetrics memStatsMetrics
msRead func(*runtime.MemStats) // For mocking in tests.
msMaxWait time.Duration // Wait time for fresh memstats.
msMaxAge time.Duration // Maximum allowed age of old memstats.
}
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
return &goCollector{
base: newBaseGoCollector(),
msLast: &runtime.MemStats{},
msRead: runtime.ReadMemStats,
msMaxWait: time.Second,
msMaxAge: 5 * time.Minute,
msMetrics: goRuntimeMemStats(),
}
}
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
c.base.Describe(ch)
for _, i := range c.msMetrics {
ch <- i.desc
}
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
var (
ms = &runtime.MemStats{}
done = make(chan struct{})
)
// Start reading memstats first as it might take a while.
go func() {
c.msRead(ms)
c.msMtx.Lock()
c.msLast = ms
c.msLastTimestamp = time.Now()
c.msMtx.Unlock()
close(done)
}()
// Collect base non-memory metrics.
c.base.Collect(ch)
timer := time.NewTimer(c.msMaxWait)
select {
case <-done: // Our own ReadMemStats succeeded in time. Use it.
timer.Stop() // Important for high collection frequencies to not pile up timers.
c.msCollect(ch, ms)
return
case <-timer.C: // Time out, use last memstats if possible. Continue below.
}
c.msMtx.Lock()
if time.Since(c.msLastTimestamp) < c.msMaxAge {
// Last memstats are recent enough. Collect from them under the lock.
c.msCollect(ch, c.msLast)
c.msMtx.Unlock()
return
}
// If we are here, the last memstats are too old or don't exist. We have
// to wait until our own ReadMemStats finally completes. For that to
// happen, we have to release the lock.
c.msMtx.Unlock()
<-done
c.msCollect(ch, ms)
}
func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
}
}


@ -0,0 +1,408 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.17
// +build go1.17
package prometheus
import (
"math"
"runtime"
"runtime/metrics"
"strings"
"sync"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
)
type goCollector struct {
base baseGoCollector
// mu protects updates to all fields ensuring a consistent
// snapshot is always produced by Collect.
mu sync.Mutex
// rm... fields all pertain to the runtime/metrics package.
rmSampleBuf []metrics.Sample
rmSampleMap map[string]*metrics.Sample
rmMetrics []collectorMetric
// With Go 1.17, the runtime/metrics package was introduced.
// From that point on, metric names produced by the runtime/metrics
// package could be generated from runtime/metrics names. However,
// these differ from the old names for the same values.
//
// This field exists to export the same values under the old names
// as well.
msMetrics memStatsMetrics
}
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
descriptions := metrics.All()
// Collect all histogram samples so that we can get their buckets.
// The API guarantees that the buckets are always fixed for the lifetime
// of the process.
var histograms []metrics.Sample
for _, d := range descriptions {
if d.Kind == metrics.KindFloat64Histogram {
histograms = append(histograms, metrics.Sample{Name: d.Name})
}
}
metrics.Read(histograms)
bucketsMap := make(map[string][]float64)
for i := range histograms {
bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
}
// Generate a Desc and ValueType for each runtime/metrics metric.
metricSet := make([]collectorMetric, 0, len(descriptions))
sampleBuf := make([]metrics.Sample, 0, len(descriptions))
sampleMap := make(map[string]*metrics.Sample, len(descriptions))
for i := range descriptions {
d := &descriptions[i]
namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d)
if !ok {
// Just ignore this metric; we can't do anything with it here.
// If a user decides to use the latest version of Go, we don't want
// to fail here. This condition is tested elsewhere.
continue
}
// Set up sample buffer for reading, and a map
// for quick lookup of sample values.
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
var m collectorMetric
if d.Kind == metrics.KindFloat64Histogram {
_, hasSum := rmExactSumMap[d.Name]
unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
m = newBatchHistogram(
NewDesc(
BuildFQName(namespace, subsystem, name),
d.Description,
nil,
nil,
),
internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
hasSum,
)
} else if d.Cumulative {
m = NewCounter(CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: name,
Help: d.Description,
})
} else {
m = NewGauge(GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: name,
Help: d.Description,
})
}
metricSet = append(metricSet, m)
}
return &goCollector{
base: newBaseGoCollector(),
rmSampleBuf: sampleBuf,
rmSampleMap: sampleMap,
rmMetrics: metricSet,
msMetrics: goRuntimeMemStats(),
}
}
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
c.base.Describe(ch)
for _, i := range c.msMetrics {
ch <- i.desc
}
for _, m := range c.rmMetrics {
ch <- m.Desc()
}
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
// Collect base non-memory metrics.
c.base.Collect(ch)
// Collect must be thread-safe, so prevent concurrent use of
// rmSampleBuf. Just read into rmSampleBuf but write all the data
// we get into our Metrics or MemStats.
//
// This lock also ensures that the Metrics we send out are all from
// the same updates, ensuring their mutual consistency insofar as
// is guaranteed by the runtime/metrics package.
//
// N.B. This locking is heavy-handed, but Collect is expected to be called
// relatively infrequently. Also the core operation here, metrics.Read,
// is fast (O(tens of microseconds)) so contention should certainly be
// low, though channel operations and any allocations may add to that.
c.mu.Lock()
defer c.mu.Unlock()
// Populate runtime/metrics sample buffer.
metrics.Read(c.rmSampleBuf)
// Update all our metrics from rmSampleBuf.
for i, sample := range c.rmSampleBuf {
// N.B. switch on concrete type because it's significantly more efficient
// than checking for the Counter and Gauge interface implementations. In
// this case, we control all the types here.
switch m := c.rmMetrics[i].(type) {
case *counter:
// Guard against decreases. This should never happen, but a failure
// to do so will result in a panic, which is a harsh consequence for
// a metrics collection bug.
v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
if v1 > v0 {
m.Add(unwrapScalarRMValue(sample.Value) - m.get())
}
m.Collect(ch)
case *gauge:
m.Set(unwrapScalarRMValue(sample.Value))
m.Collect(ch)
case *batchHistogram:
m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
m.Collect(ch)
default:
panic("unexpected metric type")
}
}
// ms is a dummy MemStats that we populate ourselves so that we can
// populate the old metrics from it.
var ms runtime.MemStats
memStatsFromRM(&ms, c.rmSampleMap)
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
}
}
// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
// to be scalar and returns the equivalent float64 value. Panics if the
// value is not scalar.
func unwrapScalarRMValue(v metrics.Value) float64 {
switch v.Kind() {
case metrics.KindUint64:
return float64(v.Uint64())
case metrics.KindFloat64:
return v.Float64()
case metrics.KindBad:
// Unsupported metric.
//
// This should never happen because we always populate our metric
// set from the runtime/metrics package.
panic("unexpected unsupported metric")
default:
// Unsupported metric kind.
//
// This should never happen because we check for this during initialization
// and flag and filter metrics whose kinds we don't understand.
panic("unexpected unsupported metric kind")
}
}
var rmExactSumMap = map[string]string{
"/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes",
"/gc/heap/frees-by-size:bytes": "/gc/heap/frees:bytes",
}
// exactSumFor takes a runtime/metrics metric name (that is assumed to
// be of kind KindFloat64Histogram) and returns its exact sum and whether
// its exact sum exists.
//
// The runtime/metrics API for histograms doesn't currently expose exact
// sums, but some of the other metrics are in fact exact sums of histograms.
func (c *goCollector) exactSumFor(rmName string) float64 {
sumName, ok := rmExactSumMap[rmName]
if !ok {
return 0
}
s, ok := c.rmSampleMap[sumName]
if !ok {
return 0
}
return unwrapScalarRMValue(s.Value)
}
func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
lookupOrZero := func(name string) uint64 {
if s, ok := rm[name]; ok {
return s.Value.Uint64()
}
return 0
}
// Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
// The reason for this is because MemStats couldn't be extended at the time
// but there was a desire to have Mallocs at least be a little more representative,
// while having Mallocs - Frees still represent a live object count.
// Unfortunately, MemStats doesn't actually export a large allocation count,
// so it's impossible to pull this number out directly.
tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects")
ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs
ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs
ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes")
ms.Sys = lookupOrZero("/memory/classes/total:bytes")
ms.Lookups = 0 // Already always zero.
ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes")
ms.Alloc = ms.HeapAlloc
ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes")
ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes")
ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes")
ms.HeapSys = ms.HeapInuse + ms.HeapIdle
ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects")
ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes")
ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes")
ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes")
ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes")
ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes")
ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes")
ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes")
ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes")
ms.OtherSys = lookupOrZero("/memory/classes/other:bytes")
ms.NextGC = lookupOrZero("/gc/heap/goal:bytes")
// N.B. LastGC is omitted because runtime.GCStats already has this.
// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
// for more details.
ms.LastGC = 0
// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
// and often misleading due to the fact that it's an average over the lifetime
// of the process.
// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
// for more details.
ms.GCCPUFraction = 0
}
// batchHistogram is a mutable histogram that is updated
// in batches.
type batchHistogram struct {
selfCollector
// Static fields updated only once.
desc *Desc
hasSum bool
// Because this histogram operates in batches, it just uses a
// single mutex for everything. updates are always serialized
// but Write calls may operate concurrently with updates.
// Contention between these two sources should be rare.
mu sync.Mutex
buckets []float64 // Inclusive lower bounds, like runtime/metrics.
counts []uint64
sum float64 // Used if hasSum is true.
}
// newBatchHistogram creates a new batch histogram value with the given
// Desc, buckets, and whether or not it has an exact sum available.
//
// buckets must always be from the runtime/metrics package, following
// the same conventions.
func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
h := &batchHistogram{
desc: desc,
buckets: buckets,
// Because buckets follows runtime/metrics conventions, there's
// 1 more value in the buckets list than there are buckets represented,
// because in runtime/metrics, the bucket values represent *boundaries*,
// and non-Inf boundaries are inclusive lower bounds for that bucket.
counts: make([]uint64, len(buckets)-1),
hasSum: hasSum,
}
h.init(h)
return h
}
// update updates the batchHistogram from a runtime/metrics histogram.
//
// sum must be provided if the batchHistogram was created to have an exact sum.
// h.buckets must be a strict subset of his.Buckets.
func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
counts, buckets := his.Counts, his.Buckets
h.mu.Lock()
defer h.mu.Unlock()
// Clear buckets.
for i := range h.counts {
h.counts[i] = 0
}
// Copy and reduce buckets.
var j int
for i, count := range counts {
h.counts[j] += count
if buckets[i+1] == h.buckets[j+1] {
j++
}
}
if h.hasSum {
h.sum = sum
}
}
func (h *batchHistogram) Desc() *Desc {
return h.desc
}
func (h *batchHistogram) Write(out *dto.Metric) error {
h.mu.Lock()
defer h.mu.Unlock()
sum := float64(0)
if h.hasSum {
sum = h.sum
}
dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
totalCount := uint64(0)
for i, count := range h.counts {
totalCount += count
if !h.hasSum {
// N.B. This computed sum is an underestimate.
sum += h.buckets[i] * float64(count)
}
// Skip the +Inf bucket, but only for the bucket list.
// It must still count for sum and totalCount.
if math.IsInf(h.buckets[i+1], 1) {
break
}
// Float64Histogram's upper bound is exclusive, so make it inclusive
// by obtaining the next float64 value down, in order.
upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
dtoBuckets = append(dtoBuckets, &dto.Bucket{
CumulativeCount: proto.Uint64(totalCount),
UpperBound: proto.Float64(upperBound),
})
}
out.Histogram = &dto.Histogram{
Bucket: dtoBuckets,
SampleCount: proto.Uint64(totalCount),
SampleSum: proto.Float64(sum),
}
return nil
}

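On Go 1.17 and later, this runtime/metrics-backed implementation is what the non-deprecated constructor returns; older toolchains fall back to the memstats-based version in the previous file. A minimal registration sketch via the collectors package (port and endpoint are illustrative):

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()
    // Registers both the new runtime/metrics-derived series and the legacy
    // go_memstats_* names recomputed by memStatsFromRM above.
    reg.MustRegister(collectors.NewGoCollector())

    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
    log.Fatal(http.ListenAndServe(":9090", nil))
}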

@ -116,6 +116,34 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
return buckets
}
// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
// and not included in the returned slice. The returned slice is meant to be
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
func ExponentialBucketsRange(min, max float64, count int) []float64 {
if count < 1 {
panic("ExponentialBucketsRange count needs a positive count")
}
if min <= 0 {
panic("ExponentialBucketsRange min needs to be greater than 0")
}
// Formula for exponential buckets.
// max = min*growthFactor^(bucketCount-1)
// We know max/min and highest bucket. Solve for growthFactor.
growthFactor := math.Pow(max/min, 1.0/float64(count-1))
// Now that we know growthFactor, solve for each bucket.
buckets := make([]float64, count)
for i := 1; i <= count; i++ {
buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
}
return buckets
}
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly

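ExponentialBucketsRange complements the existing ExponentialBuckets by taking the two endpoints instead of a start value and growth factor. A small sketch of feeding it into HistogramOpts.Buckets (metric name and bounds are made up for illustration):

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // 10 buckets spaced exponentially from 1ms to 10s;
    // growthFactor = (10 / 0.001)^(1/9) ≈ 2.78, per the formula above.
    buckets := prometheus.ExponentialBucketsRange(0.001, 10, 10)
    fmt.Println(buckets)

    hist := prometheus.NewHistogram(prometheus.HistogramOpts{
        Name:    "request_duration_seconds", // illustrative name
        Help:    "Request latency in seconds.",
        Buckets: buckets,
    })
    hist.Observe(0.042)
}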

@ -0,0 +1,142 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.17
// +build go1.17
package internal
import (
"math"
"path"
"runtime/metrics"
"strings"
"github.com/prometheus/common/model"
)
// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
// metric description and validates whether the metric is suitable for integration
// with Prometheus.
//
// Returns false if a name could not be produced, or if Prometheus does not understand
// the runtime/metrics Kind.
//
// Note that the main reason a name couldn't be produced is if the runtime/metrics
// package exports a name with characters outside the valid Prometheus metric name
// character set. This is theoretically possible, but should never happen in practice.
// Still, don't rely on it.
func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
namespace := "go"
comp := strings.SplitN(d.Name, ":", 2)
key := comp[0]
unit := comp[1]
// The last path element in the key is the name,
// the rest is the subsystem.
subsystem := path.Dir(key[1:] /* remove leading / */)
name := path.Base(key)
// subsystem is translated by replacing all / and - with _.
subsystem = strings.ReplaceAll(subsystem, "/", "_")
subsystem = strings.ReplaceAll(subsystem, "-", "_")
// unit is translated assuming that the unit contains no
// non-ASCII characters.
unit = strings.ReplaceAll(unit, "-", "_")
unit = strings.ReplaceAll(unit, "*", "_")
unit = strings.ReplaceAll(unit, "/", "_per_")
// name has - replaced with _ and is concatenated with the unit and
// other data.
name = strings.ReplaceAll(name, "-", "_")
name = name + "_" + unit
if d.Cumulative {
name = name + "_total"
}
valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
switch d.Kind {
case metrics.KindUint64:
case metrics.KindFloat64:
case metrics.KindFloat64Histogram:
default:
valid = false
}
return namespace, subsystem, name, valid
}
// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
// as the bottom-most upper-bound inclusive bucket in Prometheus.
func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
switch unit {
case "bytes":
// Rebucket as powers of 2.
return rebucketExp(buckets, 2)
case "seconds":
// Rebucket as powers of 10 and then merge all buckets greater
// than 1 second into the +Inf bucket.
b := rebucketExp(buckets, 10)
for i := range b {
if b[i] <= 1 {
continue
}
b[i] = math.Inf(1)
b = b[:i+1]
break
}
return b
}
return buckets
}
// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
// downsamples the buckets to those a multiple of base apart. The end result
// is a roughly exponential (in many cases, perfectly exponential) bucketing
// scheme.
func rebucketExp(buckets []float64, base float64) []float64 {
bucket := buckets[0]
var newBuckets []float64
// We may see a -Inf here, in which case, add it and skip it
// since we risk producing NaNs otherwise.
//
// We need to preserve -Inf values to maintain runtime/metrics
// conventions. We'll strip it out later.
if bucket == math.Inf(-1) {
newBuckets = append(newBuckets, bucket)
buckets = buckets[1:]
bucket = buckets[0]
}
// From now on, bucket should always have a non-Inf value because
// Infs are only ever at the ends of the bucket lists, so
// arithmetic operations on it are non-NaN.
for i := 1; i < len(buckets); i++ {
if bucket >= 0 && buckets[i] < bucket*base {
// The next bucket we want to include is at least bucket*base.
continue
} else if bucket < 0 && buckets[i] < bucket/base {
// In this case the bucket we're targeting is negative, and since
// we're ascending through buckets here, we need to divide to get
// closer to zero exponentially.
continue
}
// The +Inf bucket will always be the last one, and we'll always
// end up including it here because bucket
newBuckets = append(newBuckets, bucket)
bucket = buckets[i]
}
return append(newBuckets, bucket)
}

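As a worked example of the translation above: the runtime/metrics name /gc/heap/allocs:bytes splits into key /gc/heap/allocs and unit bytes, giving namespace go, subsystem gc_heap, and name allocs_bytes_total (the metric is cumulative), i.e. go_gc_heap_allocs_bytes_total. The sketch below re-derives the common case by hand, since the internal package cannot be imported from outside client_golang; it skips the '*' replacement and the validity check for brevity:

package main

import (
    "fmt"
    "path"
    "strings"
)

// promName mirrors the happy path of RuntimeMetricsToProm above.
func promName(rmName string, cumulative bool) string {
    comp := strings.SplitN(rmName, ":", 2)
    key, unit := comp[0], comp[1]

    subsystem := strings.ReplaceAll(path.Dir(key[1:]), "/", "_")
    subsystem = strings.ReplaceAll(subsystem, "-", "_")

    unit = strings.ReplaceAll(unit, "-", "_")
    unit = strings.ReplaceAll(unit, "/", "_per_")

    name := strings.ReplaceAll(path.Base(key), "-", "_") + "_" + unit
    if cumulative {
        name += "_total"
    }
    return "go_" + subsystem + "_" + name
}

func main() {
    // Prints go_gc_heap_allocs_bytes_total.
    fmt.Println(promName("/gc/heap/allocs:bytes", true))
}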

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
// +build !windows
package prometheus


@ -49,7 +49,10 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
// http.RoundTripper to observe the request result with the provided CounterVec.
// The CounterVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
// panics otherwise. For the "method" label a predefined default label value set
// is used to filter given values. Values besides predefined values will count
// as `unknown` method. `WithExtraMethods` can be used to add more
// methods to the set. Partitioning of the CounterVec happens by HTTP status code
// and/or HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
//
@ -57,13 +60,18 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
// is not incremented.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
rtOpts := &option{}
for _, o := range opts {
o(rtOpts)
}
code, method := checkLabels(counter)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
}
return resp, err
})
@ -73,7 +81,10 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
// http.RoundTripper to observe the request duration with the provided
// ObserverVec. The ObserverVec must have zero, one, or two non-const
// non-curried labels. For those, the only allowed label names are "code" and
// "method". The function panics otherwise. The Observe method of the Observer
// "method". The function panics otherwise. For the "method" label a predefined
// default label value set is used to filter given values. Values besides
// predefined values will count as `unknown` method. `WithExtraMethods`
// can be used to add more methods to the set. The Observe method of the Observer
// in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
@ -85,14 +96,19 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
rtOpts := &option{}
for _, o := range opts {
o(rtOpts)
}
code, method := checkLabels(obs)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds())
}
return resp, err
})

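The new variadic opts parameter on these wrappers is how callers extend the allow-list for the "method" label; any method outside the default set is reported as "unknown". A minimal client-side sketch using WithExtraMethods (counter name, extra method, and target URL are illustrative):

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    counter := prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "client_api_requests_total", // illustrative name
            Help: "Outgoing API requests by method and status code.",
        },
        []string{"code", "method"},
    )
    prometheus.MustRegister(counter)

    client := &http.Client{
        // "REPORT" is not in the default method set, so it has to be
        // allow-listed; otherwise it would be counted as method="unknown".
        Transport: promhttp.InstrumentRoundTripperCounter(
            counter, http.DefaultTransport, promhttp.WithExtraMethods("REPORT"),
        ),
    }
    resp, err := client.Get("http://localhost:8080/healthz") // illustrative target
    if err == nil {
        resp.Body.Close()
    }
}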

@ -45,7 +45,10 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// http.Handler to observe the request duration with the provided ObserverVec.
// The ObserverVec must have valid metric and label names and must have zero,
// one, or two non-const non-curried labels. For those, the only allowed label
// names are "code" and "method". The function panics otherwise. The Observe
// names are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the request duration
// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
// the respective instance label names are present in the ObserverVec. For
@ -58,7 +61,12 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(obs)
if code {
@ -67,14 +75,14 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
})
}
@ -82,7 +90,10 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
// to observe the request result with the provided CounterVec. The CounterVec
// must have valid metric and label names and must have zero, one, or two
// non-const non-curried labels. For those, the only allowed label names are
// "code" and "method". The function panics otherwise. Partitioning of the
// "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the
// CounterVec happens by HTTP status code and/or HTTP method if the respective
// instance label names are present in the CounterVec. For unpartitioned
// counting, use a CounterVec with zero labels.
@ -92,20 +103,25 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
// If the wrapped Handler panics, the Counter is not incremented.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(counter)
if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
counter.With(labels(code, method, r.Method, d.Status())).Inc()
counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc()
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
counter.With(labels(code, method, r.Method, 0)).Inc()
counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc()
})
}
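
A matching server-side sketch for InstrumentHandlerCounter above (path, metric name, and the extra method are placeholders):

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    counter := prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "api_requests_total", // illustrative name
            Help: "Handled requests by method and status code.",
        },
        []string{"code", "method"},
    )
    prometheus.MustRegister(counter)

    api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusNoContent)
    })

    // Requests using "PURGE" would otherwise land in method="unknown".
    http.Handle("/api", promhttp.InstrumentHandlerCounter(counter, api, promhttp.WithExtraMethods("PURGE")))
    http.Handle("/metrics", promhttp.Handler())
    log.Fatal(http.ListenAndServe(":8080", nil))
}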
@ -114,7 +130,10 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler)
// until the response headers are written. The ObserverVec must have valid
// metric and label names and must have zero, one, or two non-const non-curried
// labels. For those, the only allowed label names are "code" and "method". The
// function panics otherwise. The Observe method of the Observer in the
// function panics otherwise. For the "method" label a predefined default label
// value set is used to filter given values. Values besides predefined values
// will count as `unknown` method. `WithExtraMethods` can be used to add more
// methods to the set. The Observe method of the Observer in the
// ObserverVec is called with the request duration in seconds. Partitioning
// happens by HTTP status code and/or HTTP method if the respective instance
// label names are present in the ObserverVec. For unpartitioned observations,
@ -128,13 +147,18 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler)
// if used with Go1.9+.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(obs)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
})
next.ServeHTTP(d, r)
})
@ -144,8 +168,11 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
// http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. The Observe method of
// the Observer in the ObserverVec is called with the request size in
// are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the request size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
@ -156,7 +183,12 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(obs)
if code {
@ -164,14 +196,14 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size))
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size))
})
}
@ -179,8 +211,11 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
// http.Handler to observe the response size with the provided ObserverVec. The
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. The Observe method of
// the Observer in the ObserverVec is called with the response size in
// are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the response size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
@ -191,12 +226,18 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(obs)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written()))
})
}
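The three handler wrappers in this hunk now all accept the same trailing Option list. A minimal sketch of chaining them around a handler; the metric names, handler body, and listen address are illustrative assumptions, not part of this diff:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Histograms partitioned by "code" and "method", as the doc comments above allow.
	ttwh := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "http_time_to_write_header_seconds",
		Help: "Time until the first header byte is written.",
	}, []string{"code", "method"})
	reqSize := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "http_request_size_bytes",
		Help: "Approximate request size.",
	}, []string{"code", "method"})
	respSize := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "http_response_size_bytes",
		Help: "Response size.",
	}, []string{"code", "method"})
	prometheus.MustRegister(ttwh, reqSize, respSize)

	var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// The new variadic Option parameter is optional; passing nothing keeps the old behaviour.
	handler = promhttp.InstrumentHandlerResponseSize(respSize, handler)
	handler = promhttp.InstrumentHandlerRequestSize(reqSize, handler)
	handler = promhttp.InstrumentHandlerTimeToWriteHeader(ttwh, handler)

	http.Handle("/", handler)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}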
@ -290,7 +331,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
// unnecessary allocations on each request.
var emptyLabels = prometheus.Labels{}
func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
if !(code || method) {
return emptyLabels
}
@ -300,7 +341,7 @@ func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
labels["code"] = sanitizeCode(status)
}
if method {
labels["method"] = sanitizeMethod(reqMethod)
labels["method"] = sanitizeMethod(reqMethod, extraMethods...)
}
return labels
@ -330,7 +371,12 @@ func computeApproximateRequestSize(r *http.Request) int {
return s
}
func sanitizeMethod(m string) string {
// If the wrapped http.Handler has a known method, it will be sanitized and returned.
// Otherwise, "unknown" will be returned. The known method list can be extended
// as needed by using the extraMethods parameter.
func sanitizeMethod(m string, extraMethods ...string) string {
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for
// the methods chosen as default.
switch m {
case "GET", "get":
return "get"
@ -348,15 +394,25 @@ func sanitizeMethod(m string) string {
return "options"
case "NOTIFY", "notify":
return "notify"
case "TRACE", "trace":
return "trace"
case "PATCH", "patch":
return "patch"
default:
for _, method := range extraMethods {
if strings.EqualFold(m, method) {
return strings.ToLower(m)
}
}
return "unknown"
}
}
// If the wrapped http.Handler has not set a status code, i.e. the value is
// currently 0, santizeCode will return 200, for consistency with behavior in
// currently 0, sanitizeCode will return 200, for consistency with behavior in
// the stdlib.
func sanitizeCode(s int) string {
// See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
switch s {
case 100:
return "100"
@ -453,6 +509,9 @@ func sanitizeCode(s int) string {
return "511"
default:
if s >= 100 && s <= 599 {
return strconv.Itoa(s)
}
return "unknown"
}
}

View File

@ -0,0 +1,31 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
// Option is used to configure a middleware or a round tripper.
type Option func(*option)
type option struct {
extraMethods []string
}
// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
//
// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage.
func WithExtraMethods(methods ...string) Option {
return func(o *option) {
o.extraMethods = methods
}
}
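The WithExtraMethods option feeds the extraMethods list consumed by sanitizeMethod above. A short sketch, assuming a hypothetical PURGE-capable endpoint; the method, metric name, and port are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "http_request_duration_seconds",
		Help: "Request duration.",
	}, []string{"code", "method"})
	prometheus.MustRegister(duration)

	next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// Without the option, a PURGE request is reported under method="unknown";
	// with it, the label becomes method="purge".
	handler := promhttp.InstrumentHandlerDuration(duration, next, promhttp.WithExtraMethods("PURGE"))

	http.Handle("/", handler)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}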

View File

@ -21,7 +21,7 @@ import (
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"google.golang.org/protobuf/types/known/timestamppb"
dto "github.com/prometheus/client_model/go"
)
@ -183,8 +183,8 @@ const ExemplarMaxRunes = 64
func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
e := &dto.Exemplar{}
e.Value = proto.Float64(value)
tsProto, err := ptypes.TimestampProto(ts)
if err != nil {
tsProto := timestamppb.New(ts)
if err := tsProto.CheckValid(); err != nil {
return nil, err
}
e.Timestamp = tsProto
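For reference, the replacement of the deprecated ptypes helper can be exercised in isolation. A minimal sketch of the timestamppb API now used above:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// timestamppb.New never returns an error; validity is checked separately,
	// which is exactly the pattern adopted in newExemplar above.
	ts := timestamppb.New(time.Now())
	if err := ts.CheckValid(); err != nil {
		fmt.Println("invalid timestamp:", err)
		return
	}
	fmt.Println(ts.AsTime().UTC())
}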

View File

@ -18,6 +18,8 @@ include Makefile.common
./ttar -C $(dir $*) -x -f $*.ttar
touch $@
fixtures: fixtures/.unpacked
update_fixtures:
rm -vf fixtures/.unpacked
./ttar -c -f fixtures.ttar fixtures/

View File

@ -78,12 +78,12 @@ ifneq ($(shell which gotestsum),)
endif
endif
PROMU_VERSION ?= 0.7.0
PROMU_VERSION ?= 0.12.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.18.0
GOLANGCI_LINT_VERSION ?= v1.39.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@ -118,7 +118,7 @@ endif
%: common-% ;
.PHONY: common-all
common-all: precheck style check_license lint unused build test
common-all: precheck style check_license lint yamllint unused build test
.PHONY: common-style
common-style:
@ -198,6 +198,15 @@ else
endif
endif
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell which yamllint))
@echo "yamllint not installed so skipping"
else
yamllint .
endif
# For backward-compatibility.
.PHONY: common-staticcheck
common-staticcheck: lint

View File

@ -6,8 +6,8 @@ metrics from the pseudo-filesystems /proc and /sys.
*WARNING*: This package is a work in progress. Its API may still break in
backwards-incompatible ways without warnings. Use it at your own risk.
[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs)
[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
## Usage

30
vendor/github.com/prometheus/procfs/cmdline.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"strings"
"github.com/prometheus/procfs/internal/util"
)
// CmdLine returns the command line of the kernel.
func (fs FS) CmdLine() ([]string, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("cmdline"))
if err != nil {
return nil, err
}
return strings.Fields(string(data)), nil
}
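A usage sketch for the new CmdLine helper; the /proc mount point is the usual default, and NewFS is the existing constructor rather than part of this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	args, err := fs.CmdLine()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(args) // kernel boot parameters, e.g. ["BOOT_IMAGE=/vmlinuz" "ro" ...]
}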

View File

@ -31,7 +31,7 @@
// log.Fatalf("could not get process: %s", err)
// }
//
// stat, err := p.NewStat()
// stat, err := p.Stat()
// if err != nil {
// log.Fatalf("could not get process stat: %s", err)
// }

File diff suppressed because it is too large

View File

@ -22,8 +22,11 @@ import (
)
var (
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
)
@ -39,12 +42,20 @@ type MDStat struct {
DisksTotal int64
// Number of failed disks.
DisksFailed int64
// Number of "down" disks. (the _ indicator in the status line)
DisksDown int64
// Spare disks in the device.
DisksSpare int64
// Number of blocks the device holds.
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
// progress percentage of current sync
BlocksSyncedPct float64
// estimated finishing time for current sync (in minutes)
BlocksSyncedFinishTime float64
// current sync speed (in Kilobytes/sec)
BlocksSyncedSpeed float64
// Name of md component devices
Devices []string
}
@ -91,7 +102,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
fail := int64(strings.Count(line, "(F)"))
spare := int64(strings.Count(line, "(S)"))
active, total, size, err := evalStatusLine(lines[i], lines[i+1])
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
return nil, fmt.Errorf("error parsing md device lines: %w", err)
@ -105,6 +116,9 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
speed := float64(0)
finish := float64(0)
pct := float64(0)
recovering := strings.Contains(lines[syncLineIdx], "recovery")
resyncing := strings.Contains(lines[syncLineIdx], "resync")
checking := strings.Contains(lines[syncLineIdx], "check")
@ -124,7 +138,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
strings.Contains(lines[syncLineIdx], "DELAYED") {
syncedBlocks = 0
} else {
syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
}
@ -136,10 +150,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
ActivityState: state,
DisksActive: active,
DisksFailed: fail,
DisksDown: down,
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
BlocksSyncedPct: pct,
BlocksSyncedFinishTime: finish,
BlocksSyncedSpeed: speed,
Devices: evalComponentDevices(deviceFields),
})
}
@ -147,54 +165,85 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
return mdStats, nil
}
func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
sizeStr := strings.Fields(statusLine)[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
// In the device deviceLine, only disks have a number associated with them in [].
total = int64(strings.Count(deviceLine, "["))
return total, total, size, nil
return total, total, 0, size, nil
}
if strings.Contains(deviceLine, "inactive") {
return 0, 0, size, nil
return 0, 0, 0, size, nil
}
matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
if len(matches) != 5 {
return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
down = int64(strings.Count(matches[4], "_"))
return active, total, size, nil
return active, total, down, size, nil
}
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
}
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
}
return syncedBlocks, nil
// Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine)
}
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil {
return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
}
// Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine)
}
finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
}
// Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine)
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
}
return syncedBlocks, pct, finish, speed, nil
}
func evalComponentDevices(deviceFields []string) []string {
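The new DisksDown, BlocksSyncedPct, BlocksSyncedFinishTime and BlocksSyncedSpeed fields surface the recovery-line details parsed above. A sketch reading them through the existing FS.MDStat accessor; the output format is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	mdstats, err := fs.MDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range mdstats {
		fmt.Printf("%s: %s, %d/%d disks (%d down), sync %.1f%% (finish %.1f min, %.0f KiB/s)\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal, md.DisksDown,
			md.BlocksSyncedPct, md.BlocksSyncedFinishTime, md.BlocksSyncedSpeed)
	}
}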

View File

@ -65,6 +65,7 @@ type (
TxQueue uint64
RxQueue uint64
UID uint64
Inode uint64
}
)
@ -150,9 +151,9 @@ func parseIP(hexIP string) (net.IP, error) {
// parseNetIPSocketLine parses a single line, represented by a list of fields.
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 8 {
if len(fields) < 10 {
return nil, fmt.Errorf(
"cannot parse net socket line as it has less then 8 columns %q",
"cannot parse net socket line as it has less then 10 columns %q",
strings.Join(fields, " "),
)
}
@ -216,5 +217,10 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
}
// inode
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err)
}
return line, nil
}
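With the added Inode field, consumers of the net/tcp and net/udp parsers can correlate sockets with file descriptors. A hedged sketch, assuming the existing FS.NetTCP accessor and the exported field names of the netIPSocketLine struct:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// NetTCP parses /proc/net/tcp; each line now also carries the socket inode.
	sockets, err := fs.NetTCP()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range sockets {
		fmt.Printf("%v:%d uid=%d inode=%d\n", s.LocalAddr, s.LocalPort, s.UID, s.Inode)
	}
}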

68
vendor/github.com/prometheus/procfs/netstat.go generated vendored Normal file
View File

@ -0,0 +1,68 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"os"
"path/filepath"
"strconv"
"strings"
)
// NetStat contains statistics for all the counters from one file
type NetStat struct {
Filename string
Stats map[string][]uint64
}
// NetStat retrieves stats from /proc/net/stat/
func (fs FS) NetStat() ([]NetStat, error) {
statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
if err != nil {
return nil, err
}
var netStatsTotal []NetStat
for _, filePath := range statFiles {
file, err := os.Open(filePath)
if err != nil {
return nil, err
}
netStatFile := NetStat{
Filename: filepath.Base(filePath),
Stats: make(map[string][]uint64),
}
scanner := bufio.NewScanner(file)
scanner.Scan()
// First string is always a header for stats
var headers []string
headers = append(headers, strings.Fields(scanner.Text())...)
// Other strings represent per-CPU counters
for scanner.Scan() {
for num, counter := range strings.Fields(scanner.Text()) {
value, err := strconv.ParseUint(counter, 16, 32)
if err != nil {
return nil, err
}
netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value)
}
}
netStatsTotal = append(netStatsTotal, netStatFile)
}
return netStatsTotal, nil
}
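A usage sketch for the new NetStat reader; the "entries" header is illustrative (it appears in files such as /proc/net/stat/arp_cache) and not guaranteed for every file:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, stat := range stats {
		// One entry per file under /proc/net/stat/; each counter holds one value per CPU.
		fmt.Println(stat.Filename, stat.Stats["entries"])
	}
}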

View File

@ -90,7 +90,7 @@ func parseCgroups(data []byte) ([]Cgroup, error) {
// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
// so the len of the returned struct is equal to the number of active hierarchies on this system
func (p Proc) Cgroups() ([]Cgroup, error) {
data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID))
data, err := util.ReadFileNoStat(p.path("cgroup"))
if err != nil {
return nil, err
}
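A short sketch of the Cgroups accessor whose path handling changed above; the Cgroup field names follow the upstream struct, and procfs.Self resolves the calling process:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // any Proc obtained from the FS works the same way
	if err != nil {
		log.Fatal(err)
	}
	cgroups, err := p.Cgroups()
	if err != nil {
		log.Fatal(err)
	}
	for _, cg := range cgroups {
		fmt.Printf("hierarchy %d (%v): %s\n", cg.HierarchyID, cg.Controllers, cg.Path)
	}
}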

View File

@ -100,6 +100,15 @@ type ProcStat struct {
VSize uint
// Resident set size in pages.
RSS int
// Soft limit in bytes on the rss of the process.
RSSLimit uint64
// Real-time scheduling priority, a number in the range 1 to 99 for processes
// scheduled under a real-time policy, or 0, for non-real-time processes.
RTPriority uint
// Scheduling policy.
Policy uint
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64
proc fs.FS
}
@ -119,7 +128,8 @@ func (p Proc) Stat() (ProcStat, error) {
}
var (
ignore int
ignoreInt64 int64
ignoreUint64 uint64
s = ProcStat{PID: p.PID, proc: p.fs}
l = bytes.Index(data, []byte("("))
@ -151,10 +161,28 @@ func (p Proc) Stat() (ProcStat, error) {
&s.Priority,
&s.Nice,
&s.NumThreads,
&ignore,
&ignoreInt64,
&s.Starttime,
&s.VSize,
&s.RSS,
&s.RSSLimit,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreInt64,
&ignoreInt64,
&s.RTPriority,
&s.Policy,
&s.DelayAcctBlkIOTicks,
)
if err != nil {
return ProcStat{}, err
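The new ProcStat fields can be read through the existing Proc.Stat call; a minimal sketch, with the printed format being illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := p.Stat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rss limit=%d bytes, rt priority=%d, policy=%d, blkio delay=%d ticks\n",
		stat.RSSLimit, stat.RTPriority, stat.Policy, stat.DelayAcctBlkIOTicks)
}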

View File

@ -99,7 +99,6 @@ func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
continue
}
if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
zoneinfoElement.Zone = ""
continue
}
parts := strings.Fields(strings.TrimSpace(line))

View File

@ -741,6 +741,7 @@ const (
ETH_P_QINQ2 = 0x9200
ETH_P_QINQ3 = 0x9300
ETH_P_RARP = 0x8035
ETH_P_REALTEK = 0x8899
ETH_P_SCA = 0x6007
ETH_P_SLOW = 0x8809
ETH_P_SNAP = 0x5
@ -810,10 +811,12 @@ const (
FAN_EPIDFD = -0x2
FAN_EVENT_INFO_TYPE_DFID = 0x3
FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2
FAN_EVENT_INFO_TYPE_ERROR = 0x5
FAN_EVENT_INFO_TYPE_FID = 0x1
FAN_EVENT_INFO_TYPE_PIDFD = 0x4
FAN_EVENT_METADATA_LEN = 0x18
FAN_EVENT_ON_CHILD = 0x8000000
FAN_FS_ERROR = 0x8000
FAN_MARK_ADD = 0x1
FAN_MARK_DONT_FOLLOW = 0x4
FAN_MARK_FILESYSTEM = 0x100
@ -1827,6 +1830,8 @@ const (
PERF_MEM_BLK_DATA = 0x2
PERF_MEM_BLK_NA = 0x1
PERF_MEM_BLK_SHIFT = 0x28
PERF_MEM_HOPS_0 = 0x1
PERF_MEM_HOPS_SHIFT = 0x2b
PERF_MEM_LOCK_LOCKED = 0x2
PERF_MEM_LOCK_NA = 0x1
PERF_MEM_LOCK_SHIFT = 0x18
@ -1986,6 +1991,9 @@ const (
PR_SCHED_CORE_CREATE = 0x1
PR_SCHED_CORE_GET = 0x0
PR_SCHED_CORE_MAX = 0x4
PR_SCHED_CORE_SCOPE_PROCESS_GROUP = 0x2
PR_SCHED_CORE_SCOPE_THREAD = 0x0
PR_SCHED_CORE_SCOPE_THREAD_GROUP = 0x1
PR_SCHED_CORE_SHARE_FROM = 0x3
PR_SCHED_CORE_SHARE_TO = 0x2
PR_SET_CHILD_SUBREAPER = 0x24
@ -2167,12 +2175,23 @@ const (
RTCF_NAT = 0x800000
RTCF_VALVE = 0x200000
RTC_AF = 0x20
RTC_BSM_DIRECT = 0x1
RTC_BSM_DISABLED = 0x0
RTC_BSM_LEVEL = 0x2
RTC_BSM_STANDBY = 0x3
RTC_FEATURE_ALARM = 0x0
RTC_FEATURE_ALARM_RES_2S = 0x3
RTC_FEATURE_ALARM_RES_MINUTE = 0x1
RTC_FEATURE_CNT = 0x3
RTC_FEATURE_BACKUP_SWITCH_MODE = 0x6
RTC_FEATURE_CNT = 0x7
RTC_FEATURE_CORRECTION = 0x5
RTC_FEATURE_NEED_WEEK_DAY = 0x2
RTC_FEATURE_UPDATE_INTERRUPT = 0x4
RTC_IRQF = 0x80
RTC_MAX_FREQ = 0x2000
RTC_PARAM_BACKUP_SWITCH_MODE = 0x2
RTC_PARAM_CORRECTION = 0x1
RTC_PARAM_FEATURES = 0x0
RTC_PF = 0x40
RTC_UF = 0x10
RTF_ADDRCLASSMASK = 0xf8000000
@ -2532,6 +2551,8 @@ const (
SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1
SO_VM_SOCKETS_BUFFER_SIZE = 0x0
SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6
SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW = 0x8
SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD = 0x6
SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7
SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3
SO_VM_SOCKETS_TRUSTED = 0x5

View File

@ -250,6 +250,8 @@ const (
RTC_EPOCH_SET = 0x4004700e
RTC_IRQP_READ = 0x8004700b
RTC_IRQP_SET = 0x4004700c
RTC_PARAM_GET = 0x40187013
RTC_PARAM_SET = 0x40187014
RTC_PIE_OFF = 0x7006
RTC_PIE_ON = 0x7005
RTC_PLL_GET = 0x801c7011
@ -327,6 +329,7 @@ const (
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28

View File

@ -251,6 +251,8 @@ const (
RTC_EPOCH_SET = 0x4008700e
RTC_IRQP_READ = 0x8008700b
RTC_IRQP_SET = 0x4008700c
RTC_PARAM_GET = 0x40187013
RTC_PARAM_SET = 0x40187014
RTC_PIE_OFF = 0x7006
RTC_PIE_ON = 0x7005
RTC_PLL_GET = 0x80207011
@ -328,6 +330,7 @@ const (
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28

View File

@ -257,6 +257,8 @@ const (
RTC_EPOCH_SET = 0x4004700e
RTC_IRQP_READ = 0x8004700b
RTC_IRQP_SET = 0x4004700c
RTC_PARAM_GET = 0x40187013
RTC_PARAM_SET = 0x40187014
RTC_PIE_OFF = 0x7006
RTC_PIE_ON = 0x7005
RTC_PLL_GET = 0x801c7011
@ -334,6 +336,7 @@ const (
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28

View File

@ -247,6 +247,8 @@ const (
RTC_EPOCH_SET = 0x4008700e
RTC_IRQP_READ = 0x8008700b
RTC_IRQP_SET = 0x4008700c
RTC_PARAM_GET = 0x40187013
RTC_PARAM_SET = 0x40187014
RTC_PIE_OFF = 0x7006
RTC_PIE_ON = 0x7005
RTC_PLL_GET = 0x80207011
@ -324,6 +326,7 @@ const (
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28

View File

@ -250,6 +250,8 @@ const (
RTC_EPOCH_SET = 0x8004700e
RTC_IRQP_READ = 0x4004700b
RTC_IRQP_SET = 0x8004700c
RTC_PARAM_GET = 0x80187013
RTC_PARAM_SET = 0x80187014
RTC_PIE_OFF = 0x20007006
RTC_PIE_ON = 0x20007005
RTC_PLL_GET = 0x401c7011
@ -327,6 +329,7 @@ const (
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_RXQ_OVFL = 0x28

View File

@ -250,6 +250,8 @@ const (
RTC_EPOCH_SET = 0x8008700e
RTC_IRQP_READ = 0x4008700b
RTC_IRQP_SET = 0x8008700c
RTC_PARAM_GET = 0x80187013
RTC_PARAM_SET = 0x80187014
RTC_PIE_OFF = 0x20007006
RTC_PIE_ON = 0x20007005
RTC_PLL_GET = 0x40207011
@ -327,6 +329,7 @@ const (
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_RXQ_OVFL = 0x28

View File

@ -250,6 +250,8 @@ const (
RTC_EPOCH_SET = 0x8008700e
RTC_IRQP_READ = 0x4008700b
RTC_IRQP_SET = 0x8008700c
RTC_PARAM_GET = 0x80187013
RTC_PARAM_SET = 0x80187014
RTC_PIE_OFF = 0x20007006
RTC_PIE_ON = 0x20007005
RTC_PLL_GET = 0x40207011
@ -327,6 +329,7 @@ const (
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_RXQ_OVFL = 0x28

View File

@ -250,6 +250,8 @@ const (
RTC_EPOCH_SET = 0x8004700e
RTC_IRQP_READ = 0x4004700b
RTC_IRQP_SET = 0x8004700c
RTC_PARAM_GET = 0x80187013
RTC_PARAM_SET = 0x80187014
RTC_PIE_OFF = 0x20007006
RTC_PIE_ON = 0x20007005
RTC_PLL_GET = 0x401c7011
@ -327,6 +329,7 @@ const (
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_RXQ_OVFL = 0x28

View File

@ -305,6 +305,8 @@ const (
RTC_EPOCH_SET = 0x8004700e
RTC_IRQP_READ = 0x4004700b
RTC_IRQP_SET = 0x8004700c
RTC_PARAM_GET = 0x80187013
RTC_PARAM_SET = 0x80187014
RTC_PIE_OFF = 0x20007006
RTC_PIE_ON = 0x20007005
RTC_PLL_GET = 0x401c7011
@ -382,6 +384,7 @@ const (
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28

View File

@ -309,6 +309,8 @@ const (
RTC_EPOCH_SET = 0x8008700e
RTC_IRQP_READ = 0x4008700b
RTC_IRQP_SET = 0x8008700c
RTC_PARAM_GET = 0x80187013
RTC_PARAM_SET = 0x80187014
RTC_PIE_OFF = 0x20007006
RTC_PIE_ON = 0x20007005
RTC_PLL_GET = 0x40207011
@ -386,6 +388,7 @@ const (
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28

View File

@ -309,6 +309,8 @@ const (
RTC_EPOCH_SET = 0x8008700e
RTC_IRQP_READ = 0x4008700b
RTC_IRQP_SET = 0x8008700c
RTC_PARAM_GET = 0x80187013
RTC_PARAM_SET = 0x80187014
RTC_PIE_OFF = 0x20007006
RTC_PIE_ON = 0x20007005
RTC_PLL_GET = 0x40207011
@ -386,6 +388,7 @@ const (
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28

View File

@ -238,6 +238,8 @@ const (
RTC_EPOCH_SET = 0x4008700e
RTC_IRQP_READ = 0x8008700b
RTC_IRQP_SET = 0x4008700c
RTC_PARAM_GET = 0x40187013
RTC_PARAM_SET = 0x40187014
RTC_PIE_OFF = 0x7006
RTC_PIE_ON = 0x7005
RTC_PLL_GET = 0x80207011
@ -315,6 +317,7 @@ const (
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28

View File

@ -313,6 +313,8 @@ const (
RTC_EPOCH_SET = 0x4008700e
RTC_IRQP_READ = 0x8008700b
RTC_IRQP_SET = 0x4008700c
RTC_PARAM_GET = 0x40187013
RTC_PARAM_SET = 0x40187014
RTC_PIE_OFF = 0x7006
RTC_PIE_ON = 0x7005
RTC_PLL_GET = 0x80207011
@ -390,6 +392,7 @@ const (
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28

View File

@ -304,6 +304,8 @@ const (
RTC_EPOCH_SET = 0x8008700e
RTC_IRQP_READ = 0x4008700b
RTC_IRQP_SET = 0x8008700c
RTC_PARAM_GET = 0x80187013
RTC_PARAM_SET = 0x80187014
RTC_PIE_OFF = 0x20007006
RTC_PIE_ON = 0x20007005
RTC_PLL_GET = 0x40207011
@ -381,6 +383,7 @@ const (
SO_RCVTIMEO = 0x2000
SO_RCVTIMEO_NEW = 0x44
SO_RCVTIMEO_OLD = 0x2000
SO_RESERVE_MEM = 0x52
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_RXQ_OVFL = 0x24

View File

@ -445,4 +445,5 @@ const (
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_MEMFD_SECRET = 447
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
)

View File

@ -367,4 +367,5 @@ const (
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_MEMFD_SECRET = 447
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
)

View File

@ -409,4 +409,5 @@ const (
SYS_LANDLOCK_ADD_RULE = 445
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
)

View File

@ -312,4 +312,5 @@ const (
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_MEMFD_SECRET = 447
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
)

View File

@ -429,4 +429,5 @@ const (
SYS_LANDLOCK_ADD_RULE = 4445
SYS_LANDLOCK_RESTRICT_SELF = 4446
SYS_PROCESS_MRELEASE = 4448
SYS_FUTEX_WAITV = 4449
)

View File

@ -359,4 +359,5 @@ const (
SYS_LANDLOCK_ADD_RULE = 5445
SYS_LANDLOCK_RESTRICT_SELF = 5446
SYS_PROCESS_MRELEASE = 5448
SYS_FUTEX_WAITV = 5449
)

View File

@ -359,4 +359,5 @@ const (
SYS_LANDLOCK_ADD_RULE = 5445
SYS_LANDLOCK_RESTRICT_SELF = 5446
SYS_PROCESS_MRELEASE = 5448
SYS_FUTEX_WAITV = 5449
)

View File

@ -429,4 +429,5 @@ const (
SYS_LANDLOCK_ADD_RULE = 4445
SYS_LANDLOCK_RESTRICT_SELF = 4446
SYS_PROCESS_MRELEASE = 4448
SYS_FUTEX_WAITV = 4449
)

View File

@ -436,4 +436,5 @@ const (
SYS_LANDLOCK_ADD_RULE = 445
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
)

View File

@ -408,4 +408,5 @@ const (
SYS_LANDLOCK_ADD_RULE = 445
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
)

View File

@ -408,4 +408,5 @@ const (
SYS_LANDLOCK_ADD_RULE = 445
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
)

View File

@ -310,4 +310,5 @@ const (
SYS_LANDLOCK_ADD_RULE = 445
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
)

View File

@ -373,4 +373,5 @@ const (
SYS_LANDLOCK_ADD_RULE = 445
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
)

View File

@ -387,4 +387,5 @@ const (
SYS_LANDLOCK_ADD_RULE = 445
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
)

View File

@ -1144,7 +1144,8 @@ const (
PERF_RECORD_BPF_EVENT = 0x12
PERF_RECORD_CGROUP = 0x13
PERF_RECORD_TEXT_POKE = 0x14
PERF_RECORD_MAX = 0x15
PERF_RECORD_AUX_OUTPUT_HW_ID = 0x15
PERF_RECORD_MAX = 0x16
PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0x0
PERF_RECORD_KSYMBOL_TYPE_BPF = 0x1
PERF_RECORD_KSYMBOL_TYPE_OOL = 0x2
@ -1784,7 +1785,8 @@ const (
const (
NF_NETDEV_INGRESS = 0x0
NF_NETDEV_NUMHOOKS = 0x1
NF_NETDEV_EGRESS = 0x1
NF_NETDEV_NUMHOOKS = 0x2
)
const (
@ -3166,7 +3168,13 @@ const (
DEVLINK_ATTR_RELOAD_ACTION_INFO = 0xa2
DEVLINK_ATTR_RELOAD_ACTION_STATS = 0xa3
DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 0xa4
DEVLINK_ATTR_MAX = 0xa9
DEVLINK_ATTR_RATE_TYPE = 0xa5
DEVLINK_ATTR_RATE_TX_SHARE = 0xa6
DEVLINK_ATTR_RATE_TX_MAX = 0xa7
DEVLINK_ATTR_RATE_NODE_NAME = 0xa8
DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 0xa9
DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 0xaa
DEVLINK_ATTR_MAX = 0xaa
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1
DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0
@ -3463,7 +3471,14 @@ const (
ETHTOOL_MSG_CABLE_TEST_ACT = 0x1a
ETHTOOL_MSG_CABLE_TEST_TDR_ACT = 0x1b
ETHTOOL_MSG_TUNNEL_INFO_GET = 0x1c
ETHTOOL_MSG_USER_MAX = 0x21
ETHTOOL_MSG_FEC_GET = 0x1d
ETHTOOL_MSG_FEC_SET = 0x1e
ETHTOOL_MSG_MODULE_EEPROM_GET = 0x1f
ETHTOOL_MSG_STATS_GET = 0x20
ETHTOOL_MSG_PHC_VCLOCKS_GET = 0x21
ETHTOOL_MSG_MODULE_GET = 0x22
ETHTOOL_MSG_MODULE_SET = 0x23
ETHTOOL_MSG_USER_MAX = 0x23
ETHTOOL_MSG_KERNEL_NONE = 0x0
ETHTOOL_MSG_STRSET_GET_REPLY = 0x1
ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2
@ -3494,7 +3509,14 @@ const (
ETHTOOL_MSG_CABLE_TEST_NTF = 0x1b
ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 0x1c
ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 0x1d
ETHTOOL_MSG_KERNEL_MAX = 0x22
ETHTOOL_MSG_FEC_GET_REPLY = 0x1e
ETHTOOL_MSG_FEC_NTF = 0x1f
ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY = 0x20
ETHTOOL_MSG_STATS_GET_REPLY = 0x21
ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 0x22
ETHTOOL_MSG_MODULE_GET_REPLY = 0x23
ETHTOOL_MSG_MODULE_NTF = 0x24
ETHTOOL_MSG_KERNEL_MAX = 0x24
ETHTOOL_A_HEADER_UNSPEC = 0x0
ETHTOOL_A_HEADER_DEV_INDEX = 0x1
ETHTOOL_A_HEADER_DEV_NAME = 0x2

View File

@ -363,6 +363,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error)
//sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error)
//sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error)
//sys GetActiveProcessorCount(groupNumber uint16) (ret uint32)
//sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32)
// Volume Management Functions
//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW

View File

@ -3172,3 +3172,5 @@ type ModuleInfo struct {
SizeOfImage uint32
EntryPoint uintptr
}
const ALL_PROCESSOR_GROUPS = 0xFFFF

View File

@ -226,6 +226,7 @@ var (
procFreeLibrary = modkernel32.NewProc("FreeLibrary")
procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent")
procGetACP = modkernel32.NewProc("GetACP")
procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts")
procGetCommandLineW = modkernel32.NewProc("GetCommandLineW")
procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW")
@ -251,6 +252,7 @@ var (
procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW")
procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives")
procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW")
procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount")
procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW")
procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW")
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
@ -1967,6 +1969,12 @@ func GetACP() (acp uint32) {
return
}
func GetActiveProcessorCount(groupNumber uint16) (ret uint32) {
r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
ret = uint32(r0)
return
}
func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0)
if r1 == 0 {
@ -2169,6 +2177,12 @@ func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er
return
}
func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) {
r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
ret = uint32(r0)
return
}
func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) {
r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
n = uint32(r0)
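A hedged sketch of the two new processor-count wrappers, using the ALL_PROCESSOR_GROUPS constant added earlier in this diff (Windows-only, hence the build constraint):

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Counts logical processors across all processor groups.
	active := windows.GetActiveProcessorCount(windows.ALL_PROCESSOR_GROUPS)
	maxCount := windows.GetMaximumProcessorCount(windows.ALL_PROCESSOR_GROUPS)
	fmt.Printf("active processors: %d, maximum processors: %d\n", active, maxCount)
}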

8
vendor/modules.txt vendored
View File

@ -10,7 +10,7 @@ github.com/beorn7/perks/quantile
# github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894
## explicit; go 1.12
github.com/certifi/gocertifi
# github.com/cespare/xxhash/v2 v2.1.1
# github.com/cespare/xxhash/v2 v2.1.2
## explicit; go 1.11
github.com/cespare/xxhash/v2
# github.com/cheekybits/genny v1.0.0
@ -291,7 +291,7 @@ github.com/pmezard/go-difflib/difflib
## explicit
github.com/pquerna/cachecontrol
github.com/pquerna/cachecontrol/cacheobject
# github.com/prometheus/client_golang v1.11.0
# github.com/prometheus/client_golang v1.12.1
## explicit; go 1.13
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
@ -305,7 +305,7 @@ github.com/prometheus/client_model/go
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
# github.com/prometheus/procfs v0.6.0
# github.com/prometheus/procfs v0.7.3
## explicit; go 1.13
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
@ -389,7 +389,7 @@ golang.org/x/oauth2/internal
# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
## explicit
golang.org/x/sync/errgroup
# golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
# golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
## explicit; go 1.17
golang.org/x/sys/cpu
golang.org/x/sys/execabs