diff --git a/.bingo/.gitignore b/.bingo/.gitignore deleted file mode 100644 index 9efccf683..000000000 --- a/.bingo/.gitignore +++ /dev/null @@ -1,13 +0,0 @@ - -# Ignore everything -* - -# But not these files: -!.gitignore -!*.mod -!*.sum -!README.md -!Variables.mk -!variables.env - -*tmp.mod diff --git a/.bingo/README.md b/.bingo/README.md deleted file mode 100644 index 7a5c2d4f6..000000000 --- a/.bingo/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Project Development Dependencies. - -This is directory which stores Go modules with pinned buildable package that is used within this repository, managed by https://github.com/bwplotka/bingo. - -* Run `bingo get` to install all tools having each own module file in this directory. -* Run `bingo get ` to install that have own module file in this directory. -* For Makefile: Make sure to put `include .bingo/Variables.mk` in your Makefile, then use $() variable where is the .bingo/.mod. -* For shell: Run `source .bingo/variables.env` to source all environment variable for each tool. -* For go: Import `.bingo/variables.go` to for variable names. -* See https://github.com/bwplotka/bingo or -h on how to add, remove or change binaries dependencies. - -## Requirements - -* Go 1.14+ diff --git a/.bingo/Variables.mk b/.bingo/Variables.mk deleted file mode 100644 index 9c12d9ebd..000000000 --- a/.bingo/Variables.mk +++ /dev/null @@ -1,25 +0,0 @@ -# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.8. DO NOT EDIT. -# All tools are designed to be build inside $GOBIN. -BINGO_DIR := $(dir $(lastword $(MAKEFILE_LIST))) -GOPATH ?= $(shell go env GOPATH) -GOBIN ?= $(firstword $(subst :, ,${GOPATH}))/bin -GO ?= $(shell which go) - -# Below generated variables ensure that every time a tool under each variable is invoked, the correct version -# will be used; reinstalling only if needed. -# For example for goimports variable: -# -# In your main Makefile (for non array binaries): -# -#include .bingo/Variables.mk # Assuming -dir was set to .bingo . -# -#command: $(GOIMPORTS) -# @echo "Running goimports" -# @$(GOIMPORTS) -# -GOIMPORTS := $(GOBIN)/goimports-v0.9.3 -$(GOIMPORTS): $(BINGO_DIR)/goimports.mod - @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. - @echo "(re)installing $(GOBIN)/goimports-v0.9.3" - @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=goimports.mod -o=$(GOBIN)/goimports-v0.9.3 "golang.org/x/tools/cmd/goimports" - diff --git a/.bingo/go.mod b/.bingo/go.mod deleted file mode 100644 index 610249af0..000000000 --- a/.bingo/go.mod +++ /dev/null @@ -1 +0,0 @@ -module _ // Fake go.mod auto-created by 'bingo' for go -moddir compatibility with non-Go projects. Commit this file, together with other .mod files. \ No newline at end of file diff --git a/.bingo/goimports.mod b/.bingo/goimports.mod deleted file mode 100644 index e98966994..000000000 --- a/.bingo/goimports.mod +++ /dev/null @@ -1,5 +0,0 @@ -module _ // Auto generated by https://github.com/bwplotka/bingo. 
DO NOT EDIT - -go 1.20 - -require golang.org/x/tools v0.9.3 // cmd/goimports diff --git a/.bingo/goimports.sum b/.bingo/goimports.sum deleted file mode 100644 index 5a2e60559..000000000 --- a/.bingo/goimports.sum +++ /dev/null @@ -1,6 +0,0 @@ -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= -golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= diff --git a/.bingo/variables.env b/.bingo/variables.env deleted file mode 100644 index 7d2f9f557..000000000 --- a/.bingo/variables.env +++ /dev/null @@ -1,12 +0,0 @@ -# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.8. DO NOT EDIT. -# All tools are designed to be build inside $GOBIN. -# Those variables will work only until 'bingo get' was invoked, or if tools were installed via Makefile's Variables.mk. -GOBIN=${GOBIN:=$(go env GOBIN)} - -if [ -z "$GOBIN" ]; then - GOBIN="$(go env GOPATH)/bin" -fi - - -GOIMPORTS="${GOBIN}/goimports-v0.9.3" - diff --git a/.github/dependabot.yml b/.github/dependabot.yml index ef59576e0..a43a0645b 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,15 +5,11 @@ updates: schedule: interval: "monthly" - package-ecosystem: "gomod" - directory: "/dagger" - schedule: - interval: "monthly" - - package-ecosystem: "gomod" - directory: "/examples/middleware" + directory: "/tutorial/whatsup" schedule: interval: "monthly" - package-ecosystem: "gomod" - directory: "/tutorial/whatsup" + directory: "/exp" schedule: interval: "monthly" - package-ecosystem: "github-actions" diff --git a/.github/settings.yml b/.github/settings.yml index f2a465bda..e78499030 100644 --- a/.github/settings.yml +++ b/.github/settings.yml @@ -21,8 +21,6 @@ branches: # Required. The list of status checks to require in order to merge into this branch contexts: - DCO - - "ci/circleci: go-1-17" - - "ci/circleci: go-1-18" # Required. Enforce all configured restrictions for administrators. Set to true to enforce required status checks for repository administrators. Set to null to disable. enforce_admins: false # Prevent merge commits from being pushed to matching branches diff --git a/.github/workflows/automerge-dependabot.yml b/.github/workflows/automerge-dependabot.yml index 7b2687ec3..b028dfa4e 100644 --- a/.github/workflows/automerge-dependabot.yml +++ b/.github/workflows/automerge-dependabot.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Dependabot metadata id: metadata - uses: dependabot/fetch-metadata@dbb049abf0d677abbd7f7eee0375145b417fdd34 # v2.2.0 + uses: dependabot/fetch-metadata@08eff52bf64351f401fb50d4972fa95b9f2c2d1b # v2.4.0 with: github-token: "${{ secrets.GITHUB_TOKEN }}" - name: Enable auto-merge for Dependabot PRs diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index a84a2ffbf..e2257dd38 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -50,7 +50,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -61,7 +61,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -75,4 +75,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml index dcca16ff3..7de8bb8da 100644 --- a/.github/workflows/container_description.yml +++ b/.github/workflows/container_description.yml @@ -19,6 +19,8 @@ jobs: steps: - name: git checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + persist-credentials: false - name: Set docker hub repo name run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV - name: Push README to Dockerhub @@ -41,6 +43,8 @@ jobs: steps: - name: git checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + persist-credentials: false - name: Set quay.io org name run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV - name: Set quay.io repo name diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index e63714bb9..362939ddc 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -7,8 +7,10 @@ on: - main - "release-*" +# Modified to avoid canceling all matrix jobs when one fails +# Each job type will have its own concurrency group concurrency: - group: ${{ github.workflow }}-${{ (github.event.pull_request && github.event.pull_request.number) || github.ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.job }}-${{ (github.event.pull_request && github.event.pull_request.number) || github.ref || github.run_id }} cancel-in-progress: true # Minimal permissions to be inherited by any job that don't declare it's own permissions @@ -32,29 +34,29 @@ jobs: echo "supported_versions=$matrix" >> $GITHUB_OUTPUT test: - name: Tests + name: Tests (${{ matrix.go_version }}) runs-on: ubuntu-latest needs: supportedVersions - + # Set fail-fast to false to ensure all Go versions are tested regardless of failures strategy: + fail-fast: false matrix: go_version: ${{ fromJSON(needs.supportedVersions.outputs.supported_versions) }} + # Define concurrency at the job level for matrix jobs + concurrency: + group: ${{ github.workflow }}-test-${{ matrix.go_version }}-${{ (github.event.pull_request && github.event.pull_request.number) || github.ref || github.run_id }} + cancel-in-progress: true steps: - name: Checkout code uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Set up Go ${{ matrix.go_version }} - uses: actions/setup-go@v5.0.2 + uses: actions/setup-go@v5.5.0 with: go-version: ${{ matrix.go_version }} - - - name: Cache Go modules - id: cache 
- uses: actions/cache@v4 - with: - path: ~/go/pkg/mod - key: v1-go${{ matrix.go_version }} + check-latest: true + cache-dependency-path: go.sum - name: Run tests and check license run: make check_license test @@ -62,5 +64,5 @@ jobs: CI: true - name: Run style and unused - if: ${{ matrix.go_version == '1.21' }} + if: ${{ matrix.go_version == '1.22' }} run: make style unused diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 0c00c410a..d5d9ca2eb 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -25,15 +25,17 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + persist-credentials: false - name: Install Go - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: - go-version: 1.23.x + go-version: 1.24.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 with: args: --verbose - version: v1.63.4 + version: v2.2.1 diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 2cff641b6..b83fe2d92 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -34,7 +34,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0 + uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 with: results_file: results.sarif results_format: sarif @@ -43,7 +43,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: SARIF file path: results.sarif @@ -51,6 +51,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 with: sarif_file: results.sarif diff --git a/.github/workflows/update-go-versions.yml b/.github/workflows/update-go-versions.yml index 1d0b38ba8..99766c1cc 100644 --- a/.github/workflows/update-go-versions.yml +++ b/.github/workflows/update-go-versions.yml @@ -22,7 +22,7 @@ jobs: # no pull request will be created and the action exits silently. - name: Create a Pull Request if: github.event_name != 'pull_request' - uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f # v7.0.6 + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 with: token: ${{ secrets.GITHUB_TOKEN }} commit-message: "Update Go Collector metrics for new Go version" diff --git a/.golangci.yml b/.golangci.yml index d65572ecc..99966715f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,83 +1,80 @@ ---- -run: - timeout: 5m - skip-files: - # Skip autogenerated files. 
- - ^.*\.(pb|y)\.go$ - -output: - sort-results: true - +version: "2" +issues: + max-same-issues: 0 linters: enable: + - copyloopvar - depguard - durationcheck - errorlint - - exportloopref - - gofmt - - gofumpt - - goimports - - gosimple - - ineffassign - misspell - nolintlint - perfsprint - predeclared - revive - - staticcheck - unconvert - - unused - usestdlibvars - wastedassign - -issues: - max-same-issues: 0 - exclude-rules: - - path: _test.go - linters: - - errcheck - - govet - - structcheck - -linters-settings: - depguard: - rules: - main: - deny: - #- pkg: "sync/atomic" - # desc: "Use go.uber.org/atomic instead of sync/atomic" - - pkg: "github.com/stretchr/testify/assert" - desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert" - - pkg: "github.com/go-kit/kit/log" - desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log" - - pkg: "io/ioutil" - desc: "Use corresponding 'os' or 'io' functions instead." - #- pkg: "regexp" - # desc: "Use github.com/grafana/regexp instead of regexp" - errcheck: - exclude-functions: - # The following 2 methods always return nil as the error - - (*github.com/cespare/xxhash/v2.Digest).Write - - (*github.com/cespare/xxhash/v2.Digest).WriteString - - (*bufio.Writer).WriteRune - goimports: - local-prefixes: github.com/prometheus/client_golang - gofumpt: - extra-rules: true - perfsprint: - # Optimizes even if it requires an int or uint type cast. - int-conversion: true - # Optimizes into `err.Error()` even if it is only equivalent for non-nil errors. - err-error: true - # Optimizes `fmt.Errorf`. - errorf: true - # Optimizes `fmt.Sprintf` with only one argument. - sprintf1: true - # Optimizes into strings concatenation. - strconcat: true - revive: + exclusions: + presets: + - comments + - common-false-positives + - legacy + - std-error-handling rules: - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter - - name: unused-parameter - severity: warning - disabled: true + - linters: + - errcheck + - govet + - nolintlint + - structcheck + path: _test.go + paths: + - ^.*\.(pb|y)\.go$ + warn-unused: true + settings: + depguard: + rules: + main: + deny: + - pkg: github.com/stretchr/testify/assert + desc: Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert + - pkg: github.com/go-kit/kit/log + desc: Use github.com/go-kit/log instead of github.com/go-kit/kit/log + - pkg: io/ioutil + desc: Use corresponding 'os' or 'io' functions instead. + errcheck: + exclude-functions: + # The following 2 methods always return nil as the error + - (*github.com/cespare/xxhash/v2.Digest).Write + - (*github.com/cespare/xxhash/v2.Digest).WriteString + - (*bufio.Writer).WriteRune + perfsprint: + # Optimizes even if it requires an int or uint type cast. + int-conversion: true + # Optimizes into `err.Error()` even if it is only equivalent for non-nil errors. + err-error: true + # Optimizes `fmt.Errorf`. + errorf: true + # Optimizes `fmt.Sprintf` with only one argument. + sprintf1: true + # Optimizes into strings concatenation. 
+ strconcat: true + revive: + rules: + - name: unused-parameter + severity: warning + disabled: true +formatters: + enable: + - gofmt + - gofumpt + - goimports + settings: + gofumpt: + extra-rules: true + goimports: + local-prefixes: + - github.com/prometheus/client_golang + exclusions: + paths: + - ^.*\.(pb|y)\.go$ diff --git a/CHANGELOG.md b/CHANGELOG.md index 654a8d039..6bcf1cd8e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,68 @@ ## Unreleased +## 1.23.2 / 2025-09-05 + +This release is made to upgrade to prometheus/common v0.66.1, which drops the dependencies github.com/grafana/regexp and go.uber.org/atomic and replaces gopkg.in/yaml.v2 with go.yaml.in/yaml/v2 (a drop-in replacement). +There are no functional changes. + +## 1.23.1 / 2025-09-04 + +This release is made to be compatible with a backwards incompatible API change +in prometheus/common v0.66.0. There are no functional changes. + +## 1.23.0 / 2025-07-30 + +* [CHANGE] Minimum required Go version is now 1.23, only the two latest Go versions are supported from now on. #1812 +* [FEATURE] Add WrapCollectorWith and WrapCollectorWithPrefix #1766 +* [FEATURE] Add exemplars for native histograms #1686 +* [ENHANCEMENT] exp/api: Bubble up status code from writeResponse #1823 +* [ENHANCEMENT] collector/go: Update runtime metrics for Go v1.23 and v1.24 #1833 +* [BUGFIX] exp/api: client prompt return on context cancellation #1729 + +## 1.22.0 / 2025-04-07 + +:warning: This release contains potential breaking change if you use experimental `zstd` support introduce in #1496 :warning: + +Experimental support for `zstd` on scrape was added, controlled by the request `Accept-Encoding` header. +It was enabled by default since version 1.20, but now you need to add a blank import to enable it. +The decision to make it opt-in by default was originally made because the Go standard library was expected to have default zstd support added soon, +https://github.com/golang/go/issues/62513 however, the work took longer than anticipated and it will be postponed to upcoming major Go versions. + + +e.g.: +> ```go +> import ( +> _ "github.com/prometheus/client_golang/prometheus/promhttp/zstd" +> ) +> ``` + +* [FEATURE] prometheus: Add new CollectorFunc utility #1724 +* [CHANGE] Minimum required Go version is now 1.22 (we also test client_golang against latest go version - 1.24) #1738 +* [FEATURE] api: `WithLookbackDelta` and `WithStats` options have been added to API client. #1743 +* [CHANGE] :warning: promhttp: Isolate zstd support and klauspost/compress library use to promhttp/zstd package. #1765 + +## 1.21.1 / 2025-03-04 + +* [BUGFIX] prometheus: Revert of `Inc`, `Add` and `Observe` cumulative metric CAS optimizations (#1661), causing regressions on low contention cases. +* [BUGFIX] prometheus: Fix GOOS=ios build, broken due to process_collector_* wrong build tags. + +## 1.21.0 / 2025-02-17 + +:warning: This release contains potential breaking change if you upgrade `github.com/prometheus/common` to 0.62+ together with client_golang. :warning: + +New common version [changes `model.NameValidationScheme` global variable](https://github.com/prometheus/common/pull/724), which relaxes the validation of label names and metric name, allowing all UTF-8 characters. Typically, this should not break any user, unless your test or usage expects strict certain names to panic/fail on client_golang metric registration, gathering or scrape. 
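As a sketch of the opt-out that the next sentence describes (using only the `github.com/prometheus/common/model` identifiers this entry itself names), the change amounts to a one-line assignment in an `init` function:

```go
package main

import "github.com/prometheus/common/model"

func init() {
	// Restore the pre-0.62 strict metric and label name validation.
	model.NameValidationScheme = model.LegacyValidation
}
```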
In case of problems change `model.NameValidationScheme` to old `model.LegacyValidation` value in your project `init` function. + +* [BUGFIX] gocollector: Fix help message for runtime/metric metrics. #1583 +* [BUGFIX] prometheus: Fix `Desc.String()` method for no labels case. #1687 +* [ENHANCEMENT] prometheus: Optimize popular `prometheus.BuildFQName` function; now up to 30% faster. #1665 +* [ENHANCEMENT] prometheus: Optimize `Inc`, `Add` and `Observe` cumulative metrics; now up to 50% faster under high concurrent contention. #1661 +* [CHANGE] Upgrade prometheus/common to 0.62.0 which changes `model.NameValidationScheme` global variable. #1712 +* [CHANGE] Add support for Go 1.23. #1602 +* [FEATURE] process_collector: Add support for Darwin systems. #1600 #1616 #1625 #1675 #1715 +* [FEATURE] api: Add ability to invoke `CloseIdleConnections` on api.Client using `api.Client.(CloseIdler).CloseIdleConnections()` casting. #1513 +* [FEATURE] promhttp: Add `promhttp.HandlerOpts.EnableOpenMetricsTextCreatedSamples` option to create OpenMetrics _created lines. Not recommended unless you want to use opt-in Created Timestamp feature. Community works on OpenMetrics 2.0 format that should make those lines obsolete (they increase cardinality significantly). #1408 +* [FEATURE] prometheus: Add `NewConstNativeHistogram` function. #1654 + ## 1.20.5 / 2024-10-15 * [BUGFIX] testutil: Reverted #1424; functions using compareMetricFamilies are (again) only failing if filtered metricNames are in the expected input. diff --git a/Makefile b/Makefile index 7aef4681a..2a5817c02 100644 --- a/Makefile +++ b/Makefile @@ -11,14 +11,24 @@ # See the License for the specific language governing permissions and # limitations under the License. -include .bingo/Variables.mk include Makefile.common +BUF := $(FIRST_GOPATH)/bin/buf +BUF_VERSION ?= v1.39.0 + +$(BUF): + go install github.com/bufbuild/buf/cmd/buf@$(BUF_VERSION) + +.PHONY: deps +deps: + $(MAKE) common-deps + cd exp && $(GO) mod tidy && $(GO) mod download + .PHONY: test -test: deps common-test +test: deps common-test test-exp .PHONY: test-short -test-short: deps common-test-short +test-short: deps common-test-short test-exp-short .PHONY: generate-go-collector-test-files file := supported_go_versions.txt @@ -36,4 +46,22 @@ generate-go-collector-test-files: .PHONY: fmt fmt: common-format - $(GOIMPORTS) -local github.com/prometheus/client_golang -w . + +.PHONY: proto +proto: ## Regenerate Go from remote write proto. +proto: $(BUF) + @echo ">> regenerating Prometheus Remote Write proto" + @cd exp/api/remote/genproto && $(BUF) generate + @cd exp/api/remote && find genproto/ -type f -exec sed -i '' 's/protohelpers "github.com\/planetscale\/vtprotobuf\/protohelpers"/protohelpers "github.com\/prometheus\/client_golang\/exp\/internal\/github.com\/planetscale\/vtprotobuf\/protohelpers"/g' {} \; + # For some reasons buf generates this unused import, kill it manually for now and reformat. + @cd exp/api/remote && find genproto/ -type f -exec sed -i '' 's/_ "github.com\/gogo\/protobuf\/gogoproto"//g' {} \; + @cd exp/api/remote && go fmt ./genproto/... 
+ $(MAKE) fmt + +.PHONY: test-exp +test-exp: + cd exp && $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + +.PHONY: test-exp-short +test-exp-short: + cd exp && $(GOTEST) -short $(GOOPTS) $(pkgs) diff --git a/Makefile.common b/Makefile.common index d1576bb31..1f4c9025a 100644 --- a/Makefile.common +++ b/Makefile.common @@ -61,7 +61,8 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.63.4 +GOLANGCI_LINT_VERSION ?= v2.2.1 +GOLANGCI_FMT_OPTS ?= # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -138,7 +139,7 @@ common-deps: update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get -d $$m; \ + $(GO) get $$m; \ done $(GO) mod tidy @@ -156,9 +157,13 @@ $(GOTEST_DIR): @mkdir -p $@ .PHONY: common-format -common-format: +common-format: $(GOLANGCI_LINT) @echo ">> formatting code" $(GO) fmt $(pkgs) +ifdef GOLANGCI_LINT + @echo ">> formatting code with golangci-lint" + $(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS) +endif .PHONY: common-vet common-vet: @@ -248,8 +253,8 @@ $(PROMU): cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu rm -r $(PROMU_TMP) -.PHONY: proto -proto: +.PHONY: common-proto +common-proto: @echo ">> generating code from proto files" @./scripts/genproto.sh diff --git a/README.md b/README.md index de62e6be3..7ad4fc0b2 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Prometheus HTTP API. ## Version Compatibility -This library supports the three most recent major releases of Go. While it may function with older versions, we only provide fixes and support for the currently supported Go releases. +This library supports the two most recent major releases of Go. While it may function with older versions, we only provide fixes and support for the currently supported Go releases. > [!NOTE] > See our [Release Process](RELEASE.md#supported-go-versions) for details on compatibility and support policies. diff --git a/RELEASE.md b/RELEASE.md index cbbe72a78..35f42a5f4 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -121,7 +121,7 @@ If conflicts occur merging to main: ### Supported Go Versions -- Support provided only for the three most recent major Go releases +- Support provided only for the two most recent major Go releases - While the library may work with older Go versions, support and fixes are best-effort for those. 
- Each release documents the minimum required Go version diff --git a/VERSION b/VERSION index 7bf9455f0..14bee92c9 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.20.5 +1.23.2 diff --git a/api/client.go b/api/client.go index afcf122ef..0e647b675 100644 --- a/api/client.go +++ b/api/client.go @@ -131,34 +131,26 @@ func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, req = req.WithContext(ctx) } resp, err := c.client.Do(req) - defer func() { - if resp != nil { - resp.Body.Close() - } - }() - if err != nil { return nil, nil, err } var body []byte - done := make(chan struct{}) + done := make(chan error, 1) go func() { var buf bytes.Buffer - _, err = buf.ReadFrom(resp.Body) + _, err := buf.ReadFrom(resp.Body) body = buf.Bytes() - close(done) + done <- err }() select { case <-ctx.Done(): + resp.Body.Close() <-done - err = resp.Body.Close() - if err == nil { - err = ctx.Err() - } - case <-done: + return resp, nil, ctx.Err() + case err = <-done: + resp.Body.Close() + return resp, body, err } - - return resp, body, err } diff --git a/api/client_test.go b/api/client_test.go index 874387868..720084aa0 100644 --- a/api/client_test.go +++ b/api/client_test.go @@ -16,11 +16,13 @@ package api import ( "bytes" "context" + "errors" "fmt" "net/http" "net/http/httptest" "net/url" "testing" + "time" ) func TestConfig(t *testing.T) { @@ -116,6 +118,52 @@ func TestClientURL(t *testing.T) { } } +func TestDoContextCancellation(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("partial")) + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + + <-r.Context().Done() + })) + + defer ts.Close() + + client, err := NewClient(Config{ + Address: ts.URL, + }) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + + req, err := http.NewRequest(http.MethodGet, ts.URL, nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + start := time.Now() + resp, body, err := client.Do(ctx, req) + elapsed := time.Since(start) + + if !errors.Is(err, context.DeadlineExceeded) { + t.Errorf("expected error %v, got: %v", context.DeadlineExceeded, err) + } + if body != nil { + t.Errorf("expected no body due to cancellation, got: %q", string(body)) + } + if elapsed > 200*time.Millisecond { + t.Errorf("Do did not return promptly on cancellation: took %v", elapsed) + } + + if resp != nil && resp.Body != nil { + resp.Body.Close() + } +} + // Serve any http request with a response of N KB of spaces. type serveSpaces struct { sizeKB int diff --git a/api/prometheus/v1/api.go b/api/prometheus/v1/api.go index cddf027fd..1c71d7274 100644 --- a/api/prometheus/v1/api.go +++ b/api/prometheus/v1/api.go @@ -381,6 +381,7 @@ const ( epRuntimeinfo = apiPrefix + "/status/runtimeinfo" epTSDB = apiPrefix + "/status/tsdb" epWalReplay = apiPrefix + "/status/walreplay" + epFormatQuery = apiPrefix + "/format_query" ) // AlertState models the state of an alert. @@ -475,7 +476,7 @@ type API interface { // Flags returns the flag values that Prometheus was launched with. Flags(ctx context.Context) (FlagsResult, error) // LabelNames returns the unique label names present in the block in sorted order by given time range and matchers. 
- LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]string, Warnings, error) + LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) (model.LabelNames, Warnings, error) // LabelValues performs a query for the values of the given label, time range and matchers. LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time, opts ...Option) (model.LabelValues, Warnings, error) // Query performs a query for the given time. @@ -494,7 +495,7 @@ type API interface { // under the TSDB's data directory and returns the directory as response. Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) // Rules returns a list of alerting and recording rules that are currently loaded. - Rules(ctx context.Context) (RulesResult, error) + Rules(ctx context.Context, matches []string) (RulesResult, error) // Targets returns an overview of the current state of the Prometheus target discovery. Targets(ctx context.Context) (TargetsResult, error) // TargetsMetadata returns metadata about metrics currently scraped by the target. @@ -505,6 +506,8 @@ type API interface { TSDB(ctx context.Context, opts ...Option) (TSDBResult, error) // WalReplay returns the current replay status of the wal. WalReplay(ctx context.Context) (WalReplayStatus, error) + // FormatQuery formats a PromQL expression in a prettified way. + FormatQuery(ctx context.Context, query string) (string, error) } // AlertsResult contains the result from querying the alerts endpoint. @@ -1024,7 +1027,7 @@ func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) { return res, err } -func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]string, Warnings, error) { +func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) (model.LabelNames, Warnings, error) { u := h.client.URL(epLabels, nil) q := addOptionalURLParams(u.Query(), opts) @@ -1042,7 +1045,7 @@ func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, e if err != nil { return nil, w, err } - var labelNames []string + var labelNames model.LabelNames err = json.Unmarshal(body, &labelNames) return labelNames, w, err } @@ -1076,9 +1079,19 @@ func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []strin return labelValues, w, err } +// StatsValue is a type for `stats` query parameter. +type StatsValue string + +// AllStatsValue is the query parameter value to return all the query statistics. +const ( + AllStatsValue StatsValue = "all" +) + type apiOptions struct { - timeout time.Duration - limit uint64 + timeout time.Duration + lookbackDelta time.Duration + stats StatsValue + limit uint64 } type Option func(c *apiOptions) @@ -1091,6 +1104,24 @@ func WithTimeout(timeout time.Duration) Option { } } +// WithLookbackDelta can be used to provide an optional query lookback delta for Query and QueryRange. +// This URL variable is not documented on Prometheus HTTP API. +// https://github.com/prometheus/prometheus/blob/e04913aea2792a5c8bc7b3130c389ca1b027dd9b/promql/engine.go#L162-L167 +func WithLookbackDelta(lookbackDelta time.Duration) Option { + return func(o *apiOptions) { + o.lookbackDelta = lookbackDelta + } +} + +// WithStats can be used to provide an optional per step stats for Query and QueryRange. +// This URL variable is not documented on Prometheus HTTP API. 
+// https://github.com/prometheus/prometheus/blob/e04913aea2792a5c8bc7b3130c389ca1b027dd9b/promql/engine.go#L162-L167 +func WithStats(stats StatsValue) Option { + return func(o *apiOptions) { + o.stats = stats + } +} + // WithLimit provides an optional maximum number of returned entries for APIs that support limit parameter // e.g. https://prometheus.io/docs/prometheus/latest/querying/api/#instant-querie:~:text=%3A%20End%20timestamp.-,limit%3D%3Cnumber%3E,-%3A%20Maximum%20number%20of func WithLimit(limit uint64) Option { @@ -1109,6 +1140,14 @@ func addOptionalURLParams(q url.Values, opts []Option) url.Values { q.Set("timeout", opt.timeout.String()) } + if opt.lookbackDelta > 0 { + q.Set("lookback_delta", opt.lookbackDelta.String()) + } + + if opt.stats != "" { + q.Set("stats", string(opt.stats)) + } + if opt.limit > 0 { q.Set("limit", strconv.FormatUint(opt.limit, 10)) } @@ -1199,8 +1238,15 @@ func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, return res, err } -func (h *httpAPI) Rules(ctx context.Context) (RulesResult, error) { +func (h *httpAPI) Rules(ctx context.Context, matches []string) (RulesResult, error) { u := h.client.URL(epRules, nil) + q := u.Query() + + for _, m := range matches { + q.Add("match[]", m) + } + + u.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { @@ -1344,6 +1390,19 @@ func (h *httpAPI) QueryExemplars(ctx context.Context, query string, startTime, e return res, err } +func (h *httpAPI) FormatQuery(ctx context.Context, query string) (string, error) { + u := h.client.URL(epFormatQuery, nil) + q := u.Query() + q.Set("query", query) + + _, body, _, err := h.client.DoGetFallback(ctx, u, q) + if err != nil { + return "", err + } + + return string(body), nil +} + // Warnings is an array of non critical errors type Warnings []string diff --git a/api/prometheus/v1/api_test.go b/api/prometheus/v1/api_test.go index bacf6a096..190fb1e22 100644 --- a/api/prometheus/v1/api_test.go +++ b/api/prometheus/v1/api_test.go @@ -191,9 +191,9 @@ func TestAPIs(t *testing.T) { } } - doRules := func() func() (interface{}, Warnings, error) { + doRules := func(matches []string) func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { - v, err := promAPI.Rules(context.Background()) + v, err := promAPI.Rules(context.Background(), matches) return v, nil, err } } @@ -240,6 +240,13 @@ func TestAPIs(t *testing.T) { } } + doFormatQuery := func(query string) func() (interface{}, Warnings, error) { + return func() (interface{}, Warnings, error) { + v, err := promAPI.FormatQuery(context.Background(), query) + return v, nil, err + } + } + queryTests := []apiTest{ { do: doQuery("2", testTime, WithTimeout(5*time.Second)), @@ -348,7 +355,7 @@ func TestAPIs(t *testing.T) { inRes: []string{"val1", "val2"}, reqMethod: "POST", reqPath: "/api/v1/labels", - res: []string{"val1", "val2"}, + res: model.LabelNames{"val1", "val2"}, }, { do: doLabelNames(nil, testTime.Add(-100*time.Hour), testTime), @@ -356,7 +363,7 @@ func TestAPIs(t *testing.T) { inWarnings: []string{"a"}, reqMethod: "POST", reqPath: "/api/v1/labels", - res: []string{"val1", "val2"}, + res: model.LabelNames{"val1", "val2"}, }, { @@ -379,7 +386,7 @@ func TestAPIs(t *testing.T) { inRes: []string{"val1", "val2"}, reqMethod: "POST", reqPath: "/api/v1/labels", - res: []string{"val1", "val2"}, + res: model.LabelNames{"val1", "val2"}, }, { @@ -689,7 +696,7 @@ func TestAPIs(t *testing.T) { }, { - do: doRules(), + do: doRules(nil), 
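The v1 client additions above (`WithLookbackDelta`, `WithStats`, the matcher-aware `Rules`, and `FormatQuery`) are easiest to read as a small usage sketch. The demo endpoint mirrors the one used in the examples further on; error handling and printed output are illustrative only:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "https://demo.prometheus.io:443"})
	if err != nil {
		log.Fatal(err)
	}
	promAPI := v1.NewAPI(client)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Instant query with the new per-call options.
	res, warnings, err := promAPI.Query(ctx, "up", time.Now(),
		v1.WithTimeout(5*time.Second),
		v1.WithLookbackDelta(3*time.Minute),
		v1.WithStats(v1.AllStatsValue),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res, warnings)

	// Rules now accepts label matchers, forwarded as match[] URL parameters.
	rules, err := promAPI.Rules(ctx, []string{`severity="info"`})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rules)

	// FormatQuery asks the server to pretty-print a PromQL expression.
	pretty, err := promAPI.FormatQuery(ctx, "foo/bar")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pretty)
}
```

Note that `lookback_delta` and `stats` are forwarded as URL parameters that are not part of the documented HTTP API, as the doc comments above point out.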
reqMethod: "GET", reqPath: "/api/v1/rules", inRes: map[string]interface{}{ @@ -784,7 +791,7 @@ func TestAPIs(t *testing.T) { // This has the newer API elements like lastEvaluation, evaluationTime, etc. { - do: doRules(), + do: doRules(nil), reqMethod: "GET", reqPath: "/api/v1/rules", inRes: map[string]interface{}{ @@ -888,7 +895,63 @@ func TestAPIs(t *testing.T) { }, { - do: doRules(), + do: doRules([]string{`severity="info"`}), + reqMethod: "GET", + reqPath: "/api/v1/rules", + inRes: map[string]interface{}{ + "groups": []map[string]interface{}{ + { + "file": "/rules.yaml", + "interval": 60, + "name": "example", + "rules": []map[string]interface{}{ + { + "alerts": []map[string]interface{}{}, + "annotations": map[string]interface{}{ + "summary": "High request latency", + }, + "duration": 600, + "health": "ok", + "labels": map[string]interface{}{ + "severity": "info", + }, + "name": "HighRequestLatency", + "query": "job:request_latency_seconds:mean5m{job=\"myjob\"} > 0.5", + "type": "alerting", + }, + }, + }, + }, + }, + res: RulesResult{ + Groups: []RuleGroup{ + { + Name: "example", + File: "/rules.yaml", + Interval: 60, + Rules: []interface{}{ + AlertingRule{ + Alerts: []*Alert{}, + Annotations: model.LabelSet{ + "summary": "High request latency", + }, + Labels: model.LabelSet{ + "severity": "info", + }, + Duration: 600, + Health: RuleHealthGood, + Name: "HighRequestLatency", + Query: "job:request_latency_seconds:mean5m{job=\"myjob\"} > 0.5", + LastError: "", + }, + }, + }, + }, + }, + }, + + { + do: doRules(nil), reqMethod: "GET", reqPath: "/api/v1/rules", inErr: errors.New("some error"), @@ -1206,6 +1269,13 @@ func TestAPIs(t *testing.T) { }, }, }, + { + do: doFormatQuery("foo/bar"), + reqMethod: "POST", + reqPath: "/api/v1/format_query", + inRes: "foo / bar", + res: "\"foo / bar\"", + }, } var tests []apiTest diff --git a/api/prometheus/v1/example_test.go b/api/prometheus/v1/example_test.go index 9cd42863e..66f45bf38 100644 --- a/api/prometheus/v1/example_test.go +++ b/api/prometheus/v1/example_test.go @@ -28,9 +28,11 @@ import ( v1 "github.com/prometheus/client_golang/api/prometheus/v1" ) +const DemoPrometheusURL = "https://demo.prometheus.io:443" + func ExampleAPI_query() { client, err := api.NewClient(api.Config{ - Address: "http://demo.robustperception.io:9090", + Address: DemoPrometheusURL, }) if err != nil { fmt.Printf("Error creating client: %v\n", err) @@ -53,7 +55,7 @@ func ExampleAPI_query() { func ExampleAPI_queryRange() { client, err := api.NewClient(api.Config{ - Address: "http://demo.robustperception.io:9090", + Address: DemoPrometheusURL, }) if err != nil { fmt.Printf("Error creating client: %v\n", err) @@ -104,7 +106,7 @@ func (u userAgentRoundTripper) RoundTrip(r *http.Request) (*http.Response, error func ExampleAPI_queryRangeWithUserAgent() { client, err := api.NewClient(api.Config{ - Address: "http://demo.robustperception.io:9090", + Address: DemoPrometheusURL, RoundTripper: userAgentRoundTripper{name: "Client-Golang", rt: api.DefaultRoundTripper}, }) if err != nil { @@ -133,7 +135,7 @@ func ExampleAPI_queryRangeWithUserAgent() { func ExampleAPI_queryRangeWithBasicAuth() { client, err := api.NewClient(api.Config{ - Address: "http://demo.robustperception.io:9090", + Address: DemoPrometheusURL, // We can use amazing github.com/prometheus/common/config helper! 
RoundTripper: config.NewBasicAuthRoundTripper( config.NewInlineSecret("me"), @@ -167,7 +169,7 @@ func ExampleAPI_queryRangeWithBasicAuth() { func ExampleAPI_queryRangeWithAuthBearerToken() { client, err := api.NewClient(api.Config{ - Address: "http://demo.robustperception.io:9090", + Address: DemoPrometheusURL, // We can use amazing github.com/prometheus/common/config helper! RoundTripper: config.NewAuthorizationCredentialsRoundTripper( "Bearer", @@ -201,7 +203,7 @@ func ExampleAPI_queryRangeWithAuthBearerToken() { func ExampleAPI_queryRangeWithAuthBearerTokenHeadersRoundTripper() { client, err := api.NewClient(api.Config{ - Address: "http://demo.robustperception.io:9090", + Address: DemoPrometheusURL, // We can use amazing github.com/prometheus/common/config helper! RoundTripper: config.NewHeadersRoundTripper( &config.Headers{ @@ -240,7 +242,7 @@ func ExampleAPI_queryRangeWithAuthBearerTokenHeadersRoundTripper() { func ExampleAPI_series() { client, err := api.NewClient(api.Config{ - Address: "http://demo.robustperception.io:9090", + Address: DemoPrometheusURL, }) if err != nil { fmt.Printf("Error creating client: %v\n", err) diff --git a/exp/README.md b/exp/README.md new file mode 100644 index 000000000..7a509adac --- /dev/null +++ b/exp/README.md @@ -0,0 +1,62 @@ +# client_golang experimental module + +Contains experimental utilities and APIs for Prometheus. +The module may be contain breaking changes or be removed in the future. + +Packages within this module are listed below. + +## Remote + +Implements bindings from Prometheus remote APIs (remote write v1 and v2 for now). + +Contains flexible method for building API clients, that can send remote write protocol messages. + +```go + import ( + "github.com/prometheus/client_golang/exp/api/remote" + ) + ... + + remoteAPI, err := remote.NewAPI( + "https://your-remote-endpoint", + remote.WithAPIHTTPClient(httpClient), + remote.WithAPILogger(logger.With("component", "remote_write_api")), + ) + ... + + stats, err := remoteAPI.Write(ctx, remote.WriteV2MessageType, protoWriteReq) +``` + +Also contains handler methods for applications that would like to handle and store remote write requests. + +```go + import ( + "net/http" + "log" + + "github.com/prometheus/client_golang/exp/api/remote" + ) + ... + + type db {} + + func NewStorage() *db {} + + func (d *db) Store(ctx context.Context, msgType remote.WriteMessageType, req *http.Request) (*remote.WriteResponse, error) {} + ... + + mux := http.NewServeMux() + + remoteWriteHandler := remote.NewHandler(storage, remote.WithHandlerLogger(logger.With("component", "remote_write_handler"))) + mux.Handle("/api/v1/write", remoteWriteHandler) + + server := &http.Server{ + Addr: ":8080", + Handler: mux, + } + if err := server.ListenAndServe(); err != nil { + log.Fatal(err) + } +``` + +For more details, see [go doc](https://pkg.go.dev/github.com/prometheus/client_golang/exp/api/remote). \ No newline at end of file diff --git a/exp/api/remote/genproto/buf.gen.yaml b/exp/api/remote/genproto/buf.gen.yaml new file mode 100644 index 000000000..63e34a0d7 --- /dev/null +++ b/exp/api/remote/genproto/buf.gen.yaml @@ -0,0 +1,21 @@ +# buf.gen.yaml +version: v2 + +plugins: +- remote: buf.build/protocolbuffers/go:v1.31.0 + out: . + opt: + - Mio/prometheus/write/v2/types.proto=./v2 + +# vtproto for efficiency utilities like pooling etc. 
+# https://buf.build/community/planetscale-vtprotobuf?version=v0.6.0 +- remote: buf.build/community/planetscale-vtprotobuf:v0.6.0 + out: . + opt: + - Mio/prometheus/write/v2/types.proto=./v2 + - features=marshal+unmarshal+size + +inputs: +- module: buf.build/prometheus/prometheus:5b212ab78fb7460e831cf7ff2d83e385 + types: + - "io.prometheus.write.v2.Request" diff --git a/exp/api/remote/genproto/v2/symbols.go b/exp/api/remote/genproto/v2/symbols.go new file mode 100644 index 000000000..5e7494656 --- /dev/null +++ b/exp/api/remote/genproto/v2/symbols.go @@ -0,0 +1,97 @@ +// Copyright (c) Bartłomiej Płotka @bwplotka +// Licensed under the Apache License 2.0. + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +// SymbolsTable implements table for easy symbol use. +type SymbolsTable struct { + strings []string + symbolsMap map[string]uint32 +} + +// NewSymbolTable returns a symbol table. +func NewSymbolTable() SymbolsTable { + return SymbolsTable{ + // Empty string is required as a first element. + symbolsMap: map[string]uint32{"": 0}, + strings: []string{""}, + } +} + +// Symbolize adds (if not added before) a string to the symbols table, +// while returning its reference number. +func (t *SymbolsTable) Symbolize(str string) uint32 { + if ref, ok := t.symbolsMap[str]; ok { + return ref + } + ref := uint32(len(t.strings)) + t.strings = append(t.strings, str) + t.symbolsMap[str] = ref + return ref +} + +// SymbolizeLabels symbolize Prometheus labels. +func (t *SymbolsTable) SymbolizeLabels(lbls []string, buf []uint32) []uint32 { + result := buf[:0] + for i := 0; i < len(lbls); i += 2 { + off := t.Symbolize(lbls[i]) + result = append(result, off) + off = t.Symbolize(lbls[i+1]) + result = append(result, off) + } + return result +} + +// Symbols returns computes symbols table to put in e.g. Request.Symbols. +// As per spec, order does not matter. +func (t *SymbolsTable) Symbols() []string { + return t.strings +} + +// Reset clears symbols table. +func (t *SymbolsTable) Reset() { + // NOTE: Make sure to keep empty symbol. + t.strings = t.strings[:1] + for k := range t.symbolsMap { + if k == "" { + continue + } + delete(t.symbolsMap, k) + } +} + +// DesymbolizeLabels decodes label references, with given symbols to labels. 
+func DesymbolizeLabels(labelRefs []uint32, symbols, buf []string) []string { + result := buf[:0] + for i := 0; i < len(labelRefs); i += 2 { + result = append(result, symbols[labelRefs[i]], symbols[labelRefs[i+1]]) + } + return result +} diff --git a/exp/api/remote/genproto/v2/symbols_test.go b/exp/api/remote/genproto/v2/symbols_test.go new file mode 100644 index 000000000..161cdbddb --- /dev/null +++ b/exp/api/remote/genproto/v2/symbols_test.go @@ -0,0 +1,80 @@ +// Copyright (c) Bartłomiej Płotka @bwplotka +// Licensed under the Apache License 2.0. + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func requireEqual(t testing.TB, expected, got any) { + if diff := cmp.Diff(expected, got); diff != "" { + t.Fatal(diff) + } +} + +func TestSymbolsTable(t *testing.T) { + s := NewSymbolTable() + requireEqual(t, []string{""}, s.Symbols()) + requireEqual(t, uint32(0), s.Symbolize("")) + requireEqual(t, []string{""}, s.Symbols()) + + requireEqual(t, uint32(1), s.Symbolize("abc")) + requireEqual(t, []string{"", "abc"}, s.Symbols()) + + requireEqual(t, uint32(2), s.Symbolize("__name__")) + requireEqual(t, []string{"", "abc", "__name__"}, s.Symbols()) + + requireEqual(t, uint32(3), s.Symbolize("foo")) + requireEqual(t, []string{"", "abc", "__name__", "foo"}, s.Symbols()) + + s.Reset() + requireEqual(t, []string{""}, s.Symbols()) + requireEqual(t, uint32(0), s.Symbolize("")) + + requireEqual(t, uint32(1), s.Symbolize("__name__")) + requireEqual(t, []string{"", "__name__"}, s.Symbols()) + + requireEqual(t, uint32(2), s.Symbolize("abc")) + requireEqual(t, []string{"", "__name__", "abc"}, s.Symbols()) + + ls := []string{"__name__", "qwer", "zxcv", "1234"} + encoded := s.SymbolizeLabels(ls, nil) + requireEqual(t, []uint32{1, 3, 4, 5}, encoded) + decoded := DesymbolizeLabels(encoded, s.Symbols(), nil) + requireEqual(t, ls, decoded) + + // Different buf. 
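A short usage sketch for the symbols helpers defined above. The import path is inferred from this diff's file layout (`exp/api/remote/genproto/v2`, package `writev2`) and should be treated as an assumption:

```go
package main

import (
	"fmt"

	// Import path inferred from the file layout in this diff (assumption).
	writev2 "github.com/prometheus/client_golang/exp/api/remote/genproto/v2"
)

func main() {
	st := writev2.NewSymbolTable()

	// Label pairs are flat name/value slices; SymbolizeLabels interns every
	// string and returns references into the shared symbols array.
	refs := st.SymbolizeLabels([]string{"__name__", "http_requests_total", "job", "api"}, nil)
	fmt.Println(refs)         // [1 2 3 4]
	fmt.Println(st.Symbols()) // symbols: "", "__name__", "http_requests_total", "job", "api"

	// DesymbolizeLabels reverses the mapping given the same symbols slice.
	fmt.Println(writev2.DesymbolizeLabels(refs, st.Symbols(), nil))
}
```

Because the empty string always occupies index 0, the first interned label name receives reference 1, which is exactly what the test above asserts.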
+ ls = []string{"__name__", "qwer", "zxcv2222", "1234"} + encoded = s.SymbolizeLabels(ls, []uint32{1, 3, 4, 5}) + requireEqual(t, []uint32{1, 3, 6, 5}, encoded) +} diff --git a/exp/api/remote/genproto/v2/types.pb.go b/exp/api/remote/genproto/v2/types.pb.go new file mode 100644 index 000000000..767a555cd --- /dev/null +++ b/exp/api/remote/genproto/v2/types.pb.go @@ -0,0 +1,1183 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// NOTE: This file is also available on https://buf.build/prometheus/prometheus/docs/main:io.prometheus.write.v2 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: io/prometheus/write/v2/types.proto + +package writev2 + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Metadata_MetricType int32 + +const ( + Metadata_METRIC_TYPE_UNSPECIFIED Metadata_MetricType = 0 + Metadata_METRIC_TYPE_COUNTER Metadata_MetricType = 1 + Metadata_METRIC_TYPE_GAUGE Metadata_MetricType = 2 + Metadata_METRIC_TYPE_HISTOGRAM Metadata_MetricType = 3 + Metadata_METRIC_TYPE_GAUGEHISTOGRAM Metadata_MetricType = 4 + Metadata_METRIC_TYPE_SUMMARY Metadata_MetricType = 5 + Metadata_METRIC_TYPE_INFO Metadata_MetricType = 6 + Metadata_METRIC_TYPE_STATESET Metadata_MetricType = 7 +) + +// Enum value maps for Metadata_MetricType. +var ( + Metadata_MetricType_name = map[int32]string{ + 0: "METRIC_TYPE_UNSPECIFIED", + 1: "METRIC_TYPE_COUNTER", + 2: "METRIC_TYPE_GAUGE", + 3: "METRIC_TYPE_HISTOGRAM", + 4: "METRIC_TYPE_GAUGEHISTOGRAM", + 5: "METRIC_TYPE_SUMMARY", + 6: "METRIC_TYPE_INFO", + 7: "METRIC_TYPE_STATESET", + } + Metadata_MetricType_value = map[string]int32{ + "METRIC_TYPE_UNSPECIFIED": 0, + "METRIC_TYPE_COUNTER": 1, + "METRIC_TYPE_GAUGE": 2, + "METRIC_TYPE_HISTOGRAM": 3, + "METRIC_TYPE_GAUGEHISTOGRAM": 4, + "METRIC_TYPE_SUMMARY": 5, + "METRIC_TYPE_INFO": 6, + "METRIC_TYPE_STATESET": 7, + } +) + +func (x Metadata_MetricType) Enum() *Metadata_MetricType { + p := new(Metadata_MetricType) + *p = x + return p +} + +func (x Metadata_MetricType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Metadata_MetricType) Descriptor() protoreflect.EnumDescriptor { + return file_io_prometheus_write_v2_types_proto_enumTypes[0].Descriptor() +} + +func (Metadata_MetricType) Type() protoreflect.EnumType { + return &file_io_prometheus_write_v2_types_proto_enumTypes[0] +} + +func (x Metadata_MetricType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Metadata_MetricType.Descriptor instead. 
+func (Metadata_MetricType) EnumDescriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{4, 0} +} + +type Histogram_ResetHint int32 + +const ( + Histogram_RESET_HINT_UNSPECIFIED Histogram_ResetHint = 0 // Need to test for a counter reset explicitly. + Histogram_RESET_HINT_YES Histogram_ResetHint = 1 // This is the 1st histogram after a counter reset. + Histogram_RESET_HINT_NO Histogram_ResetHint = 2 // There was no counter reset between this and the previous Histogram. + Histogram_RESET_HINT_GAUGE Histogram_ResetHint = 3 // This is a gauge histogram where counter resets don't happen. +) + +// Enum value maps for Histogram_ResetHint. +var ( + Histogram_ResetHint_name = map[int32]string{ + 0: "RESET_HINT_UNSPECIFIED", + 1: "RESET_HINT_YES", + 2: "RESET_HINT_NO", + 3: "RESET_HINT_GAUGE", + } + Histogram_ResetHint_value = map[string]int32{ + "RESET_HINT_UNSPECIFIED": 0, + "RESET_HINT_YES": 1, + "RESET_HINT_NO": 2, + "RESET_HINT_GAUGE": 3, + } +) + +func (x Histogram_ResetHint) Enum() *Histogram_ResetHint { + p := new(Histogram_ResetHint) + *p = x + return p +} + +func (x Histogram_ResetHint) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Histogram_ResetHint) Descriptor() protoreflect.EnumDescriptor { + return file_io_prometheus_write_v2_types_proto_enumTypes[1].Descriptor() +} + +func (Histogram_ResetHint) Type() protoreflect.EnumType { + return &file_io_prometheus_write_v2_types_proto_enumTypes[1] +} + +func (x Histogram_ResetHint) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Histogram_ResetHint.Descriptor instead. +func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{5, 0} +} + +// Request represents a request to write the given timeseries to a remote destination. +// This message was introduced in the Remote Write 2.0 specification: +// https://prometheus.io/docs/concepts/remote_write_spec_2_0/ +// +// The canonical Content-Type request header value for this message is +// "application/x-protobuf;proto=io.prometheus.write.v2.Request" +// +// NOTE: gogoproto options might change in future for this file, they +// are not part of the spec proto (they only modify the generated Go code, not +// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908 +type Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // symbols contains a de-duplicated array of string elements used for various + // items in a Request message, like labels and metadata items. For the sender's convenience + // around empty values for optional fields like unit_ref, symbols array MUST start with + // empty string. + // + // To decode each of the symbolized strings, referenced, by "ref(s)" suffix, you + // need to lookup the actual string by index from symbols array. The order of + // strings is up to the sender. The receiver should not assume any particular encoding. + Symbols []string `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"` + // timeseries represents an array of distinct series with 0 or more samples. 
+ Timeseries []*TimeSeries `protobuf:"bytes,5,rep,name=timeseries,proto3" json:"timeseries,omitempty"` +} + +func (x *Request) Reset() { + *x = Request{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Request) ProtoMessage() {} + +func (x *Request) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Request.ProtoReflect.Descriptor instead. +func (*Request) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Request) GetSymbols() []string { + if x != nil { + return x.Symbols + } + return nil +} + +func (x *Request) GetTimeseries() []*TimeSeries { + if x != nil { + return x.Timeseries + } + return nil +} + +// TimeSeries represents a single series. +type TimeSeries struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // labels_refs is a list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's length is always + // a multiple of two, and the underlying labels should be sorted lexicographically. + // + // Note that there might be multiple TimeSeries objects in the same + // Requests with the same labels e.g. for different exemplars, metadata + // or created timestamp. + LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` + // Timeseries messages can either specify samples or (native) histogram samples + // (histogram field), but not both. For a typical sender (real-time metric + // streaming), in healthy cases, there will be only one sample or histogram. + // + // Samples and histograms are sorted by timestamp (older first). + Samples []*Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples,omitempty"` + Histograms []*Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms,omitempty"` + // exemplars represents an optional set of exemplars attached to this series' samples. + Exemplars []*Exemplar `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + // metadata represents the metadata associated with the given series' samples. + Metadata *Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` + // created_timestamp represents an optional created timestamp associated with + // this series' samples in ms format, typically for counter or histogram type + // metrics. Created timestamp represents the time when the counter started + // counting (sometimes referred to as start timestamp), which can increase + // the accuracy of query results. + // + // Note that some receivers might require this and in return fail to + // ingest such samples within the Request. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. 
+ // + // Note that the "optional" keyword is omitted due to + // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields + // Zero value means value not set. If you need to use exactly zero value for + // the timestamp, use 1 millisecond before or after. + CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` +} + +func (x *TimeSeries) Reset() { + *x = TimeSeries{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeries) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeries) ProtoMessage() {} + +func (x *TimeSeries) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeries.ProtoReflect.Descriptor instead. +func (*TimeSeries) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{1} +} + +func (x *TimeSeries) GetLabelsRefs() []uint32 { + if x != nil { + return x.LabelsRefs + } + return nil +} + +func (x *TimeSeries) GetSamples() []*Sample { + if x != nil { + return x.Samples + } + return nil +} + +func (x *TimeSeries) GetHistograms() []*Histogram { + if x != nil { + return x.Histograms + } + return nil +} + +func (x *TimeSeries) GetExemplars() []*Exemplar { + if x != nil { + return x.Exemplars + } + return nil +} + +func (x *TimeSeries) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *TimeSeries) GetCreatedTimestamp() int64 { + if x != nil { + return x.CreatedTimestamp + } + return 0 +} + +// Exemplar is an additional information attached to some series' samples. +// It is typically used to attach an example trace or request ID associated with +// the metric changes. +type Exemplar struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // labels_refs is an optional list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's len is always + // a multiple of 2, and the underlying labels should be sorted lexicographically. + // If the exemplar references a trace it should use the `trace_id` label name, as a best practice. + LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` + // value represents an exact example value. This can be useful when the exemplar + // is attached to a histogram, which only gives an estimated value through buckets. + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + // timestamp represents the timestamp of the exemplar in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. 
+ Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (x *Exemplar) Reset() { + *x = Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Exemplar) ProtoMessage() {} + +func (x *Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. +func (*Exemplar) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{2} +} + +func (x *Exemplar) GetLabelsRefs() []uint32 { + if x != nil { + return x.LabelsRefs + } + return nil +} + +func (x *Exemplar) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *Exemplar) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +// Sample represents series sample. +type Sample struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // value of the sample. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // timestamp represents timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (x *Sample) Reset() { + *x = Sample{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Sample) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Sample) ProtoMessage() {} + +func (x *Sample) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Sample.ProtoReflect.Descriptor instead. +func (*Sample) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{3} +} + +func (x *Sample) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *Sample) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +// Metadata represents the metadata associated with the given series' samples. +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type Metadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=io.prometheus.write.v2.Metadata_MetricType" json:"type,omitempty"` + // help_ref is a reference to the Request.symbols array representing help + // text for the metric. Help is optional, reference should point to an empty string in + // such a case. 
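Taken together, the comments above describe a simple interning scheme: every string lives exactly once in Request.Symbols (index 0 is the mandatory empty string), and labels, exemplar labels, help and unit are all carried as indices into that array. A minimal sketch, assuming the generated writev2 package from this diff (import path inferred from the file location) and purely illustrative metric names, timestamps and values:

package example

import (
	writev2 "github.com/prometheus/client_golang/exp/api/remote/genproto/v2"
)

// buildExampleRequest interns each string once in Symbols and refers to it by
// index from LabelsRefs, HelpRef and UnitRef.
func buildExampleRequest() *writev2.Request {
	symbols := []string{
		"",                     // 0: required leading empty string
		"__name__",             // 1
		"http_requests_total",  // 2
		"job",                  // 3
		"api",                  // 4
		"Total HTTP requests.", // 5: help text
		"requests",             // 6: unit
		"trace_id",             // 7
		"4bf92f3577b34da6",     // 8
	}
	return &writev2.Request{
		Symbols: symbols,
		Timeseries: []*writev2.TimeSeries{{
			// Name/value reference pairs, sorted lexicographically by name:
			// __name__="http_requests_total", job="api".
			LabelsRefs: []uint32{1, 2, 3, 4},
			Samples:    []*writev2.Sample{{Value: 1027, Timestamp: 1700000000000}},
			Exemplars: []*writev2.Exemplar{{
				LabelsRefs: []uint32{7, 8}, // trace_id="4bf92f3577b34da6"
				Value:      0.23,
				Timestamp:  1700000000000,
			}},
			Metadata: &writev2.Metadata{
				Type:    writev2.Metadata_METRIC_TYPE_COUNTER,
				HelpRef: 5, // "Total HTTP requests."
				UnitRef: 6, // "requests"
			},
		}},
	}
}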
+ HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"` + // unit_ref is a reference to the Request.symbols array representing a unit + // for the metric. Unit is optional, reference should point to an empty string in + // such a case. + UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{4} +} + +func (x *Metadata) GetType() Metadata_MetricType { + if x != nil { + return x.Type + } + return Metadata_METRIC_TYPE_UNSPECIFIED +} + +func (x *Metadata) GetHelpRef() uint32 { + if x != nil { + return x.HelpRef + } + return 0 +} + +func (x *Metadata) GetUnitRef() uint32 { + if x != nil { + return x.UnitRef + } + return 0 +} + +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +type Histogram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Count: + // + // *Histogram_CountInt + // *Histogram_CountFloat + Count isHistogram_Count `protobuf_oneof:"count"` + Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` // Sum of observations in the histogram. + // The schema defines the bucket schema. Currently, valid numbers + // are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be + // added in future for new bucketing layouts. + // + // The schema equal to -53 means custom buckets. See + // custom_values field description for more details. + // + // Values between -4 and 8 represent base-2 bucket schema, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n (n is schema value) logarithmic buckets. Or in other words, + // each bucket boundary is the previous boundary times 2^(2^-n). + Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` // Breadth of the zero bucket. + // Types that are assignable to ZeroCount: + // + // *Histogram_ZeroCountInt + // *Histogram_ZeroCountFloat + ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` + // Negative Buckets. 
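The schema comment above fully determines the standard exponential bucket boundaries: for a schema n in [-4, 8] the growth factor between consecutive boundaries is 2^(2^-n), and 1 is always a boundary. A small sketch of that arithmetic (function name and index convention are illustrative; schema -53, i.e. custom buckets, is handled via custom_values and not covered here):

package main

import (
	"fmt"
	"math"
)

// bucketUpperBound returns the upper boundary of the exponential bucket with
// the given index: (2^(2^-schema))^index, so index 0 always ends at 1.
func bucketUpperBound(schema int32, index int) float64 {
	base := math.Pow(2, math.Pow(2, -float64(schema))) // 2^(2^-schema)
	return math.Pow(base, float64(index))
}

func main() {
	// Schema 0: the factor is 2, so boundaries run ..., 1, 2, 4, 8, ...
	fmt.Println(bucketUpperBound(0, 3)) // 8
	// Schema 3: each power of two is split into 2^3 = 8 buckets, so eight
	// steps up from 1 land back on 2 (up to floating-point rounding).
	fmt.Println(bucketUpperBound(3, 8))
}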
+ NegativeSpans []*BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans,omitempty"` + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. + NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` // Absolute count of each bucket. + // Positive Buckets. + // + // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows: + // * The span offset+length points to an the index of the custom_values array + // or +Inf if pointing to the len of the array. + // * The counts and deltas have the same meaning as for exponential histograms. + PositiveSpans []*BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans,omitempty"` + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. + PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` // Absolute count of each bucket. + ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=io.prometheus.write.v2.Histogram_ResetHint" json:"reset_hint,omitempty"` + // timestamp represents timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // custom_values is an additional field used by non-exponential bucketing layouts. + // + // For custom buckets (-53 schema value) custom_values specify monotonically + // increasing upper inclusive boundaries for the bucket counts with arbitrary + // widths for this histogram. In other words, custom_values represents custom, + // explicit bucketing that could have been converted from the classic histograms. + // + // Those bounds are then referenced by spans in positive_spans with corresponding positive + // counts of deltas (refer to positive_spans for more details). This way we can + // have encode sparse histograms with custom bucketing (many buckets are often + // not used). + // + // Note that for custom bounds, even negative observations are placed in the positive + // counts to simplify the implementation and avoid ambiguity of where to place + // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and + // the zero bucket are unused, if the schema indicates custom bucketing. + // + // For each upper boundary the previous boundary represent the lower exclusive + // boundary for that bucket. The first element is the upper inclusive boundary + // for the first bucket, which implicitly has a lower inclusive bound of -Inf. + // This is similar to "le" label semantics on classic histograms. 
You may add a + // bucket with an upper bound of 0 to make sure that you really have no negative + // observations, but in practice, native histogram rendering will show both with + // or without first upper boundary 0 and no negative counts as the same case. + // + // The last element is not only the upper inclusive bound of the last regular + // bucket, but implicitly the lower exclusive bound of the +Inf bucket. + CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"` +} + +func (x *Histogram) Reset() { + *x = Histogram{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Histogram) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Histogram) ProtoMessage() {} + +func (x *Histogram) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Histogram.ProtoReflect.Descriptor instead. +func (*Histogram) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{5} +} + +func (m *Histogram) GetCount() isHistogram_Count { + if m != nil { + return m.Count + } + return nil +} + +func (x *Histogram) GetCountInt() uint64 { + if x, ok := x.GetCount().(*Histogram_CountInt); ok { + return x.CountInt + } + return 0 +} + +func (x *Histogram) GetCountFloat() float64 { + if x, ok := x.GetCount().(*Histogram_CountFloat); ok { + return x.CountFloat + } + return 0 +} + +func (x *Histogram) GetSum() float64 { + if x != nil { + return x.Sum + } + return 0 +} + +func (x *Histogram) GetSchema() int32 { + if x != nil { + return x.Schema + } + return 0 +} + +func (x *Histogram) GetZeroThreshold() float64 { + if x != nil { + return x.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { + if m != nil { + return m.ZeroCount + } + return nil +} + +func (x *Histogram) GetZeroCountInt() uint64 { + if x, ok := x.GetZeroCount().(*Histogram_ZeroCountInt); ok { + return x.ZeroCountInt + } + return 0 +} + +func (x *Histogram) GetZeroCountFloat() float64 { + if x, ok := x.GetZeroCount().(*Histogram_ZeroCountFloat); ok { + return x.ZeroCountFloat + } + return 0 +} + +func (x *Histogram) GetNegativeSpans() []*BucketSpan { + if x != nil { + return x.NegativeSpans + } + return nil +} + +func (x *Histogram) GetNegativeDeltas() []int64 { + if x != nil { + return x.NegativeDeltas + } + return nil +} + +func (x *Histogram) GetNegativeCounts() []float64 { + if x != nil { + return x.NegativeCounts + } + return nil +} + +func (x *Histogram) GetPositiveSpans() []*BucketSpan { + if x != nil { + return x.PositiveSpans + } + return nil +} + +func (x *Histogram) GetPositiveDeltas() []int64 { + if x != nil { + return x.PositiveDeltas + } + return nil +} + +func (x *Histogram) GetPositiveCounts() []float64 { + if x != nil { + return x.PositiveCounts + } + return nil +} + +func (x *Histogram) GetResetHint() Histogram_ResetHint { + if x != nil { + return x.ResetHint + } + return Histogram_RESET_HINT_UNSPECIFIED +} + +func (x *Histogram) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *Histogram) GetCustomValues() 
[]float64 { + if x != nil { + return x.CustomValues + } + return nil +} + +type isHistogram_Count interface { + isHistogram_Count() +} + +type Histogram_CountInt struct { + CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof"` +} + +type Histogram_CountFloat struct { + CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof"` +} + +func (*Histogram_CountInt) isHistogram_Count() {} + +func (*Histogram_CountFloat) isHistogram_Count() {} + +type isHistogram_ZeroCount interface { + isHistogram_ZeroCount() +} + +type Histogram_ZeroCountInt struct { + ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof"` +} + +type Histogram_ZeroCountFloat struct { + ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof"` +} + +func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} + +func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +type BucketSpan struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative). + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` // Length of consecutive buckets. +} + +func (x *BucketSpan) Reset() { + *x = BucketSpan{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BucketSpan) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BucketSpan) ProtoMessage() {} + +func (x *BucketSpan) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead. 
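The BucketSpan comment above, together with the *_deltas fields of Histogram, implies a small decoding loop on the receiver side: offsets accumulate into absolute bucket indices (the first offset is the start index, later offsets are gaps between spans), and deltas accumulate into absolute counts. A sketch under those assumptions (import path inferred from this diff; no bounds checking, and it assumes len(deltas) equals the sum of the span lengths):

package example

import (
	writev2 "github.com/prometheus/client_golang/exp/api/remote/genproto/v2"
)

type bucket struct {
	Index int32 // exponential bucket index, interpreted via the schema field
	Count int64 // absolute count for this bucket
}

// expandBuckets turns spans plus delta-encoded counts into absolute
// (index, count) pairs.
func expandBuckets(spans []*writev2.BucketSpan, deltas []int64) []bucket {
	out := make([]bucket, 0, len(deltas))
	var idx int32
	var count int64
	d := 0
	for _, span := range spans {
		idx += span.GetOffset() // gap to the previous span, or start index for the first
		for i := uint32(0); i < span.GetLength(); i++ {
			count += deltas[d] // delta relative to the previous bucket, or to zero
			d++
			out = append(out, bucket{Index: idx, Count: count})
			idx++
		}
	}
	return out
}

For float histograms the span layout is the same, but negative_counts/positive_counts already carry absolute per-bucket values, so no delta accumulation is needed; for custom buckets (schema -53) the resulting indices point into custom_values instead.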
+func (*BucketSpan) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{6} +} + +func (x *BucketSpan) GetOffset() int32 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *BucketSpan) GetLength() uint32 { + if x != nil { + return x.Length + } + return 0 +} + +var File_io_prometheus_write_v2_types_proto protoreflect.FileDescriptor + +var file_io_prometheus_write_v2_types_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x14, 0x67, 0x6f, + 0x67, 0x6f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x67, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x12, 0x48, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x42, + 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x04, 0x22, 0xed, 0x02, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x52, 0x65, 0x66, 0x73, 0x12, 0x3e, 0x0a, 0x07, 0x73, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x07, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0a, 0x68, 0x69, 0x73, 0x74, 0x6f, + 0x67, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x42, 0x04, + 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x0a, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x73, + 0x12, 0x44, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x09, 0x65, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x12, 0x42, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 
0x61, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, + 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x5f, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x5f, 0x72, 0x65, + 0x66, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x52, 0x65, 0x66, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x06, 0x53, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xe1, 0x02, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x68, 0x65, 0x6c, 0x70, 0x5f, 0x72, 0x65, 0x66, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x68, 0x65, 0x6c, 0x70, 0x52, 0x65, 0x66, 0x12, + 0x19, 0x0a, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x66, 0x22, 0xdd, 0x01, 0x0a, 0x0a, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x45, 0x54, + 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x01, 0x12, + 0x15, 0x0a, 0x11, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, + 0x41, 0x55, 0x47, 0x45, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, + 0x03, 0x12, 0x1e, 0x0a, 0x1a, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, + 0x04, 0x12, 0x17, 0x0a, 0x13, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x45, + 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x06, + 0x12, 0x18, 0x0a, 0x14, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x53, 0x45, 0x54, 0x10, 0x07, 0x22, 0xc4, 0x06, 
0x0a, 0x09, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x1d, 0x0a, 0x09, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x08, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0b, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0a, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, + 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, + 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x7a, + 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x0c, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x49, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x01, 0x48, 0x01, 0x52, + 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, + 0x4f, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x42, 0x04, 0xc8, 0xde, 0x1f, + 0x00, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, + 0x74, 0x61, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6e, 0x65, 0x67, + 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x01, 0x52, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x12, 0x4f, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, + 0x70, 0x61, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x42, 0x04, + 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, + 0x61, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0e, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, + 0x0d, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x4a, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x68, + 0x69, 
0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x2e, 0x52, 0x65, 0x73, + 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74, 0x52, 0x09, 0x72, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, + 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x23, 0x0a, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x18, 0x10, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, + 0x74, 0x12, 0x1a, 0x0a, 0x16, 0x52, 0x45, 0x53, 0x45, 0x54, 0x5f, 0x48, 0x49, 0x4e, 0x54, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, + 0x0e, 0x52, 0x45, 0x53, 0x45, 0x54, 0x5f, 0x48, 0x49, 0x4e, 0x54, 0x5f, 0x59, 0x45, 0x53, 0x10, + 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x52, 0x45, 0x53, 0x45, 0x54, 0x5f, 0x48, 0x49, 0x4e, 0x54, 0x5f, + 0x4e, 0x4f, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x53, 0x45, 0x54, 0x5f, 0x48, 0x49, + 0x4e, 0x54, 0x5f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, 0x03, 0x42, 0x07, 0x0a, 0x05, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x42, 0x0c, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, + 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, + 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, + 0x09, 0x5a, 0x07, 0x77, 0x72, 0x69, 0x74, 0x65, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_io_prometheus_write_v2_types_proto_rawDescOnce sync.Once + file_io_prometheus_write_v2_types_proto_rawDescData = file_io_prometheus_write_v2_types_proto_rawDesc +) + +func file_io_prometheus_write_v2_types_proto_rawDescGZIP() []byte { + file_io_prometheus_write_v2_types_proto_rawDescOnce.Do(func() { + file_io_prometheus_write_v2_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_write_v2_types_proto_rawDescData) + }) + return file_io_prometheus_write_v2_types_proto_rawDescData +} + +var file_io_prometheus_write_v2_types_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_io_prometheus_write_v2_types_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_io_prometheus_write_v2_types_proto_goTypes = []interface{}{ + (Metadata_MetricType)(0), // 0: io.prometheus.write.v2.Metadata.MetricType + (Histogram_ResetHint)(0), // 1: io.prometheus.write.v2.Histogram.ResetHint + (*Request)(nil), // 2: io.prometheus.write.v2.Request + (*TimeSeries)(nil), // 3: io.prometheus.write.v2.TimeSeries + (*Exemplar)(nil), // 4: io.prometheus.write.v2.Exemplar + (*Sample)(nil), // 5: io.prometheus.write.v2.Sample + (*Metadata)(nil), // 6: io.prometheus.write.v2.Metadata + (*Histogram)(nil), // 7: io.prometheus.write.v2.Histogram + (*BucketSpan)(nil), // 8: io.prometheus.write.v2.BucketSpan +} +var file_io_prometheus_write_v2_types_proto_depIdxs = []int32{ + 3, // 0: io.prometheus.write.v2.Request.timeseries:type_name -> 
io.prometheus.write.v2.TimeSeries + 5, // 1: io.prometheus.write.v2.TimeSeries.samples:type_name -> io.prometheus.write.v2.Sample + 7, // 2: io.prometheus.write.v2.TimeSeries.histograms:type_name -> io.prometheus.write.v2.Histogram + 4, // 3: io.prometheus.write.v2.TimeSeries.exemplars:type_name -> io.prometheus.write.v2.Exemplar + 6, // 4: io.prometheus.write.v2.TimeSeries.metadata:type_name -> io.prometheus.write.v2.Metadata + 0, // 5: io.prometheus.write.v2.Metadata.type:type_name -> io.prometheus.write.v2.Metadata.MetricType + 8, // 6: io.prometheus.write.v2.Histogram.negative_spans:type_name -> io.prometheus.write.v2.BucketSpan + 8, // 7: io.prometheus.write.v2.Histogram.positive_spans:type_name -> io.prometheus.write.v2.BucketSpan + 1, // 8: io.prometheus.write.v2.Histogram.reset_hint:type_name -> io.prometheus.write.v2.Histogram.ResetHint + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_io_prometheus_write_v2_types_proto_init() } +func file_io_prometheus_write_v2_types_proto_init() { + if File_io_prometheus_write_v2_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_io_prometheus_write_v2_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TimeSeries); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Sample); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Histogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketSpan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*Histogram_CountInt)(nil), + (*Histogram_CountFloat)(nil), + (*Histogram_ZeroCountInt)(nil), + (*Histogram_ZeroCountFloat)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_io_prometheus_write_v2_types_proto_rawDesc, + NumEnums: 2, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_io_prometheus_write_v2_types_proto_goTypes, + DependencyIndexes: file_io_prometheus_write_v2_types_proto_depIdxs, + EnumInfos: file_io_prometheus_write_v2_types_proto_enumTypes, + MessageInfos: file_io_prometheus_write_v2_types_proto_msgTypes, + }.Build() + File_io_prometheus_write_v2_types_proto = out.File + file_io_prometheus_write_v2_types_proto_rawDesc = nil + file_io_prometheus_write_v2_types_proto_goTypes = nil + file_io_prometheus_write_v2_types_proto_depIdxs = nil +} diff --git a/exp/api/remote/genproto/v2/types_vtproto.pb.go b/exp/api/remote/genproto/v2/types_vtproto.pb.go new file mode 100644 index 000000000..0f1418539 --- /dev/null +++ b/exp/api/remote/genproto/v2/types_vtproto.pb.go @@ -0,0 +1,2265 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.6.0 +// source: io/prometheus/write/v2/types.proto + +package writev2 + +import ( + binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + + protohelpers "github.com/prometheus/client_golang/exp/internal/github.com/planetscale/vtprotobuf/protohelpers" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Request) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Request) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Timeseries[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Symbols) > 0 { + for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Symbols[iNdEx]) + copy(dAtA[i:], m.Symbols[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Symbols[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + return len(dAtA) - i, nil +} + +func (m *TimeSeries) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CreatedTimestamp != 0 { + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(m.CreatedTimestamp)) + i-- + dAtA[i] = 0x30 + } + if m.Metadata != nil { + size, err := m.Metadata.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Exemplars[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Histograms[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Samples[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.LabelsRefs) > 0 { + var pksize2 int + for _, num := range m.LabelsRefs { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timestamp != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x18 + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.LabelsRefs) > 0 { + var pksize2 int + for _, num := range m.LabelsRefs { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Sample) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= 
len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timestamp != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Metadata) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Metadata) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UnitRef != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UnitRef)) + i-- + dAtA[i] = 0x20 + } + if m.HelpRef != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HelpRef)) + i-- + dAtA[i] = 0x18 + } + if m.Type != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Histogram) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if vtmsg, ok := m.ZeroCount.(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if vtmsg, ok := m.Count.(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.CustomValues) > 0 { + for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.CustomValues[iNdEx])) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CustomValues)*8)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Timestamp != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x78 + } + if m.ResetHint != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ResetHint)) + i-- + dAtA[i] = 0x70 + } + if len(m.PositiveCounts) > 0 { + for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { + f2 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2)) + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PositiveCounts)*8)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveDeltas) > 0 { + var pksize4 int + for _, num := range m.PositiveDeltas { + pksize4 += protohelpers.SizeOfZigzag(uint64(num)) + } + i -= pksize4 + j3 := i + for _, num := range m.PositiveDeltas { + x5 := (uint64(num) << 1) ^ uint64((num >> 63)) 
+ for x5 >= 1<<7 { + dAtA[j3] = uint8(uint64(x5)&0x7f | 0x80) + j3++ + x5 >>= 7 + } + dAtA[j3] = uint8(x5) + j3++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize4)) + i-- + dAtA[i] = 0x62 + } + if len(m.PositiveSpans) > 0 { + for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.PositiveSpans[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + } + if len(m.NegativeCounts) > 0 { + for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { + f6 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.NegativeCounts)*8)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeDeltas) > 0 { + var pksize8 int + for _, num := range m.NegativeDeltas { + pksize8 += protohelpers.SizeOfZigzag(uint64(num)) + } + i -= pksize8 + j7 := i + for _, num := range m.NegativeDeltas { + x9 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x9 >= 1<<7 { + dAtA[j7] = uint8(uint64(x9)&0x7f | 0x80) + j7++ + x9 >>= 7 + } + dAtA[j7] = uint8(x9) + j7++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize8)) + i-- + dAtA[i] = 0x4a + } + if len(m.NegativeSpans) > 0 { + for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.NegativeSpans[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + } + if m.ZeroThreshold != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.Schema != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x20 + } + if m.Sum != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x19 + } + return len(dAtA) - i, nil +} + +func (m *Histogram_CountInt) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Histogram_CountInt) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CountInt)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Histogram_CountFloat) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Histogram_CountFloat) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountInt) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Histogram_ZeroCountInt) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ZeroCountInt)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountFloat) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Histogram_ZeroCountFloat) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- 
+ dAtA[i] = 0x39 + return len(dAtA) - i, nil +} +func (m *BucketSpan) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Length != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Request) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Symbols) > 0 { + for _, s := range m.Symbols { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *TimeSeries) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelsRefs) > 0 { + l = 0 + for _, e := range m.LabelsRefs { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Metadata != nil { + l = m.Metadata.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CreatedTimestamp != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CreatedTimestamp)) + } + n += len(m.unknownFields) + return n +} + +func (m *Exemplar) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelsRefs) > 0 { + l = 0 + for _, e := range m.LabelsRefs { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Timestamp)) + } + n += len(m.unknownFields) + return n +} + +func (m *Sample) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Timestamp)) + } + n += len(m.unknownFields) + return n +} + +func (m *Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Type)) + } + if m.HelpRef != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HelpRef)) + } + if m.UnitRef != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.UnitRef)) + } + n += len(m.unknownFields) + return n +} + +func (m *Histogram) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Count.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + 
if m.Sum != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + protohelpers.SizeOfZigzag(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if vtmsg, ok := m.ZeroCount.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if len(m.NegativeSpans) > 0 { + for _, e := range m.NegativeSpans { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.NegativeDeltas) > 0 { + l = 0 + for _, e := range m.NegativeDeltas { + l += protohelpers.SizeOfZigzag(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if len(m.NegativeCounts) > 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 + } + if len(m.PositiveSpans) > 0 { + for _, e := range m.PositiveSpans { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.PositiveDeltas) > 0 { + l = 0 + for _, e := range m.PositiveDeltas { + l += protohelpers.SizeOfZigzag(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if len(m.PositiveCounts) > 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 + } + if m.ResetHint != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResetHint)) + } + if m.Timestamp != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Timestamp)) + } + if len(m.CustomValues) > 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8 + } + n += len(m.unknownFields) + return n +} + +func (m *Histogram_CountInt) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.CountInt)) + return n +} +func (m *Histogram_CountFloat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Histogram_ZeroCountInt) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.ZeroCountInt)) + return n +} +func (m *Histogram_ZeroCountFloat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *BucketSpan) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + protohelpers.SizeOfZigzag(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Length)) + } + n += len(m.unknownFields) + return n +} + +func (m *Request) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Symbols = append(m.Symbols, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, &TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = 
append(m.LabelsRefs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, &Sample{}) + if err := m.Samples[len(m.Samples)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, &Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, &Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &Metadata{} + } + if err := m.Metadata.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + m.CreatedTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Metadata_MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType) + } + m.HelpRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HelpRef |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnitRef", wireType) + } + m.UnitRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnitRef |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &Histogram_CountInt{CountInt: v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = &Histogram_CountFloat{CountFloat: float64(math.Float64frombits(v))} + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ZeroCount = &Histogram_ZeroCountInt{ZeroCountInt: v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCount = &Histogram_ZeroCountFloat{ZeroCountFloat: float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpans = append(m.NegativeSpans, &BucketSpan{}) + if err := m.NegativeSpans[len(m.NegativeSpans)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDeltas) == 0 { + m.NegativeDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) + } + case 10: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCounts) == 0 { + m.NegativeCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) + } + case 11: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpans = append(m.PositiveSpans, &BucketSpan{}) + if err := m.PositiveSpans[len(m.PositiveSpans)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDeltas) == 0 { + m.PositiveDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) + } + case 13: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCounts) == 0 { + m.PositiveCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := 
float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) + } + m.ResetHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.CustomValues) == 0 { + m.CustomValues = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType) + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/exp/api/remote/remote_api.go b/exp/api/remote/remote_api.go new file mode 100644 index 000000000..afacd4110 --- /dev/null +++ b/exp/api/remote/remote_api.go @@ -0,0 +1,562 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package remote implements bindings for Prometheus Remote APIs. +package remote + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/klauspost/compress/snappy" + "google.golang.org/protobuf/proto" + + "github.com/prometheus/client_golang/exp/internal/github.com/efficientgo/core/backoff" +) + +// API is a client for Prometheus Remote Protocols. +// NOTE(bwplotka): Only https://prometheus.io/docs/specs/remote_write_spec_2_0/ is currently implemented, +// read protocols to be implemented if there will be a demand. 
+type API struct { + baseURL *url.URL + + opts apiOpts + bufPool sync.Pool +} + +// APIOption represents a remote API option. +type APIOption func(o *apiOpts) error + +// TODO(bwplotka): Add "too old sample" handling one day. +type apiOpts struct { + logger *slog.Logger + client *http.Client + backoff backoff.Config + compression Compression + path string + retryOnRateLimit bool +} + +var defaultAPIOpts = &apiOpts{ + backoff: backoff.Config{ + Min: 1 * time.Second, + Max: 10 * time.Second, + MaxRetries: 10, + }, + client: http.DefaultClient, + retryOnRateLimit: true, + compression: SnappyBlockCompression, + path: "api/v1/write", +} + +// WithAPILogger returns APIOption that allows providing slog logger. +// By default, nothing is logged. +func WithAPILogger(logger *slog.Logger) APIOption { + return func(o *apiOpts) error { + o.logger = logger + return nil + } +} + +// WithAPIHTTPClient returns APIOption that allows providing http client. +func WithAPIHTTPClient(client *http.Client) APIOption { + return func(o *apiOpts) error { + o.client = client + return nil + } +} + +// WithAPIPath returns APIOption that allows providing path to send remote write requests to. +func WithAPIPath(path string) APIOption { + return func(o *apiOpts) error { + o.path = path + return nil + } +} + +// WithAPINoRetryOnRateLimit returns APIOption that disables retrying on rate limit status code. +func WithAPINoRetryOnRateLimit() APIOption { + return func(o *apiOpts) error { + o.retryOnRateLimit = false + return nil + } +} + +// WithAPIBackoff returns APIOption that allows overriding backoff configuration. +func WithAPIBackoff(backoff backoff.Config) APIOption { + return func(o *apiOpts) error { + o.backoff = backoff + return nil + } +} + +type nopSlogHandler struct{} + +func (n nopSlogHandler) Enabled(context.Context, slog.Level) bool { return false } +func (n nopSlogHandler) Handle(context.Context, slog.Record) error { return nil } +func (n nopSlogHandler) WithAttrs([]slog.Attr) slog.Handler { return n } +func (n nopSlogHandler) WithGroup(string) slog.Handler { return n } + +// NewAPI returns a new API for the clients of Remote Write Protocol. +func NewAPI(baseURL string, opts ...APIOption) (*API, error) { + parsedURL, err := url.Parse(baseURL) + if err != nil { + return nil, fmt.Errorf("invalid base URL: %w", err) + } + + o := *defaultAPIOpts + for _, opt := range opts { + if err := opt(&o); err != nil { + return nil, err + } + } + + if o.logger == nil { + o.logger = slog.New(nopSlogHandler{}) + } + + parsedURL.Path = path.Join(parsedURL.Path, o.path) + + api := &API{ + opts: o, + baseURL: parsedURL, + bufPool: sync.Pool{ + New: func() any { + b := make([]byte, 0, 1024*16) // Initial capacity of 16KB. + return &b + }, + }, + } + return api, nil +} + +type retryableError struct { + error + retryAfter time.Duration +} + +func (r retryableError) RetryAfter() time.Duration { + return r.retryAfter +} + +type vtProtoEnabled interface { + SizeVT() int + MarshalToSizedBufferVT(dAtA []byte) (int, error) +} + +type gogoProtoEnabled interface { + Size() (n int) + MarshalToSizedBuffer(dAtA []byte) (n int, err error) +} + +// Write writes given, non-empty, protobuf message to a remote storage. +// +// Depending on serialization methods, +// - https://github.com/planetscale/vtprotobuf methods will be used if your msg +// supports those (e.g. SizeVT() and MarshalToSizedBufferVT(...)), for efficiency +// - Otherwise https://github.com/gogo/protobuf methods (e.g. 
Size() and MarshalToSizedBuffer(...)) +// will be used +// - If neither is supported, it will marshaled using generic google.golang.org/protobuf methods and +// error out on unknown scheme. +func (r *API) Write(ctx context.Context, msgType WriteMessageType, msg any) (_ WriteResponseStats, err error) { + buf := r.bufPool.Get().(*[]byte) + + if err := msgType.Validate(); err != nil { + return WriteResponseStats{}, err + } + + // Encode the payload. + switch m := msg.(type) { + case vtProtoEnabled: + // Use optimized vtprotobuf if supported. + size := m.SizeVT() + if cap(*buf) < size { + *buf = make([]byte, size) + } else { + *buf = (*buf)[:size] + } + + if _, err := m.MarshalToSizedBufferVT(*buf); err != nil { + return WriteResponseStats{}, fmt.Errorf("encoding request %w", err) + } + case gogoProtoEnabled: + // Gogo proto if supported. + size := m.Size() + if cap(*buf) < size { + *buf = make([]byte, size) + } else { + *buf = (*buf)[:size] + } + + if _, err := m.MarshalToSizedBuffer(*buf); err != nil { + return WriteResponseStats{}, fmt.Errorf("encoding request %w", err) + } + case proto.Message: + // Generic proto. + *buf, err = (proto.MarshalOptions{}).MarshalAppend(*buf, m) + if err != nil { + return WriteResponseStats{}, fmt.Errorf("encoding request %w", err) + } + default: + return WriteResponseStats{}, fmt.Errorf("unknown message type %T", m) + } + + comprBuf := r.bufPool.Get().(*[]byte) + payload, err := compressPayload(comprBuf, r.opts.compression, *buf) + if err != nil { + return WriteResponseStats{}, fmt.Errorf("compressing %w", err) + } + r.bufPool.Put(buf) + r.bufPool.Put(comprBuf) + + // Since we retry writes we need to track the total amount of accepted data + // across the various attempts. + accumulatedStats := WriteResponseStats{} + + b := backoff.New(ctx, r.opts.backoff) + for { + rs, err := r.attemptWrite(ctx, r.opts.compression, msgType, payload, b.NumRetries()) + accumulatedStats.Add(rs) + if err == nil { + // Check the case mentioned in PRW 2.0. + // https://prometheus.io/docs/specs/remote_write_spec_2_0/#required-written-response-headers. + if msgType == WriteV2MessageType && !accumulatedStats.confirmed && accumulatedStats.NoDataWritten() { + // TODO(bwplotka): Allow users to disable this check or provide their stats for us to know if it's empty. + return accumulatedStats, fmt.Errorf("sent v2 request; "+ + "got 2xx, but PRW 2.0 response header statistics indicate %v samples, %v histograms "+ + "and %v exemplars were accepted; assumining failure e.g. the target only supports "+ + "PRW 1.0 prometheus.WriteRequest, but does not check the Content-Type header correctly", + accumulatedStats.Samples, accumulatedStats.Histograms, accumulatedStats.Exemplars, + ) + } + // Success! + // TODO(bwplotka): Debug log with retry summary? + return accumulatedStats, nil + } + + var retryableErr retryableError + if !errors.As(err, &retryableErr) { + // TODO(bwplotka): More context in the error e.g. about retries. + return accumulatedStats, err + } + + if !b.Ongoing() { + // TODO(bwplotka): More context in the error e.g. about retries. + return accumulatedStats, err + } + + backoffDelay := b.NextDelay() + retryableErr.RetryAfter() + r.opts.logger.Error("failed to send remote write request; retrying after backoff", "err", err, "backoff", backoffDelay) + select { + case <-ctx.Done(): + // TODO(bwplotka): More context in the error e.g. about retries. + return WriteResponseStats{}, ctx.Err() + case <-time.After(backoffDelay): + // Retry. 
+ } + } +} + +func compressPayload(tmpbuf *[]byte, enc Compression, inp []byte) (compressed []byte, _ error) { + switch enc { + case SnappyBlockCompression: + if cap(*tmpbuf) < snappy.MaxEncodedLen(len(inp)) { + *tmpbuf = make([]byte, snappy.MaxEncodedLen(len(inp))) + } else { + *tmpbuf = (*tmpbuf)[:snappy.MaxEncodedLen(len(inp))] + } + + compressed = snappy.Encode(*tmpbuf, inp) + return compressed, nil + default: + return compressed, fmt.Errorf("unknown compression scheme [%v]", enc) + } +} + +func (r *API) attemptWrite(ctx context.Context, compr Compression, msgType WriteMessageType, payload []byte, attempt int) (WriteResponseStats, error) { + req, err := http.NewRequest(http.MethodPost, r.baseURL.String(), bytes.NewReader(payload)) + if err != nil { + // Errors from NewRequest are from unparsable URLs, so are not + // recoverable. + return WriteResponseStats{}, err + } + + req.Header.Add("Content-Encoding", string(compr)) + req.Header.Set("Content-Type", contentTypeHeader(msgType)) + if msgType == WriteV1MessageType { + // Compatibility mode for 1.0. + req.Header.Set(versionHeader, version1HeaderValue) + } else { + req.Header.Set(versionHeader, version20HeaderValue) + } + + if attempt > 0 { + req.Header.Set("Retry-Attempt", strconv.Itoa(attempt)) + } + + resp, err := r.opts.client.Do(req.WithContext(ctx)) + if err != nil { + // Errors from Client.Do are likely network errors, so recoverable. + return WriteResponseStats{}, retryableError{err, 0} + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return WriteResponseStats{}, fmt.Errorf("reading response body: %w", err) + } + + rs := WriteResponseStats{} + if msgType == WriteV2MessageType { + rs, err = parseWriteResponseStats(resp) + if err != nil { + r.opts.logger.Warn("parsing rw write statistics failed; partial or no stats", "err", err) + } + } + + if resp.StatusCode/100 == 2 { + return rs, nil + } + + err = fmt.Errorf("server returned HTTP status %s: %s", resp.Status, body) + if resp.StatusCode/100 == 5 || + (r.opts.retryOnRateLimit && resp.StatusCode == http.StatusTooManyRequests) { + return rs, retryableError{err, retryAfterDuration(resp.Header.Get("Retry-After"))} + } + return rs, err +} + +// retryAfterDuration returns the duration for the Retry-After header. In case of any errors, it +// returns 0 as if the header was never supplied. +func retryAfterDuration(t string) time.Duration { + parsedDuration, err := time.Parse(http.TimeFormat, t) + if err == nil { + return time.Until(parsedDuration) + } + // The duration can be in seconds. + d, err := strconv.Atoi(t) + if err != nil { + return 0 + } + return time.Duration(d) * time.Second +} + +// writeStorage represents the storage for RemoteWriteHandler. +// This interface is intentionally private due its experimental state. +type writeStorage interface { + // Store stores remote write metrics encoded in the given WriteContentType. + // Provided http.Request contains the encoded bytes in the req.Body with all the HTTP information, + // except "Content-Type" header which is provided in a separate, validated ctype. + // + // Other headers might be trimmed, depending on the configured middlewares + // e.g. a default SnappyMiddleware trims "Content-Encoding" and ensures that + // encoded body bytes are already decompressed. 
+ Store(req *http.Request, msgType WriteMessageType) (_ *WriteResponse, _ error) +} + +type writeHandler struct { + store writeStorage + acceptedMessageTypes MessageTypes + opts writeHandlerOpts +} + +type writeHandlerOpts struct { + logger *slog.Logger + middlewares []func(http.Handler) http.Handler +} + +// WriteHandlerOption represents an option for the write handler. +type WriteHandlerOption func(o *writeHandlerOpts) + +// WithWriteHandlerLogger returns WriteHandlerOption that allows providing slog logger. +// By default, nothing is logged. +func WithWriteHandlerLogger(logger *slog.Logger) WriteHandlerOption { + return func(o *writeHandlerOpts) { + o.logger = logger + } +} + +// WithWriteHandlerMiddlewares returns WriteHandlerOption that allows providing middlewares. +// Multiple middlewares can be provided and will be applied in the order they are passed. +// This option replaces the default middlewares (SnappyDecompressorMiddleware), so if +// you want to have handler that works with the default Remote Write 2.0 protocol, +// SnappyDecompressorMiddleware (or any other decompression middleware) needs to be added explicitly. +func WithWriteHandlerMiddlewares(middlewares ...func(http.Handler) http.Handler) WriteHandlerOption { + return func(o *writeHandlerOpts) { + o.middlewares = middlewares + } +} + +// SnappyDecodeMiddleware returns a middleware that checks if the request body is snappy-encoded and decompresses it. +// If the request body is not snappy-encoded, it returns an error. +// Used by default in NewHandler. +func SnappyDecodeMiddleware(logger *slog.Logger) func(http.Handler) http.Handler { + bufPool := sync.Pool{ + New: func() any { + return bytes.NewBuffer(nil) + }, + } + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + enc := r.Header.Get("Content-Encoding") + if enc != "" && enc != string(SnappyBlockCompression) { + err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, SnappyBlockCompression) + logger.Error("Error decoding remote write request", "err", err) + http.Error(w, err.Error(), http.StatusUnsupportedMediaType) + return + } + + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + + bodyBytes, err := io.ReadAll(io.TeeReader(r.Body, buf)) + if err != nil { + logger.Error("Error reading request body", "err", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + decompressed, err := snappy.Decode(nil, bodyBytes) + if err != nil { + // TODO(bwplotka): Add more context to responded error? + logger.Error("Error snappy decoding remote write request", "err", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // Replace the body with decompressed data and remove Content-Encoding header. + r.Body = io.NopCloser(bytes.NewReader(decompressed)) + r.Header.Del("Content-Encoding") + next.ServeHTTP(w, r) + }) + } +} + +// NewWriteHandler returns HTTP handler that receives Remote Write 2.0 +// protocol https://prometheus.io/docs/specs/remote_write_spec_2_0/. 
+func NewWriteHandler(store writeStorage, acceptedMessageTypes MessageTypes, opts ...WriteHandlerOption) http.Handler { + o := writeHandlerOpts{ + logger: slog.New(nopSlogHandler{}), + middlewares: []func(http.Handler) http.Handler{SnappyDecodeMiddleware(slog.New(nopSlogHandler{}))}, + } + for _, opt := range opts { + opt(&o) + } + + h := &writeHandler{ + opts: o, + store: store, + acceptedMessageTypes: acceptedMessageTypes, + } + + // Apply all middlewares in order + var handler http.Handler = h + for i := len(o.middlewares) - 1; i >= 0; i-- { + handler = o.middlewares[i](handler) + } + return handler +} + +// ParseProtoMsg parses the content-type header and returns the proto message type. +// +// The expected content-type will be of the form, +// - `application/x-protobuf;proto=io.prometheus.write.v2.Request` which will be treated as RW2.0 request, +// - `application/x-protobuf;proto=prometheus.WriteRequest` which will be treated as RW1.0 request, +// - `application/x-protobuf` which will be treated as RW1.0 request. +// +// If the content-type is not of the above forms, it will return an error. +func ParseProtoMsg(contentType string) (WriteMessageType, error) { + contentType = strings.TrimSpace(contentType) + + parts := strings.Split(contentType, ";") + if parts[0] != appProtoContentType { + return "", fmt.Errorf("expected %v as the first (media) part, got %v content-type", appProtoContentType, contentType) + } + // Parse potential https://www.rfc-editor.org/rfc/rfc9110#parameter + for _, p := range parts[1:] { + pair := strings.Split(p, "=") + if len(pair) != 2 { + return "", fmt.Errorf("as per https://www.rfc-editor.org/rfc/rfc9110#parameter expected parameters to be key-values, got %v in %v content-type", p, contentType) + } + if pair[0] == "proto" { + ret := WriteMessageType(pair[1]) + if err := ret.Validate(); err != nil { + return "", fmt.Errorf("got %v content type; %w", contentType, err) + } + return ret, nil + } + } + // No "proto=" parameter, assuming v1. + return WriteV1MessageType, nil +} + +func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + contentType := r.Header.Get("Content-Type") + if contentType == "" { + contentType = appProtoContentType + } + + msgType, err := ParseProtoMsg(contentType) + if err != nil { + h.opts.logger.Error("Error decoding remote write request", "err", err) + http.Error(w, err.Error(), http.StatusUnsupportedMediaType) + return + } + + if !h.acceptedMessageTypes.Contains(msgType) { + err := fmt.Errorf("%v protobuf message is not accepted by this server; only accepts %v", msgType, h.acceptedMessageTypes.String()) + h.opts.logger.Error("Unaccepted message type", "msgType", msgType, "err", err) + http.Error(w, err.Error(), http.StatusUnsupportedMediaType) + return + } + + writeResponse, storeErr := h.store.Store(r, msgType) + if writeResponse == nil { + // User could forget to return write response; in this case we assume 0 samples + // were written. + writeResponse = NewWriteResponse() + } + + // Set required X-Prometheus-Remote-Write-Written-* response headers, in all cases, along with any user-defined headers. 
+ writeResponse.writeHeaders(w) + + if storeErr != nil { + if writeResponse.statusCode == 0 { + writeResponse.SetStatusCode(http.StatusInternalServerError) + } + if writeResponse.statusCode/100 == 5 { // 5xx + h.opts.logger.Error("Error while storing the remote write request", "err", storeErr) + } + http.Error(w, storeErr.Error(), writeResponse.statusCode) + return + } + w.WriteHeader(writeResponse.statusCode) +} diff --git a/exp/api/remote/remote_api_test.go b/exp/api/remote/remote_api_test.go new file mode 100644 index 000000000..9f982a4e3 --- /dev/null +++ b/exp/api/remote/remote_api_test.go @@ -0,0 +1,211 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "errors" + "io" + "log/slog" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/testing/protocmp" + + writev2 "github.com/prometheus/client_golang/exp/api/remote/genproto/v2" + "github.com/prometheus/client_golang/exp/internal/github.com/efficientgo/core/backoff" +) + +func TestRetryAfterDuration(t *testing.T) { + tc := []struct { + name string + tInput string + expected model.Duration + }{ + { + name: "seconds", + tInput: "120", + expected: model.Duration(time.Second * 120), + }, + { + name: "date-time default", + tInput: time.RFC1123, // Expected layout is http.TimeFormat, hence an error. + expected: 0, + }, + { + name: "retry-after not provided", + tInput: "", // Expected layout is http.TimeFormat, hence an error. 
+ expected: 0, + }, + } + for _, c := range tc { + if got := retryAfterDuration(c.tInput); got != time.Duration(c.expected) { + t.Fatal("expected", c.expected, "got", got) + } + } +} + +type mockStorage struct { + v2Reqs []*writev2.Request + protos []WriteMessageType + + mockCode *int + mockErr error +} + +func (m *mockStorage) Store(req *http.Request, msgType WriteMessageType) (*WriteResponse, error) { + w := NewWriteResponse() + if m.mockErr != nil { + if m.mockCode != nil { + w.SetStatusCode(*m.mockCode) + } + return w, m.mockErr + } + + // Read the request body + serializedRequest, err := io.ReadAll(req.Body) + if err != nil { + w.SetStatusCode(http.StatusBadRequest) + return w, err + } + + // This test expects v2 only + r := &writev2.Request{} + if err := proto.Unmarshal(serializedRequest, r); err != nil { + w.SetStatusCode(http.StatusInternalServerError) + return w, err + } + m.v2Reqs = append(m.v2Reqs, r) + m.protos = append(m.protos, msgType) + + // Set stats in response headers + w.Add(stats(r)) + w.SetStatusCode(http.StatusNoContent) + + return w, nil +} + +func testV2() *writev2.Request { + s := writev2.NewSymbolTable() + return &writev2.Request{ + Timeseries: []*writev2.TimeSeries{ + { + Metadata: &writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, + HelpRef: s.Symbolize("My lovely counter"), + }, + LabelsRefs: s.SymbolizeLabels([]string{"__name__", "metric1", "foo", "bar1"}, nil), + Samples: []*writev2.Sample{ + {Value: 1.1, Timestamp: 1214141}, + {Value: 1.5, Timestamp: 1214180}, + }, + }, + { + Metadata: &writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, + HelpRef: s.Symbolize("My lovely counter"), + }, + LabelsRefs: s.SymbolizeLabels([]string{"__name__", "metric1", "foo", "bar2"}, nil), + Samples: []*writev2.Sample{ + {Value: 1231311, Timestamp: 1214141}, + {Value: 1310001, Timestamp: 1214180}, + }, + }, + }, + } +} + +func stats(req *writev2.Request) (s WriteResponseStats) { + s.confirmed = true + for _, ts := range req.Timeseries { + s.Samples += len(ts.Samples) + s.Histograms += len(ts.Histograms) + s.Exemplars += len(ts.Exemplars) + } + return s +} + +func TestRemoteAPI_Write_WithHandler(t *testing.T) { + t.Run("success", func(t *testing.T) { + tLogger := slog.Default() + mStore := &mockStorage{} + srv := httptest.NewServer(NewWriteHandler(mStore, MessageTypes{WriteV2MessageType}, WithWriteHandlerLogger(tLogger))) + t.Cleanup(srv.Close) + + client, err := NewAPI(srv.URL, + WithAPIHTTPClient(srv.Client()), + WithAPILogger(tLogger), + WithAPIPath("api/v1/write"), + ) + if err != nil { + t.Fatal(err) + } + + req := testV2() + s, err := client.Write(context.Background(), WriteV2MessageType, req) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(stats(req), s, cmpopts.IgnoreUnexported(WriteResponseStats{})); diff != "" { + t.Fatal("unexpected stats", diff) + } + if len(mStore.v2Reqs) != 1 { + t.Fatal("expected 1 v2 request stored, got", mStore.v2Reqs) + } + if diff := cmp.Diff(req, mStore.v2Reqs[0], protocmp.Transform()); diff != "" { + t.Fatal("unexpected request received", diff) + } + }) + + t.Run("storage error", func(t *testing.T) { + tLogger := slog.Default() + mockCode := http.StatusInternalServerError + mStore := &mockStorage{ + mockErr: errors.New("storage error"), + mockCode: &mockCode, + } + srv := httptest.NewServer(NewWriteHandler(mStore, MessageTypes{WriteV2MessageType}, WithWriteHandlerLogger(tLogger))) + t.Cleanup(srv.Close) + + client, err := NewAPI(srv.URL, + WithAPIHTTPClient(srv.Client()), + WithAPILogger(tLogger), + 
WithAPIPath("api/v1/write"), + WithAPIBackoff(backoff.Config{ + Min: 1 * time.Second, + Max: 1 * time.Second, + MaxRetries: 2, + }), + ) + if err != nil { + t.Fatal(err) + } + + req := testV2() + _, err = client.Write(context.Background(), WriteV2MessageType, req) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "storage error") { + t.Fatalf("expected error to contain 'storage error', got %v", err) + } + }) +} diff --git a/exp/api/remote/remote_headers.go b/exp/api/remote/remote_headers.go new file mode 100644 index 000000000..dfe1931ad --- /dev/null +++ b/exp/api/remote/remote_headers.go @@ -0,0 +1,217 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "errors" + "fmt" + "net/http" + "slices" + "strconv" + "strings" +) + +const ( + versionHeader = "X-Prometheus-Remote-Write-Version" + version1HeaderValue = "0.1.0" + version20HeaderValue = "2.0.0" + appProtoContentType = "application/x-protobuf" +) + +// Compression represents the encoding. Currently remote storage supports only +// one, but we experiment with more, thus leaving the compression scaffolding +// for now. +type Compression string + +const ( + // SnappyBlockCompression represents https://github.com/google/snappy/blob/2c94e11145f0b7b184b831577c93e5a41c4c0346/format_description.txt + SnappyBlockCompression Compression = "snappy" +) + +// WriteMessageType represents the fully qualified name of the protobuf message +// to use in Remote write 1.0 and 2.0 protocols. +// See https://prometheus.io/docs/specs/remote_write_spec_2_0/#protocol. +type WriteMessageType string + +const ( + // WriteV1MessageType represents the `prometheus.WriteRequest` protobuf + // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/. + // DEPRECATED: Use WriteV2MessageType instead. + WriteV1MessageType WriteMessageType = "prometheus.WriteRequest" + // WriteV2MessageType represents the `io.prometheus.write.v2.Request` protobuf + // message introduced in https://prometheus.io/docs/specs/remote_write_spec_2_0/ + WriteV2MessageType WriteMessageType = "io.prometheus.write.v2.Request" +) + +// Validate returns error if the given reference for the protobuf message is not supported. 
+func (n WriteMessageType) Validate() error { + switch n { + case WriteV1MessageType, WriteV2MessageType: + return nil + default: + return fmt.Errorf("unknown type for remote write protobuf message %v, supported: %v", n, MessageTypes{WriteV1MessageType, WriteV2MessageType}.String()) + } +} + +type MessageTypes []WriteMessageType + +func (m MessageTypes) Strings() []string { + ret := make([]string, 0, len(m)) + for _, typ := range m { + ret = append(ret, string(typ)) + } + return ret +} + +func (m MessageTypes) String() string { + return strings.Join(m.Strings(), ", ") +} + +func (m MessageTypes) Contains(mType WriteMessageType) bool { + return slices.Contains(m, mType) +} + +var contentTypeHeaders = map[WriteMessageType]string{ + WriteV1MessageType: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec. + WriteV2MessageType: appProtoContentType + ";proto=io.prometheus.write.v2.Request", +} + +// ContentTypeHeader returns content type header value for the given proto message +// or empty string for unknown proto message. +func contentTypeHeader(m WriteMessageType) string { + return contentTypeHeaders[m] +} + +const ( + writtenSamplesHeader = "X-Prometheus-Remote-Write-Samples-Written" + writtenHistogramsHeader = "X-Prometheus-Remote-Write-Histograms-Written" + writtenExemplarsHeader = "X-Prometheus-Remote-Write-Exemplars-Written" +) + +// WriteResponse represents the response from the remote storage upon receiving a remote write request. +type WriteResponse struct { + WriteResponseStats + statusCode int + extraHeaders http.Header +} + +// NewWriteResponse creates a new WriteResponse with empty stats and status code http.StatusNoContent. +func NewWriteResponse() *WriteResponse { + return &WriteResponse{ + WriteResponseStats: WriteResponseStats{}, + statusCode: http.StatusNoContent, + extraHeaders: make(http.Header), + } +} + +// Stats returns the current statistics. +func (w *WriteResponse) Stats() WriteResponseStats { + return w.WriteResponseStats +} + +// SetStatusCode sets the HTTP status code for the response. http.StatusNoContent is the default unless 5xx is set. +func (w *WriteResponse) SetStatusCode(code int) { + w.statusCode = code +} + +// SetExtraHeader adds additional headers to be set in the response (apart from stats headers) +func (w *WriteResponse) SetExtraHeader(key, value string) { + w.extraHeaders.Set(key, value) +} + +// writeHeaders sets response headers in a given response writer. +// Make sure to use it before http.ResponseWriter.WriteHeader and .Write. +func (w *WriteResponse) writeHeaders(rw http.ResponseWriter) { + h := rw.Header() + h.Set(writtenSamplesHeader, strconv.Itoa(w.Samples)) + h.Set(writtenHistogramsHeader, strconv.Itoa(w.Histograms)) + h.Set(writtenExemplarsHeader, strconv.Itoa(w.Exemplars)) + for k, v := range w.extraHeaders { + for _, vv := range v { + h.Add(k, vv) + } + } +} + +// WriteResponseStats represents the response, remote write statistics. +type WriteResponseStats struct { + // Samples represents X-Prometheus-Remote-Write-Written-Samples + Samples int + // Histograms represents X-Prometheus-Remote-Write-Written-Histograms + Histograms int + // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars + Exemplars int + + // Confirmed means we can trust those statistics from the point of view + // of the PRW 2.0 spec. 
When parsed from headers, it means we got at least one + // response header from the Receiver to confirm those numbers, meaning it must + // be at least 2.0 Receiver. See ParseWriteResponseStats for details. + confirmed bool +} + +// NoDataWritten returns true if statistics indicate no data was written. +func (s WriteResponseStats) NoDataWritten() bool { + return (s.Samples + s.Histograms + s.Exemplars) == 0 +} + +// AllSamples returns both float and histogram sample numbers. +func (s WriteResponseStats) AllSamples() int { + return s.Samples + s.Histograms +} + +// Add adds the given WriteResponseStats to this WriteResponseStats. +// If this WriteResponseStats is empty, it will be replaced by the given WriteResponseStats. +func (s *WriteResponseStats) Add(rs WriteResponseStats) { + s.confirmed = rs.confirmed + s.Samples += rs.Samples + s.Histograms += rs.Histograms + s.Exemplars += rs.Exemplars +} + +// parseWriteResponseStats returns WriteResponseStats parsed from the response headers. +// +// As per 2.0 spec, missing header means 0. However, abrupt HTTP errors, 1.0 Receivers +// or buggy 2.0 Receivers might result in no response headers specified and that +// might NOT necessarily mean nothing was written. To represent that we set +// s.Confirmed = true only when see at least on response header. +// +// Error is returned when any of the header fails to parse as int64. +func parseWriteResponseStats(r *http.Response) (s WriteResponseStats, err error) { + var ( + errs []error + h = r.Header + ) + if v := h.Get(writtenSamplesHeader); v != "" { // Empty means zero. + s.confirmed = true + if s.Samples, err = strconv.Atoi(v); err != nil { + s.Samples = 0 + errs = append(errs, err) + } + } + if v := h.Get(writtenHistogramsHeader); v != "" { // Empty means zero. + s.confirmed = true + if s.Histograms, err = strconv.Atoi(v); err != nil { + s.Histograms = 0 + errs = append(errs, err) + } + } + if v := h.Get(writtenExemplarsHeader); v != "" { // Empty means zero. + s.confirmed = true + if s.Exemplars, err = strconv.Atoi(v); err != nil { + s.Exemplars = 0 + errs = append(errs, err) + } + } + return s, errors.Join(errs...) +} diff --git a/exp/api/remote/remote_headers_test.go b/exp/api/remote/remote_headers_test.go new file mode 100644 index 000000000..760644824 --- /dev/null +++ b/exp/api/remote/remote_headers_test.go @@ -0,0 +1,100 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
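The WriteResponse and WriteResponseStats types added in exp/api/remote/remote_headers.go above form the exported surface for tracking what a receiver wrote. Below is a minimal, hypothetical sketch of that surface (see the test file that follows for the patch's own coverage); the import path is inferred from exp/go.mod, the sample counts are invented, and the stats headers themselves are written by the package internally, so only exported accessors appear here:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/exp/api/remote"
)

func main() {
	// Reject unsupported protobuf message types up front.
	if err := remote.WriteV2MessageType.Validate(); err != nil {
		panic(err)
	}

	resp := remote.NewWriteResponse() // status code defaults to 204 No Content

	// Accumulate per-batch statistics; the counts below are invented.
	resp.Add(remote.WriteResponseStats{Samples: 120, Histograms: 3, Exemplars: 7})
	resp.Add(remote.WriteResponseStats{Samples: 30})

	if resp.Stats().NoDataWritten() {
		// Purely illustrative handling of an empty write.
		resp.SetStatusCode(http.StatusBadRequest)
	}
	resp.SetExtraHeader("X-Tenant", "example") // extra, non-stats response header

	fmt.Printf("floats+histograms written: %d\n", resp.Stats().AllSamples())
}
```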
+ +package remote + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" +) + +func TestWriteResponse(t *testing.T) { + t.Run("new response has empty headers", func(t *testing.T) { + resp := NewWriteResponse() + if len(resp.extraHeaders) != 0 { + t.Errorf("expected empty headers, got %v", resp.extraHeaders) + } + }) + + t.Run("setters", func(t *testing.T) { + resp := NewWriteResponse() + + resp.SetStatusCode(http.StatusOK) + if got := resp.statusCode; got != http.StatusOK { + t.Errorf("expected status code %d, got %d", http.StatusOK, got) + } + + stats := WriteResponseStats{ + Samples: 10, + Histograms: 5, + Exemplars: 2, + confirmed: true, + } + resp.Add(stats) + if diff := cmp.Diff(stats, resp.Stats(), cmpopts.IgnoreUnexported(WriteResponseStats{})); diff != "" { + t.Errorf("stats mismatch (-want +got):\n%s", diff) + } + + toAdd := WriteResponseStats{ + Samples: 10, + Histograms: 5, + Exemplars: 2, + confirmed: true, + } + resp.Add(toAdd) + if diff := cmp.Diff(WriteResponseStats{ + Samples: 20, + Histograms: 10, + Exemplars: 4, + confirmed: true, + }, resp.Stats(), cmpopts.IgnoreUnexported(WriteResponseStats{})); diff != "" { + t.Errorf("stats mismatch (-want +got):\n%s", diff) + } + + resp.SetExtraHeader("Test-Header", "test-value") + if got := resp.extraHeaders.Get("Test-Header"); got != "test-value" { + t.Errorf("expected header value %q, got %q", "test-value", got) + } + }) + + t.Run("writeHeaders", func(t *testing.T) { + resp := NewWriteResponse() + resp.Add(WriteResponseStats{ + Samples: 10, + Histograms: 5, + Exemplars: 2, + confirmed: true, + }) + resp.SetExtraHeader("Custom-Header", "custom-value") + + w := httptest.NewRecorder() + resp.writeHeaders(w) + + expectedHeaders := map[string]string{ + "Custom-Header": "custom-value", + "X-Prometheus-Remote-Write-Samples-Written": "10", + "X-Prometheus-Remote-Write-Histograms-Written": "5", + "X-Prometheus-Remote-Write-Exemplars-Written": "2", + } + + for k, want := range expectedHeaders { + if got := w.Header().Get(k); got != want { + t.Errorf("header %q: want %q, got %q", k, want, got) + } + } + }) +} diff --git a/exp/exp.go b/exp/exp.go new file mode 100644 index 000000000..e9c572993 --- /dev/null +++ b/exp/exp.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// package exp contains experimental utilities and APIs for Prometheus. +// +// This package is experimental and may contain breaking changes or be removed in the future. 
+package exp diff --git a/exp/go.mod b/exp/go.mod new file mode 100644 index 000000000..52e4f26c6 --- /dev/null +++ b/exp/go.mod @@ -0,0 +1,16 @@ +module github.com/prometheus/client_golang/exp + +go 1.23.0 + +require ( + github.com/google/go-cmp v0.7.0 + github.com/klauspost/compress v1.18.0 + github.com/prometheus/common v0.66.1 + google.golang.org/protobuf v1.36.9 +) + +require ( + github.com/kr/pretty v0.3.1 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect +) diff --git a/exp/go.sum b/exp/go.sum new file mode 100644 index 000000000..a969bcddb --- /dev/null +++ b/exp/go.sum @@ -0,0 +1,32 @@ +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/exp/internal/github.com/efficientgo/core/backoff/backoff.go b/exp/internal/github.com/efficientgo/core/backoff/backoff.go new file mode 100644 index 000000000..53b8390a5 --- /dev/null +++ b/exp/internal/github.com/efficientgo/core/backoff/backoff.go @@ -0,0 +1,114 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Cortex project. + +// Package backoff implements backoff timers which increases wait time on every retry, incredibly useful +// in distributed system timeout functionalities. +package backoff + +import ( + "context" + "fmt" + "math/rand" + "time" +) + +// Config configures a Backoff. +type Config struct { + Min time.Duration `yaml:"min_period"` // Start backoff at this level + Max time.Duration `yaml:"max_period"` // Increase exponentially to this level + MaxRetries int `yaml:"max_retries"` // Give up after this many; zero means infinite retries +} + +// Backoff implements exponential backoff with randomized wait times. +type Backoff struct { + cfg Config + ctx context.Context + numRetries int + nextDelayMin time.Duration + nextDelayMax time.Duration +} + +// New creates a Backoff object. Pass a Context that can also terminate the operation. +func New(ctx context.Context, cfg Config) *Backoff { + return &Backoff{ + cfg: cfg, + ctx: ctx, + nextDelayMin: cfg.Min, + nextDelayMax: doubleDuration(cfg.Min, cfg.Max), + } +} + +// Reset the Backoff back to its initial condition. +func (b *Backoff) Reset() { + b.numRetries = 0 + b.nextDelayMin = b.cfg.Min + b.nextDelayMax = doubleDuration(b.cfg.Min, b.cfg.Max) +} + +// Ongoing returns true if caller should keep going. +func (b *Backoff) Ongoing() bool { + // Stop if Context has errored or max retry count is exceeded. + return b.ctx.Err() == nil && (b.cfg.MaxRetries == 0 || b.numRetries < b.cfg.MaxRetries) +} + +// Err returns the reason for terminating the backoff, or nil if it didn't terminate. +func (b *Backoff) Err() error { + if b.ctx.Err() != nil { + return b.ctx.Err() + } + if b.cfg.MaxRetries != 0 && b.numRetries >= b.cfg.MaxRetries { + return fmt.Errorf("terminated after %d retries", b.numRetries) + } + return nil +} + +// NumRetries returns the number of retries so far. +func (b *Backoff) NumRetries() int { + return b.numRetries +} + +// Wait sleeps for the backoff time then increases the retry count and backoff time. +// Returns immediately if Context is terminated. +func (b *Backoff) Wait() { + // Increase the number of retries and get the next delay. + sleepTime := b.NextDelay() + + if b.Ongoing() { + select { + case <-b.ctx.Done(): + case <-time.After(sleepTime): + } + } +} + +func (b *Backoff) NextDelay() time.Duration { + b.numRetries++ + + // Handle the edge case the min and max have the same value + // (or due to some misconfig max is < min). + if b.nextDelayMin >= b.nextDelayMax { + return b.nextDelayMin + } + + // Add a jitter within the next exponential backoff range. 
+ sleepTime := b.nextDelayMin + time.Duration(rand.Int63n(int64(b.nextDelayMax-b.nextDelayMin))) + + // Apply the exponential backoff to calculate the next jitter + // range, unless we've already reached the max. + if b.nextDelayMax < b.cfg.Max { + b.nextDelayMin = doubleDuration(b.nextDelayMin, b.cfg.Max) + b.nextDelayMax = doubleDuration(b.nextDelayMax, b.cfg.Max) + } + + return sleepTime +} + +func doubleDuration(value, maxValue time.Duration) time.Duration { + value = value * 2 + if value <= maxValue { + return value + } + return maxValue +} diff --git a/exp/internal/github.com/efficientgo/core/backoff/backoff_test.go b/exp/internal/github.com/efficientgo/core/backoff/backoff_test.go new file mode 100644 index 000000000..e7a5310af --- /dev/null +++ b/exp/internal/github.com/efficientgo/core/backoff/backoff_test.go @@ -0,0 +1,108 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Cortex project. + +package backoff + +import ( + "context" + "testing" + "time" +) + +func TestBackoff_NextDelay(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + minBackoff time.Duration + maxBackoff time.Duration + expectedRanges [][]time.Duration + }{ + "exponential backoff with jitter honoring min and max": { + minBackoff: 100 * time.Millisecond, + maxBackoff: 10 * time.Second, + expectedRanges: [][]time.Duration{ + {100 * time.Millisecond, 200 * time.Millisecond}, + {200 * time.Millisecond, 400 * time.Millisecond}, + {400 * time.Millisecond, 800 * time.Millisecond}, + {800 * time.Millisecond, 1600 * time.Millisecond}, + {1600 * time.Millisecond, 3200 * time.Millisecond}, + {3200 * time.Millisecond, 6400 * time.Millisecond}, + {6400 * time.Millisecond, 10000 * time.Millisecond}, + {6400 * time.Millisecond, 10000 * time.Millisecond}, + }, + }, + "exponential backoff with max equal to the end of a range": { + minBackoff: 100 * time.Millisecond, + maxBackoff: 800 * time.Millisecond, + expectedRanges: [][]time.Duration{ + {100 * time.Millisecond, 200 * time.Millisecond}, + {200 * time.Millisecond, 400 * time.Millisecond}, + {400 * time.Millisecond, 800 * time.Millisecond}, + {400 * time.Millisecond, 800 * time.Millisecond}, + }, + }, + "exponential backoff with max equal to the end of a range + 1": { + minBackoff: 100 * time.Millisecond, + maxBackoff: 801 * time.Millisecond, + expectedRanges: [][]time.Duration{ + {100 * time.Millisecond, 200 * time.Millisecond}, + {200 * time.Millisecond, 400 * time.Millisecond}, + {400 * time.Millisecond, 800 * time.Millisecond}, + {800 * time.Millisecond, 801 * time.Millisecond}, + {800 * time.Millisecond, 801 * time.Millisecond}, + }, + }, + "exponential backoff with max equal to the end of a range - 1": { + minBackoff: 100 * time.Millisecond, + maxBackoff: 799 * time.Millisecond, + expectedRanges: [][]time.Duration{ + {100 * time.Millisecond, 200 * time.Millisecond}, + {200 * time.Millisecond, 400 * time.Millisecond}, + {400 * time.Millisecond, 799 * time.Millisecond}, + {400 * time.Millisecond, 799 * time.Millisecond}, + }, + }, + "min backoff is equal to max": { + minBackoff: 100 * time.Millisecond, + maxBackoff: 100 * time.Millisecond, + expectedRanges: [][]time.Duration{ + {100 * time.Millisecond, 100 * time.Millisecond}, + {100 * time.Millisecond, 100 * time.Millisecond}, + {100 * time.Millisecond, 100 * time.Millisecond}, + }, + }, + "min backoff is greater then max": { + minBackoff: 200 * time.Millisecond, + maxBackoff: 100 * time.Millisecond, + 
expectedRanges: [][]time.Duration{ + {200 * time.Millisecond, 200 * time.Millisecond}, + {200 * time.Millisecond, 200 * time.Millisecond}, + {200 * time.Millisecond, 200 * time.Millisecond}, + }, + }, + } + + for testName, testData := range tests { + testData := testData + + t.Run(testName, func(t *testing.T) { + t.Parallel() + + b := New(context.Background(), Config{ + Min: testData.minBackoff, + Max: testData.maxBackoff, + MaxRetries: len(testData.expectedRanges), + }) + + for _, expectedRange := range testData.expectedRanges { + delay := b.NextDelay() + + if delay < expectedRange[0] || delay > expectedRange[1] { + t.Errorf("%d expected to be within %d and %d", delay, expectedRange[0], expectedRange[1]) + } + } + }) + } +} diff --git a/exp/internal/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go b/exp/internal/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go new file mode 100644 index 000000000..b0a3f4a3a --- /dev/null +++ b/exp/internal/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go @@ -0,0 +1,125 @@ +// Copyright (c) 2022 PlanetScale Inc. All rights reserved. + +// Package protohelpers provides helper functions for encoding and decoding protobuf messages. +// The spec can be found at https://protobuf.dev/programming-guides/encoding/. +package protohelpers + +import ( + "errors" + "fmt" + "io" + "math/bits" +) + +var ( + // ErrInvalidLength is returned when decoding a negative length. + ErrInvalidLength = errors.New("proto: negative length found during unmarshaling") + // ErrIntOverflow is returned when decoding a varint representation of an integer that overflows 64 bits. + ErrIntOverflow = errors.New("proto: integer overflow") + // ErrUnexpectedEndOfGroup is returned when decoding a group end without a corresponding group start. + ErrUnexpectedEndOfGroup = errors.New("proto: unexpected end of group") +) + +// EncodeVarint encodes a uint64 into a varint-encoded byte slice and returns the offset of the encoded value. +// The provided offset is the offset after the last byte of the encoded value. +func EncodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= SizeOfVarint(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} + +// SizeOfVarint returns the size of the varint-encoded value. +func SizeOfVarint(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} + +// SizeOfZigzag returns the size of the zigzag-encoded value. +func SizeOfZigzag(x uint64) (n int) { + return SizeOfVarint((x << 1) ^ uint64(int64(x)>>63)) +} + +// Skip the first record of the byte slice and return the offset of the next record. 
+func Skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} diff --git a/go.mod b/go.mod index 372c69508..4618075f8 100644 --- a/go.mod +++ b/go.mod @@ -1,19 +1,20 @@ module github.com/prometheus/client_golang -go 1.21 +go 1.23.0 require ( github.com/beorn7/perks v1.0.1 github.com/cespare/xxhash/v2 v2.3.0 - github.com/google/go-cmp v0.6.0 + github.com/google/go-cmp v0.7.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.11 + github.com/klauspost/compress v1.18.0 github.com/kylelemons/godebug v1.1.0 - github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.62.0 - github.com/prometheus/procfs v0.15.1 - golang.org/x/sys v0.29.0 - google.golang.org/protobuf v1.36.3 + github.com/prometheus/client_model v0.6.2 + github.com/prometheus/common v0.66.1 + github.com/prometheus/procfs v0.17.0 + go.uber.org/goleak v1.3.0 + golang.org/x/sys v0.35.0 + google.golang.org/protobuf v1.36.9 ) require ( @@ -23,10 +24,10 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.24.0 // indirect - golang.org/x/text v0.21.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/text v0.28.0 // indirect ) exclude github.com/prometheus/client_golang v1.12.1 diff --git a/go.sum b/go.sum index cdd858fa7..4cbe968c7 100644 --- a/go.sum +++ b/go.sum @@ -6,15 +6,15 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -33,33 +33,35 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 
h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= -golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= -google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/prometheus/atomic_update.go b/prometheus/atomic_update.go deleted file mode 100644 index b65896a31..000000000 --- a/prometheus/atomic_update.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// 
Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "math" - "sync/atomic" - "time" -) - -// atomicUpdateFloat atomically updates the float64 value pointed to by bits -// using the provided updateFunc, with an exponential backoff on contention. -func atomicUpdateFloat(bits *uint64, updateFunc func(float64) float64) { - const ( - // both numbers are derived from empirical observations - // documented in this PR: https://github.com/prometheus/client_golang/pull/1661 - maxBackoff = 320 * time.Millisecond - initialBackoff = 10 * time.Millisecond - ) - backoff := initialBackoff - - for { - loadedBits := atomic.LoadUint64(bits) - oldFloat := math.Float64frombits(loadedBits) - newFloat := updateFunc(oldFloat) - newBits := math.Float64bits(newFloat) - - if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { - break - } else { - // Exponential backoff with sleep and cap to avoid infinite wait - time.Sleep(backoff) - backoff *= 2 - if backoff > maxBackoff { - backoff = maxBackoff - } - } - } -} diff --git a/prometheus/atomic_update_test.go b/prometheus/atomic_update_test.go deleted file mode 100644 index 0233bc71f..000000000 --- a/prometheus/atomic_update_test.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "math" - "sync" - "sync/atomic" - "testing" - "unsafe" -) - -var output float64 - -func TestAtomicUpdateFloat(t *testing.T) { - var val float64 = 0.0 - bits := (*uint64)(unsafe.Pointer(&val)) - var wg sync.WaitGroup - numGoroutines := 100000 - increment := 1.0 - - for i := 0; i < numGoroutines; i++ { - wg.Add(1) - go func() { - defer wg.Done() - atomicUpdateFloat(bits, func(f float64) float64 { - return f + increment - }) - }() - } - - wg.Wait() - expected := float64(numGoroutines) * increment - if val != expected { - t.Errorf("Expected %f, got %f", expected, val) - } -} - -// Benchmark for atomicUpdateFloat with single goroutine (no contention). 
-func BenchmarkAtomicUpdateFloat_SingleGoroutine(b *testing.B) { - var val float64 = 0.0 - bits := (*uint64)(unsafe.Pointer(&val)) - - for i := 0; i < b.N; i++ { - atomicUpdateFloat(bits, func(f float64) float64 { - return f + 1.0 - }) - } - - output = val -} - -// Benchmark for old implementation with single goroutine (no contention) -> to check overhead of backoff -func BenchmarkAtomicNoBackoff_SingleGoroutine(b *testing.B) { - var val float64 = 0.0 - bits := (*uint64)(unsafe.Pointer(&val)) - - for i := 0; i < b.N; i++ { - for { - loadedBits := atomic.LoadUint64(bits) - newBits := math.Float64bits(math.Float64frombits(loadedBits) + 1.0) - if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { - break - } - } - } - - output = val -} - -// Benchmark varying the number of goroutines. -func benchmarkAtomicUpdateFloatConcurrency(b *testing.B, numGoroutines int) { - var val float64 = 0.0 - bits := (*uint64)(unsafe.Pointer(&val)) - b.SetParallelism(numGoroutines) - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - atomicUpdateFloat(bits, func(f float64) float64 { - return f + 1.0 - }) - } - }) - - output = val -} - -func benchmarkAtomicNoBackoffFloatConcurrency(b *testing.B, numGoroutines int) { - var val float64 = 0.0 - bits := (*uint64)(unsafe.Pointer(&val)) - b.SetParallelism(numGoroutines) - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - for { - loadedBits := atomic.LoadUint64(bits) - newBits := math.Float64bits(math.Float64frombits(loadedBits) + 1.0) - if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { - break - } - } - } - }) - - output = val -} - -func BenchmarkAtomicUpdateFloat_1Goroutine(b *testing.B) { - benchmarkAtomicUpdateFloatConcurrency(b, 1) -} - -func BenchmarkAtomicNoBackoff_1Goroutine(b *testing.B) { - benchmarkAtomicNoBackoffFloatConcurrency(b, 1) -} - -func BenchmarkAtomicUpdateFloat_2Goroutines(b *testing.B) { - benchmarkAtomicUpdateFloatConcurrency(b, 2) -} - -func BenchmarkAtomicNoBackoff_2Goroutines(b *testing.B) { - benchmarkAtomicNoBackoffFloatConcurrency(b, 2) -} - -func BenchmarkAtomicUpdateFloat_4Goroutines(b *testing.B) { - benchmarkAtomicUpdateFloatConcurrency(b, 4) -} - -func BenchmarkAtomicNoBackoff_4Goroutines(b *testing.B) { - benchmarkAtomicNoBackoffFloatConcurrency(b, 4) -} - -func BenchmarkAtomicUpdateFloat_8Goroutines(b *testing.B) { - benchmarkAtomicUpdateFloatConcurrency(b, 8) -} - -func BenchmarkAtomicNoBackoff_8Goroutines(b *testing.B) { - benchmarkAtomicNoBackoffFloatConcurrency(b, 8) -} - -func BenchmarkAtomicUpdateFloat_16Goroutines(b *testing.B) { - benchmarkAtomicUpdateFloatConcurrency(b, 16) -} - -func BenchmarkAtomicNoBackoff_16Goroutines(b *testing.B) { - benchmarkAtomicNoBackoffFloatConcurrency(b, 16) -} - -func BenchmarkAtomicUpdateFloat_32Goroutines(b *testing.B) { - benchmarkAtomicUpdateFloatConcurrency(b, 32) -} - -func BenchmarkAtomicNoBackoff_32Goroutines(b *testing.B) { - benchmarkAtomicNoBackoffFloatConcurrency(b, 32) -} diff --git a/prometheus/collectorfunc.go b/prometheus/collectorfunc.go new file mode 100644 index 000000000..9a71a15db --- /dev/null +++ b/prometheus/collectorfunc.go @@ -0,0 +1,30 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// CollectorFunc is a convenient way to implement a Prometheus Collector +// without interface boilerplate. +// This implementation is based on DescribeByCollect method. +// familiarize yourself to it before using. +type CollectorFunc func(chan<- Metric) + +// Collect calls the defined CollectorFunc function with the provided Metrics channel +func (f CollectorFunc) Collect(ch chan<- Metric) { + f(ch) +} + +// Describe sends the descriptor information using DescribeByCollect +func (f CollectorFunc) Describe(ch chan<- *Desc) { + DescribeByCollect(f, ch) +} diff --git a/prometheus/collectorfunc_test.go b/prometheus/collectorfunc_test.go new file mode 100644 index 000000000..2e9aa324b --- /dev/null +++ b/prometheus/collectorfunc_test.go @@ -0,0 +1,83 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "testing" + +func TestCollectorFunc(t *testing.T) { + testDesc := NewDesc( + "test_metric", + "A test metric", + nil, nil, + ) + + cf := CollectorFunc(func(ch chan<- Metric) { + ch <- MustNewConstMetric( + testDesc, + GaugeValue, + 42.0, + ) + }) + + ch := make(chan Metric, 1) + cf.Collect(ch) + close(ch) + + metric := <-ch + if metric == nil { + t.Fatal("Expected metric, got nil") + } + + descCh := make(chan *Desc, 1) + cf.Describe(descCh) + close(descCh) + + desc := <-descCh + if desc == nil { + t.Fatal("Expected desc, got nil") + } + + if desc.String() != testDesc.String() { + t.Fatalf("Expected %s, got %s", testDesc.String(), desc.String()) + } +} + +func TestCollectorFuncWithRegistry(t *testing.T) { + reg := NewPedanticRegistry() + + cf := CollectorFunc(func(ch chan<- Metric) { + ch <- MustNewConstMetric( + NewDesc( + "test_metric", + "A test metric", + nil, nil, + ), + GaugeValue, + 42.0, + ) + }) + + if err := reg.Register(cf); err != nil { + t.Errorf("Failed to register CollectorFunc: %v", err) + } + + collectedMetrics, err := reg.Gather() + if err != nil { + t.Errorf("Failed to gather metrics: %v", err) + } + + if len(collectedMetrics) != 1 { + t.Errorf("Expected 1 metric family, got %d", len(collectedMetrics)) + } +} diff --git a/prometheus/collectors/go_collector_go116.go b/prometheus/collectors/go_collector_go116.go deleted file mode 100644 index effc57840..000000000 --- a/prometheus/collectors/go_collector_go116.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.17 -// +build !go1.17 - -package collectors - -import "github.com/prometheus/client_golang/prometheus" - -// NewGoCollector returns a collector that exports metrics about the current Go -// process. This includes memory stats. To collect those, runtime.ReadMemStats -// is called. This requires to “stop the world”, which usually only happens for -// garbage collection (GC). Take the following implications into account when -// deciding whether to use the Go collector: -// -// 1. The performance impact of stopping the world is the more relevant the more -// frequently metrics are collected. However, with Go1.9 or later the -// stop-the-world time per metrics collection is very short (~25µs) so that the -// performance impact will only matter in rare cases. However, with older Go -// versions, the stop-the-world duration depends on the heap size and can be -// quite significant (~1.7 ms/GiB as per -// https://go-review.googlesource.com/c/go/+/34937). -// -// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the -// metrics collection happens to coincide with GC, it will only complete after -// GC has finished. Usually, GC is fast enough to not cause problems. However, -// with a very large heap, GC might take multiple seconds, which is enough to -// cause scrape timeouts in common setups. To avoid this problem, the Go -// collector will use the memstats from a previous collection if -// runtime.ReadMemStats takes more than 1s. However, if there are no previously -// collected memstats, or their collection is more than 5m ago, the collection -// will block until runtime.ReadMemStats succeeds. -// -// NOTE: The problem is solved in Go 1.15, see -// https://github.com/golang/go/issues/19812 for the related Go issue. -func NewGoCollector() prometheus.Collector { - return prometheus.NewGoCollector() -} diff --git a/prometheus/collectors/go_collector_go120_test.go b/prometheus/collectors/go_collector_go120_test.go deleted file mode 100644 index e67fea10e..000000000 --- a/prometheus/collectors/go_collector_go120_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
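The CollectorFunc type introduced in prometheus/collectorfunc.go above removes the need for a dedicated struct when a single Collect function suffices. A minimal sketch of registering one follows; the metric name, the currentDepth helper, and the promhttp wiring are illustrative, not part of the patch:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// currentDepth stands in for whatever state the application exposes.
func currentDepth() float64 { return 42 }

func main() {
	desc := prometheus.NewDesc("queue_depth", "Current depth of the work queue.", nil, nil)

	// Wrap a plain function as a Collector; Describe is derived via DescribeByCollect.
	queueDepth := prometheus.CollectorFunc(func(ch chan<- prometheus.Metric) {
		ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, currentDepth())
	})

	reg := prometheus.NewRegistry()
	reg.MustRegister(queueDepth)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

Because Describe is derived through DescribeByCollect, the wrapped function should emit metrics with a stable set of descriptors, which is why the Desc is created once outside the closure.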
- -//go:build go1.20 && !go1.21 -// +build go1.20,!go1.21 - -package collectors - -func withAllMetrics() []string { - return withBaseMetrics([]string{ - "go_cgo_go_to_c_calls_calls_total", - "go_cpu_classes_gc_mark_assist_cpu_seconds_total", - "go_cpu_classes_gc_mark_dedicated_cpu_seconds_total", - "go_cpu_classes_gc_mark_idle_cpu_seconds_total", - "go_cpu_classes_gc_pause_cpu_seconds_total", - "go_cpu_classes_gc_total_cpu_seconds_total", - "go_cpu_classes_idle_cpu_seconds_total", - "go_cpu_classes_scavenge_assist_cpu_seconds_total", - "go_cpu_classes_scavenge_background_cpu_seconds_total", - "go_cpu_classes_scavenge_total_cpu_seconds_total", - "go_cpu_classes_total_cpu_seconds_total", - "go_cpu_classes_user_cpu_seconds_total", - "go_gc_cycles_automatic_gc_cycles_total", - "go_gc_cycles_forced_gc_cycles_total", - "go_gc_cycles_total_gc_cycles_total", - "go_gc_heap_allocs_by_size_bytes", - "go_gc_heap_allocs_bytes_total", - "go_gc_heap_allocs_objects_total", - "go_gc_heap_frees_by_size_bytes", - "go_gc_heap_frees_bytes_total", - "go_gc_heap_frees_objects_total", - "go_gc_heap_goal_bytes", - "go_gc_heap_objects_objects", - "go_gc_heap_tiny_allocs_objects_total", - "go_gc_limiter_last_enabled_gc_cycle", - "go_gc_pauses_seconds", - "go_gc_stack_starting_size_bytes", - "go_memory_classes_heap_free_bytes", - "go_memory_classes_heap_objects_bytes", - "go_memory_classes_heap_released_bytes", - "go_memory_classes_heap_stacks_bytes", - "go_memory_classes_heap_unused_bytes", - "go_memory_classes_metadata_mcache_free_bytes", - "go_memory_classes_metadata_mcache_inuse_bytes", - "go_memory_classes_metadata_mspan_free_bytes", - "go_memory_classes_metadata_mspan_inuse_bytes", - "go_memory_classes_metadata_other_bytes", - "go_memory_classes_os_stacks_bytes", - "go_memory_classes_other_bytes", - "go_memory_classes_profiling_buckets_bytes", - "go_memory_classes_total_bytes", - "go_sched_gomaxprocs_threads", - "go_sched_goroutines_goroutines", - "go_sched_latencies_seconds", - "go_sync_mutex_wait_total_seconds_total", - }) -} - -func withGCMetrics() []string { - return withBaseMetrics([]string{ - "go_gc_cycles_automatic_gc_cycles_total", - "go_gc_cycles_forced_gc_cycles_total", - "go_gc_cycles_total_gc_cycles_total", - "go_gc_heap_allocs_by_size_bytes", - "go_gc_heap_allocs_bytes_total", - "go_gc_heap_allocs_objects_total", - "go_gc_heap_frees_by_size_bytes", - "go_gc_heap_frees_bytes_total", - "go_gc_heap_frees_objects_total", - "go_gc_heap_goal_bytes", - "go_gc_heap_objects_objects", - "go_gc_heap_tiny_allocs_objects_total", - "go_gc_limiter_last_enabled_gc_cycle", - "go_gc_pauses_seconds", - "go_gc_stack_starting_size_bytes", - }) -} - -func withMemoryMetrics() []string { - return withBaseMetrics([]string{ - "go_memory_classes_heap_free_bytes", - "go_memory_classes_heap_objects_bytes", - "go_memory_classes_heap_released_bytes", - "go_memory_classes_heap_stacks_bytes", - "go_memory_classes_heap_unused_bytes", - "go_memory_classes_metadata_mcache_free_bytes", - "go_memory_classes_metadata_mcache_inuse_bytes", - "go_memory_classes_metadata_mspan_free_bytes", - "go_memory_classes_metadata_mspan_inuse_bytes", - "go_memory_classes_metadata_other_bytes", - "go_memory_classes_os_stacks_bytes", - "go_memory_classes_other_bytes", - "go_memory_classes_profiling_buckets_bytes", - "go_memory_classes_total_bytes", - }) -} - -func withSchedulerMetrics() []string { - return withBaseMetrics([]string{ - "go_sched_gomaxprocs_threads", - "go_sched_goroutines_goroutines", - "go_sched_latencies_seconds", - }) -} - -func 
withDebugMetrics() []string { - return withBaseMetrics([]string{}) -} - -var ( - defaultRuntimeMetrics = []string{ - "go_sched_gomaxprocs_threads", - } - onlyGCDefRuntimeMetrics = []string{} - onlySchedDefRuntimeMetrics = []string{ - "go_sched_gomaxprocs_threads", - } -) diff --git a/prometheus/collectors/go_collector_go123_test.go b/prometheus/collectors/go_collector_go123_test.go index 1c26dc25b..9969febf3 100644 --- a/prometheus/collectors/go_collector_go123_test.go +++ b/prometheus/collectors/go_collector_go123_test.go @@ -52,6 +52,7 @@ func withAllMetrics() []string { "go_gc_scan_stack_bytes", "go_gc_scan_total_bytes", "go_gc_stack_starting_size_bytes", + "go_godebug_non_default_behavior_allowmultiplevcs_events_total", "go_godebug_non_default_behavior_asynctimerchan_events_total", "go_godebug_non_default_behavior_execerrdot_events_total", "go_godebug_non_default_behavior_gocachehash_events_total", @@ -169,6 +170,7 @@ func withSchedulerMetrics() []string { func withDebugMetrics() []string { return withBaseMetrics([]string{ + "go_godebug_non_default_behavior_allowmultiplevcs_events_total", "go_godebug_non_default_behavior_asynctimerchan_events_total", "go_godebug_non_default_behavior_execerrdot_events_total", "go_godebug_non_default_behavior_gocachehash_events_total", diff --git a/prometheus/collectors/go_collector_go121_test.go b/prometheus/collectors/go_collector_go124_test.go similarity index 71% rename from prometheus/collectors/go_collector_go121_test.go rename to prometheus/collectors/go_collector_go124_test.go index f8a5879af..3933de410 100644 --- a/prometheus/collectors/go_collector_go121_test.go +++ b/prometheus/collectors/go_collector_go124_test.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build go1.21 && !go1.22 -// +build go1.21,!go1.22 +//go:build go1.24 && !go1.25 +// +build go1.24,!go1.25 package collectors @@ -52,24 +52,41 @@ func withAllMetrics() []string { "go_gc_scan_stack_bytes", "go_gc_scan_total_bytes", "go_gc_stack_starting_size_bytes", + "go_godebug_non_default_behavior_allowmultiplevcs_events_total", + "go_godebug_non_default_behavior_asynctimerchan_events_total", "go_godebug_non_default_behavior_execerrdot_events_total", "go_godebug_non_default_behavior_gocachehash_events_total", "go_godebug_non_default_behavior_gocachetest_events_total", "go_godebug_non_default_behavior_gocacheverify_events_total", + "go_godebug_non_default_behavior_gotestjsonbuildtext_events_total", + "go_godebug_non_default_behavior_gotypesalias_events_total", "go_godebug_non_default_behavior_http2client_events_total", "go_godebug_non_default_behavior_http2server_events_total", + "go_godebug_non_default_behavior_httplaxcontentlength_events_total", + "go_godebug_non_default_behavior_httpmuxgo121_events_total", + "go_godebug_non_default_behavior_httpservecontentkeepheaders_events_total", "go_godebug_non_default_behavior_installgoroot_events_total", - "go_godebug_non_default_behavior_jstmpllitinterp_events_total", "go_godebug_non_default_behavior_multipartmaxheaders_events_total", "go_godebug_non_default_behavior_multipartmaxparts_events_total", "go_godebug_non_default_behavior_multipathtcp_events_total", "go_godebug_non_default_behavior_netedns0_events_total", "go_godebug_non_default_behavior_panicnil_events_total", "go_godebug_non_default_behavior_randautoseed_events_total", + "go_godebug_non_default_behavior_randseednop_events_total", + "go_godebug_non_default_behavior_rsa1024min_events_total", "go_godebug_non_default_behavior_tarinsecurepath_events_total", + "go_godebug_non_default_behavior_tls10server_events_total", + "go_godebug_non_default_behavior_tls3des_events_total", "go_godebug_non_default_behavior_tlsmaxrsasize_events_total", - "go_godebug_non_default_behavior_x509sha1_events_total", + "go_godebug_non_default_behavior_tlsrsakex_events_total", + "go_godebug_non_default_behavior_tlsunsafeekm_events_total", + "go_godebug_non_default_behavior_winreadlinkvolume_events_total", + "go_godebug_non_default_behavior_winsymlink_events_total", + "go_godebug_non_default_behavior_x509keypairleaf_events_total", + "go_godebug_non_default_behavior_x509negativeserial_events_total", + "go_godebug_non_default_behavior_x509rsacrt_events_total", "go_godebug_non_default_behavior_x509usefallbackroots_events_total", + "go_godebug_non_default_behavior_x509usepolicies_events_total", "go_godebug_non_default_behavior_zipinsecurepath_events_total", "go_memory_classes_heap_free_bytes", "go_memory_classes_heap_objects_bytes", @@ -88,6 +105,10 @@ func withAllMetrics() []string { "go_sched_gomaxprocs_threads", "go_sched_goroutines_goroutines", "go_sched_latencies_seconds", + "go_sched_pauses_stopping_gc_seconds", + "go_sched_pauses_stopping_other_seconds", + "go_sched_pauses_total_gc_seconds", + "go_sched_pauses_total_other_seconds", "go_sync_mutex_wait_total_seconds_total", }) } @@ -143,29 +164,50 @@ func withSchedulerMetrics() []string { "go_sched_gomaxprocs_threads", "go_sched_goroutines_goroutines", "go_sched_latencies_seconds", + "go_sched_pauses_stopping_gc_seconds", + "go_sched_pauses_stopping_other_seconds", + "go_sched_pauses_total_gc_seconds", + "go_sched_pauses_total_other_seconds", }) } func withDebugMetrics() []string { return withBaseMetrics([]string{ + 
"go_godebug_non_default_behavior_allowmultiplevcs_events_total", + "go_godebug_non_default_behavior_asynctimerchan_events_total", "go_godebug_non_default_behavior_execerrdot_events_total", "go_godebug_non_default_behavior_gocachehash_events_total", "go_godebug_non_default_behavior_gocachetest_events_total", "go_godebug_non_default_behavior_gocacheverify_events_total", + "go_godebug_non_default_behavior_gotestjsonbuildtext_events_total", + "go_godebug_non_default_behavior_gotypesalias_events_total", "go_godebug_non_default_behavior_http2client_events_total", "go_godebug_non_default_behavior_http2server_events_total", + "go_godebug_non_default_behavior_httplaxcontentlength_events_total", + "go_godebug_non_default_behavior_httpmuxgo121_events_total", + "go_godebug_non_default_behavior_httpservecontentkeepheaders_events_total", "go_godebug_non_default_behavior_installgoroot_events_total", - "go_godebug_non_default_behavior_jstmpllitinterp_events_total", "go_godebug_non_default_behavior_multipartmaxheaders_events_total", "go_godebug_non_default_behavior_multipartmaxparts_events_total", "go_godebug_non_default_behavior_multipathtcp_events_total", "go_godebug_non_default_behavior_netedns0_events_total", "go_godebug_non_default_behavior_panicnil_events_total", "go_godebug_non_default_behavior_randautoseed_events_total", + "go_godebug_non_default_behavior_randseednop_events_total", + "go_godebug_non_default_behavior_rsa1024min_events_total", "go_godebug_non_default_behavior_tarinsecurepath_events_total", + "go_godebug_non_default_behavior_tls10server_events_total", + "go_godebug_non_default_behavior_tls3des_events_total", "go_godebug_non_default_behavior_tlsmaxrsasize_events_total", - "go_godebug_non_default_behavior_x509sha1_events_total", + "go_godebug_non_default_behavior_tlsrsakex_events_total", + "go_godebug_non_default_behavior_tlsunsafeekm_events_total", + "go_godebug_non_default_behavior_winreadlinkvolume_events_total", + "go_godebug_non_default_behavior_winsymlink_events_total", + "go_godebug_non_default_behavior_x509keypairleaf_events_total", + "go_godebug_non_default_behavior_x509negativeserial_events_total", + "go_godebug_non_default_behavior_x509rsacrt_events_total", "go_godebug_non_default_behavior_x509usefallbackroots_events_total", + "go_godebug_non_default_behavior_x509usepolicies_events_total", "go_godebug_non_default_behavior_zipinsecurepath_events_total", }) } diff --git a/prometheus/collectors/go_collector_latest.go b/prometheus/collectors/go_collector_latest.go index cc4ef1077..d29ade654 100644 --- a/prometheus/collectors/go_collector_latest.go +++ b/prometheus/collectors/go_collector_latest.go @@ -90,13 +90,13 @@ func WithGoCollectorMemStatsMetricsDisabled() func(options *internal.GoCollector // GoRuntimeMetricsRule allow enabling and configuring particular group of runtime/metrics. // TODO(bwplotka): Consider adding ability to adjust buckets. type GoRuntimeMetricsRule struct { - // Matcher represents RE2 expression will match the runtime/metrics from https://golang.bg/src/runtime/metrics/description.go + // Matcher represents RE2 expression will match the runtime/metrics from https://pkg.go.dev/runtime/metrics // Use `regexp.MustCompile` or `regexp.Compile` to create this field. Matcher *regexp.Regexp } // WithGoCollectorRuntimeMetrics allows enabling and configuring particular group of runtime/metrics. -// See the list of metrics https://golang.bg/src/runtime/metrics/description.go (pick the Go version you use there!). 
+// See the list of metrics https://pkg.go.dev/runtime/metrics (pick the Go version you use there!). // You can use this option in repeated manner, which will add new rules. The order of rules is important, the last rule // that matches particular metrics is applied. func WithGoCollectorRuntimeMetrics(rules ...GoRuntimeMetricsRule) func(options *internal.GoCollectorOptions) { diff --git a/prometheus/collectors/go_collector_latest_test.go b/prometheus/collectors/go_collector_latest_test.go index 974db6c13..4d8f87a0a 100644 --- a/prometheus/collectors/go_collector_latest_test.go +++ b/prometheus/collectors/go_collector_latest_test.go @@ -258,7 +258,7 @@ func TestGoCollectorDenyList(t *testing.T) { } } -func ExampleGoCollector() { +func ExampleNewGoCollector() { reg := prometheus.NewPedanticRegistry() // Register the GoCollector with the default options. Only the base metrics, default runtime metrics and memstats are enabled. @@ -268,7 +268,7 @@ func ExampleGoCollector() { log.Fatal(http.ListenAndServe(":8080", nil)) } -func ExampleGoCollector_WithAdvancedGoMetrics() { +func ExampleNewGoCollector_withAdvancedGoMetrics() { reg := prometheus.NewPedanticRegistry() // Enable Go metrics with pre-defined rules. Or your custom rules. @@ -289,7 +289,7 @@ func ExampleGoCollector_WithAdvancedGoMetrics() { log.Fatal(http.ListenAndServe(":8080", nil)) } -func ExampleGoCollector_DefaultRegister() { +func ExampleNewGoCollector_defaultRegister() { // Unregister the default GoCollector. prometheus.Unregister(NewGoCollector()) diff --git a/prometheus/collectors/version/version.go b/prometheus/collectors/version/version.go index c96e18712..0306b5878 100644 --- a/prometheus/collectors/version/version.go +++ b/prometheus/collectors/version/version.go @@ -15,15 +15,44 @@ package version import ( "fmt" + "maps" "github.com/prometheus/common/version" "github.com/prometheus/client_golang/prometheus" ) +type Option func(*options) + +type options struct { + extraConstLabels prometheus.Labels +} + +func WithExtraConstLabels(l prometheus.Labels) Option { + return func(o *options) { + o.extraConstLabels = l + } +} + // NewCollector returns a collector that exports metrics about current version // information. 
-func NewCollector(program string) prometheus.Collector { +func NewCollector(program string, opts ...Option) prometheus.Collector { + o := options{} + for _, opt := range opts { + opt(&o) + } + + constLabels := prometheus.Labels{ + "version": version.Version, + "revision": version.GetRevision(), + "branch": version.Branch, + "goversion": version.GoVersion, + "goos": version.GoOS, + "goarch": version.GoArch, + "tags": version.GetTags(), + } + maps.Copy(constLabels, o.extraConstLabels) + return prometheus.NewGaugeFunc( prometheus.GaugeOpts{ Namespace: program, @@ -32,15 +61,7 @@ func NewCollector(program string) prometheus.Collector { "A metric with a constant '1' value labeled by version, revision, branch, goversion from which %s was built, and the goos and goarch for the build.", program, ), - ConstLabels: prometheus.Labels{ - "version": version.Version, - "revision": version.GetRevision(), - "branch": version.Branch, - "goversion": version.GoVersion, - "goos": version.GoOS, - "goarch": version.GoArch, - "tags": version.GetTags(), - }, + ConstLabels: constLabels, }, func() float64 { return 1 }, ) diff --git a/prometheus/collectors/version/version_test.go b/prometheus/collectors/version/version_test.go new file mode 100644 index 000000000..ec4d308ee --- /dev/null +++ b/prometheus/collectors/version/version_test.go @@ -0,0 +1,114 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
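A minimal usage sketch of the option API added above, assuming the collector is imported from github.com/prometheus/client_golang/prometheus/collectors/version; the program name "myapp", the "team" label and the versioncollector alias are illustrative choices, not part of the patch.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// The extra const labels are merged into the myapp_build_info gauge next to
	// version, revision, branch, goversion, goos, goarch and tags.
	reg.MustRegister(versioncollector.NewCollector("myapp",
		versioncollector.WithExtraConstLabels(prometheus.Labels{"team": "observability"}),
	))
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}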
+ +package version + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/prometheus/client_golang/prometheus" +) + +var defaultLabels = []string{"branch", "goarch", "goos", "goversion", "revision", "tags", "version"} + +func TestGoVersionCollector(t *testing.T) { + reg := prometheus.NewPedanticRegistry() + reg.MustRegister(NewCollector( + "foo"), + ) + result, err := reg.Gather() + if err != nil { + t.Fatal(err) + } + + if _, err := json.Marshal(result); err != nil { + t.Errorf("json marshalling should not fail, %v", err) + } + + got := []string{} + + for _, r := range result { + got = append(got, r.GetName()) + fmt.Println("foo") + m := r.GetMetric() + + if len(m) != 1 { + t.Errorf("expected 1 metric, but got %d", len(m)) + } + + lk := []string{} + for _, lp := range m[0].GetLabel() { + lk = append(lk, lp.GetName()) + } + + if diff := cmp.Diff(lk, defaultLabels); diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } + + } + + if diff := cmp.Diff(got, []string{"foo_build_info"}); diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } +} + +func TestGoVersionCollectorWithLabels(t *testing.T) { + reg := prometheus.NewPedanticRegistry() + + labels := prometheus.Labels{ + "z-mylabel": "myvalue", + } + reg.MustRegister(NewCollector( + "foo", WithExtraConstLabels(labels)), + ) + result, err := reg.Gather() + if err != nil { + t.Fatal(err) + } + + if _, err := json.Marshal(result); err != nil { + t.Errorf("json marshalling should not fail, %v", err) + } + + got := []string{} + + for _, r := range result { + got = append(got, r.GetName()) + fmt.Println("foo") + m := r.GetMetric() + + if len(m) != 1 { + t.Errorf("expected 1 metric, but got %d", len(m)) + } + + lk := []string{} + for _, lp := range m[0].GetLabel() { + lk = append(lk, lp.GetName()) + } + + labels := append(defaultLabels, "z-mylabel") + + if diff := cmp.Diff(lk, labels); diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } + + } + + if diff := cmp.Diff(got, []string{"foo_build_info"}); diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } +} diff --git a/prometheus/counter.go b/prometheus/counter.go index 2996aef6a..4ce84e7a8 100644 --- a/prometheus/counter.go +++ b/prometheus/counter.go @@ -134,9 +134,13 @@ func (c *counter) Add(v float64) { return } - atomicUpdateFloat(&c.valBits, func(oldVal float64) float64 { - return oldVal + v - }) + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } } func (c *counter) AddWithExemplar(v float64, e Labels) { diff --git a/prometheus/desc.go b/prometheus/desc.go index ad347113c..2331b8b4f 100644 --- a/prometheus/desc.go +++ b/prometheus/desc.go @@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const help: help, variableLabels: variableLabels.compile(), } - if !model.IsValidMetricName(model.LabelValue(fqName)) { + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
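The nolint TODO above flags the deprecated package-level model.NameValidationScheme (labels.go gets the same treatment further down). As a non-authoritative sketch, the call site could take an explicitly chosen scheme instead, reusing the model.UTF8Validation and model.LegacyValidation values that already appear elsewhere in this patch; the helper name is hypothetical and the scheme type is assumed to be model.ValidationScheme.

// isValidMetricNameWith is a hypothetical helper: the caller passes the
// validation scheme explicitly instead of reading the deprecated global.
func isValidMetricNameWith(scheme model.ValidationScheme, fqName string) bool {
	return scheme.IsValidMetricName(fqName)
}

// For example:
//   isValidMetricNameWith(model.UTF8Validation, fqName)
//   isValidMetricNameWith(model.LegacyValidation, fqName)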
+ if !model.NameValidationScheme.IsValidMetricName(fqName) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } diff --git a/prometheus/examples_test.go b/prometheus/examples_test.go index fdbb6f747..88164ccc5 100644 --- a/prometheus/examples_test.go +++ b/prometheus/examples_test.go @@ -25,8 +25,10 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promhttp" ) @@ -513,7 +515,7 @@ func ExampleNewConstHistogramWithCreatedTimestamp() { // {"label":[{"name":"code","value":"200"},{"name":"method","value":"get"},{"name":"owner","value":"example"}],"histogram":{"sampleCount":"4711","sampleSum":403.34,"bucket":[{"cumulativeCount":"121","upperBound":25},{"cumulativeCount":"2403","upperBound":50},{"cumulativeCount":"3221","upperBound":100},{"cumulativeCount":"4233","upperBound":200}],"createdTimestamp":"2024-06-29T14:19:24.000000123Z"}} } -func ExampleNewConstHistogram_WithExemplar() { +func ExampleNewConstHistogram_withExemplar() { desc := prometheus.NewDesc( "http_request_duration_seconds", "A histogram of the HTTP request durations.", @@ -583,7 +585,7 @@ func ExampleGatherers() { temp.WithLabelValues("outside").Set(273.14) temp.WithLabelValues("inside").Set(298.44) - var parser expfmt.TextParser + parser := expfmt.NewTextParser(model.UTF8Validation) text := ` # TYPE humidity_percent gauge @@ -736,3 +738,130 @@ func ExampleNewConstMetricWithCreatedTimestamp() { // Output: // {"counter":{"value":1257894000,"createdTimestamp":"1970-01-01T00:00:00Z"}} } + +// Using CollectorFunc that registers the metric info for the HTTP requests. +func ExampleCollectorFunc() { + desc := prometheus.NewDesc( + "http_requests_info", + "Information about the received HTTP requests.", + []string{"code", "method"}, + nil, + ) + + // Example 1: 42 GET requests with 200 OK status code. + collector := prometheus.CollectorFunc(func(ch chan<- prometheus.Metric) { + ch <- prometheus.MustNewConstMetric( + desc, + prometheus.CounterValue, // Metric type: Counter + 42, // Value + "200", // Label value: HTTP status code + "GET", // Label value: HTTP method + ) + + // Example 2: 15 POST requests with 404 Not Found status code. + ch <- prometheus.MustNewConstMetric( + desc, + prometheus.CounterValue, + 15, + "404", + "POST", + ) + }) + + prometheus.MustRegister(collector) + + // Just for demonstration, let's check the state of the metric by registering + // it with a custom registry and then let it collect the metrics. + + reg := prometheus.NewRegistry() + reg.MustRegister(collector) + + metricFamilies, err := reg.Gather() + if err != nil || len(metricFamilies) != 1 { + panic("unexpected behavior of custom test registry") + } + + fmt.Println(toNormalizedJSON(sanitizeMetricFamily(metricFamilies[0]))) + + // Output: + // {"name":"http_requests_info","help":"Information about the received HTTP requests.","type":"COUNTER","metric":[{"label":[{"name":"code","value":"200"},{"name":"method","value":"GET"}],"counter":{"value":42}},{"label":[{"name":"code","value":"404"},{"name":"method","value":"POST"}],"counter":{"value":15}}]} +} + +// Using WrapCollectorWith to un-register metrics registered by a third party lib. 
+// newThirdPartyLibFoo illustrates a constructor from a third-party lib that does +// not expose any way to un-register metrics. +func ExampleWrapCollectorWith() { + reg := prometheus.NewRegistry() + + // We want to create two instances of thirdPartyLibFoo, each one wrapped with + // its "instance" label. + firstReg := prometheus.NewRegistry() + _ = newThirdPartyLibFoo(firstReg) + firstCollector := prometheus.WrapCollectorWith(prometheus.Labels{"instance": "first"}, firstReg) + reg.MustRegister(firstCollector) + + secondReg := prometheus.NewRegistry() + _ = newThirdPartyLibFoo(secondReg) + secondCollector := prometheus.WrapCollectorWith(prometheus.Labels{"instance": "second"}, secondReg) + reg.MustRegister(secondCollector) + + // So far we have illustrated that we can create two instances of thirdPartyLibFoo, + // wrapping each one's metrics with some const label. + // This is something we could've achieved by doing: + // newThirdPartyLibFoo(prometheus.WrapRegistererWith(prometheus.Labels{"instance": "first"}, reg)) + metricFamilies, err := reg.Gather() + if err != nil { + panic("unexpected behavior of registry") + } + fmt.Println("Both instances:") + fmt.Println(toNormalizedJSON(sanitizeMetricFamily(metricFamilies[0]))) + + // Now we want to unregister first Foo's metrics, and then register them again. + // This is not possible by passing a wrapped Registerer to newThirdPartyLibFoo, + // because we have already lost track of the registered Collectors. + // However, since we've collected Foo's metrics in its own Registry, and we have registered that + // as a specific Collector, we can now de-register them: + unregistered := reg.Unregister(firstCollector) + if !unregistered { + panic("unexpected behavior of registry") + } + + metricFamilies, err = reg.Gather() + if err != nil { + panic("unexpected behavior of registry") + } + fmt.Println("First unregistered:") + fmt.Println(toNormalizedJSON(sanitizeMetricFamily(metricFamilies[0]))) + + // Now we can create another instance of Foo with {instance: "first"} label again. + firstRegAgain := prometheus.NewRegistry() + _ = newThirdPartyLibFoo(firstRegAgain) + firstCollectorAgain := prometheus.WrapCollectorWith(prometheus.Labels{"instance": "first"}, firstRegAgain) + reg.MustRegister(firstCollectorAgain) + + metricFamilies, err = reg.Gather() + if err != nil { + panic("unexpected behavior of registry") + } + fmt.Println("Both again:") + fmt.Println(toNormalizedJSON(sanitizeMetricFamily(metricFamilies[0]))) + + // Output: + // Both instances: + // {"name":"foo","help":"Registered forever.","type":"GAUGE","metric":[{"label":[{"name":"instance","value":"first"}],"gauge":{"value":1}},{"label":[{"name":"instance","value":"second"}],"gauge":{"value":1}}]} + // First unregistered: + // {"name":"foo","help":"Registered forever.","type":"GAUGE","metric":[{"label":[{"name":"instance","value":"second"}],"gauge":{"value":1}}]} + // Both again: + // {"name":"foo","help":"Registered forever.","type":"GAUGE","metric":[{"label":[{"name":"instance","value":"first"}],"gauge":{"value":1}},{"label":[{"name":"instance","value":"second"}],"gauge":{"value":1}}]} +} + +func newThirdPartyLibFoo(reg prometheus.Registerer) struct{} { + foo := struct{}{} + // Register the metrics of the third party lib.
+ c := promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + Name: "foo", + Help: "Registered forever.", + }) + c.Set(1) + return foo +} diff --git a/prometheus/gauge.go b/prometheus/gauge.go index aa1846365..dd2eac940 100644 --- a/prometheus/gauge.go +++ b/prometheus/gauge.go @@ -120,9 +120,13 @@ func (g *gauge) Dec() { } func (g *gauge) Add(val float64) { - atomicUpdateFloat(&g.valBits, func(oldVal float64) float64 { - return oldVal + val - }) + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } } func (g *gauge) Sub(val float64) { diff --git a/prometheus/go_collector_go116.go b/prometheus/go_collector_go116.go deleted file mode 100644 index 897a6e906..000000000 --- a/prometheus/go_collector_go116.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.17 -// +build !go1.17 - -package prometheus - -import ( - "runtime" - "sync" - "time" -) - -type goCollector struct { - base baseGoCollector - - // ms... are memstats related. - msLast *runtime.MemStats // Previously collected memstats. - msLastTimestamp time.Time - msMtx sync.Mutex // Protects msLast and msLastTimestamp. - msMetrics memStatsMetrics - msRead func(*runtime.MemStats) // For mocking in tests. - msMaxWait time.Duration // Wait time for fresh memstats. - msMaxAge time.Duration // Maximum allowed age of old memstats. -} - -// NewGoCollector is the obsolete version of collectors.NewGoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewGoCollector instead. -func NewGoCollector() Collector { - msMetrics := goRuntimeMemStats() - msMetrics = append(msMetrics, struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType - }{ - // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, - }) - return &goCollector{ - base: newBaseGoCollector(), - msLast: &runtime.MemStats{}, - msRead: runtime.ReadMemStats, - msMaxWait: time.Second, - msMaxAge: 5 * time.Minute, - msMetrics: msMetrics, - } -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - c.base.Describe(ch) - for _, i := range c.msMetrics { - ch <- i.desc - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *goCollector) Collect(ch chan<- Metric) { - var ( - ms = &runtime.MemStats{} - done = make(chan struct{}) - ) - // Start reading memstats first as it might take a while. 
- go func() { - c.msRead(ms) - c.msMtx.Lock() - c.msLast = ms - c.msLastTimestamp = time.Now() - c.msMtx.Unlock() - close(done) - }() - - // Collect base non-memory metrics. - c.base.Collect(ch) - - timer := time.NewTimer(c.msMaxWait) - select { - case <-done: // Our own ReadMemStats succeeded in time. Use it. - timer.Stop() // Important for high collection frequencies to not pile up timers. - c.msCollect(ch, ms) - return - case <-timer.C: // Time out, use last memstats if possible. Continue below. - } - c.msMtx.Lock() - if time.Since(c.msLastTimestamp) < c.msMaxAge { - // Last memstats are recent enough. Collect from them under the lock. - c.msCollect(ch, c.msLast) - c.msMtx.Unlock() - return - } - // If we are here, the last memstats are too old or don't exist. We have - // to wait until our own ReadMemStats finally completes. For that to - // happen, we have to release the lock. - c.msMtx.Unlock() - <-done - c.msCollect(ch, ms) -} - -func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) - } -} diff --git a/prometheus/go_collector_go116_test.go b/prometheus/go_collector_go116_test.go deleted file mode 100644 index 8969f38a1..000000000 --- a/prometheus/go_collector_go116_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.17 -// +build !go1.17 - -package prometheus - -import ( - "runtime" - "testing" - "time" - - dto "github.com/prometheus/client_model/go" -) - -func TestGoCollectorMemStats(t *testing.T) { - var ( - c = NewGoCollector().(*goCollector) - got uint64 - ) - - checkCollect := func(want uint64) { - metricCh := make(chan Metric) - endCh := make(chan struct{}) - - go func() { - c.Collect(metricCh) - close(endCh) - }() - Collect: - for { - select { - case metric := <-metricCh: - if metric.Desc().fqName != "go_memstats_alloc_bytes" { - continue Collect - } - pb := &dto.Metric{} - metric.Write(pb) - got = uint64(pb.GetGauge().GetValue()) - case <-endCh: - break Collect - } - } - if want != got { - t.Errorf("unexpected value of go_memstats_alloc_bytes, want %d, got %d", want, got) - } - } - - // Speed up the timing to make the test faster. - c.msMaxWait = 5 * time.Millisecond - c.msMaxAge = 50 * time.Millisecond - - // Scenario 1: msRead responds slowly, no previous memstats available, - // msRead is executed anyway. - c.msRead = func(ms *runtime.MemStats) { - time.Sleep(20 * time.Millisecond) - ms.Alloc = 1 - } - checkCollect(1) - // Now msLast is set. - c.msMtx.Lock() - if want, got := uint64(1), c.msLast.Alloc; want != got { - t.Errorf("unexpected of msLast.Alloc, want %d, got %d", want, got) - } - c.msMtx.Unlock() - - // Scenario 2: msRead responds fast, previous memstats available, new - // value collected. - c.msRead = func(ms *runtime.MemStats) { - ms.Alloc = 2 - } - checkCollect(2) - // msLast is set, too. 
- c.msMtx.Lock() - if want, got := uint64(2), c.msLast.Alloc; want != got { - t.Errorf("unexpected of msLast.Alloc, want %d, got %d", want, got) - } - c.msMtx.Unlock() - - // Scenario 3: msRead responds slowly, previous memstats available, old - // value collected. - c.msRead = func(ms *runtime.MemStats) { - time.Sleep(20 * time.Millisecond) - ms.Alloc = 3 - } - checkCollect(2) - // After waiting, new value is still set in msLast. - time.Sleep(80 * time.Millisecond) - c.msMtx.Lock() - if want, got := uint64(3), c.msLast.Alloc; want != got { - t.Errorf("unexpected of msLast.Alloc, want %d, got %d", want, got) - } - c.msMtx.Unlock() - - // Scenario 4: msRead responds slowly, previous memstats is too old, new - // value collected. - c.msRead = func(ms *runtime.MemStats) { - time.Sleep(20 * time.Millisecond) - ms.Alloc = 4 - } - checkCollect(4) - c.msMtx.Lock() - if want, got := uint64(4), c.msLast.Alloc; want != got { - t.Errorf("unexpected of msLast.Alloc, want %d, got %d", want, got) - } - c.msMtx.Unlock() -} diff --git a/prometheus/go_collector_metrics_go120_test.go b/prometheus/go_collector_metrics_go120_test.go deleted file mode 100644 index 4c1ca38d3..000000000 --- a/prometheus/go_collector_metrics_go120_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by gen_go_collector_metrics_set.go; DO NOT EDIT. -//go:generate go run gen_go_collector_metrics_set.go go1.20 - -//go:build go1.20 && !go1.21 -// +build go1.20,!go1.21 - -package prometheus - -var ( - expectedRuntimeMetrics = map[string]string{ - "/cgo/go-to-c-calls:calls": "go_cgo_go_to_c_calls_calls_total", - "/cpu/classes/gc/mark/assist:cpu-seconds": "go_cpu_classes_gc_mark_assist_cpu_seconds_total", - "/cpu/classes/gc/mark/dedicated:cpu-seconds": "go_cpu_classes_gc_mark_dedicated_cpu_seconds_total", - "/cpu/classes/gc/mark/idle:cpu-seconds": "go_cpu_classes_gc_mark_idle_cpu_seconds_total", - "/cpu/classes/gc/pause:cpu-seconds": "go_cpu_classes_gc_pause_cpu_seconds_total", - "/cpu/classes/gc/total:cpu-seconds": "go_cpu_classes_gc_total_cpu_seconds_total", - "/cpu/classes/idle:cpu-seconds": "go_cpu_classes_idle_cpu_seconds_total", - "/cpu/classes/scavenge/assist:cpu-seconds": "go_cpu_classes_scavenge_assist_cpu_seconds_total", - "/cpu/classes/scavenge/background:cpu-seconds": "go_cpu_classes_scavenge_background_cpu_seconds_total", - "/cpu/classes/scavenge/total:cpu-seconds": "go_cpu_classes_scavenge_total_cpu_seconds_total", - "/cpu/classes/total:cpu-seconds": "go_cpu_classes_total_cpu_seconds_total", - "/cpu/classes/user:cpu-seconds": "go_cpu_classes_user_cpu_seconds_total", - "/gc/cycles/automatic:gc-cycles": "go_gc_cycles_automatic_gc_cycles_total", - "/gc/cycles/forced:gc-cycles": "go_gc_cycles_forced_gc_cycles_total", - "/gc/cycles/total:gc-cycles": "go_gc_cycles_total_gc_cycles_total", - "/gc/heap/allocs-by-size:bytes": "go_gc_heap_allocs_by_size_bytes", - "/gc/heap/allocs:bytes": "go_gc_heap_allocs_bytes_total", - "/gc/heap/allocs:objects": "go_gc_heap_allocs_objects_total", - "/gc/heap/frees-by-size:bytes": "go_gc_heap_frees_by_size_bytes", - "/gc/heap/frees:bytes": "go_gc_heap_frees_bytes_total", - "/gc/heap/frees:objects": "go_gc_heap_frees_objects_total", - "/gc/heap/goal:bytes": "go_gc_heap_goal_bytes", - "/gc/heap/objects:objects": "go_gc_heap_objects_objects", - "/gc/heap/tiny/allocs:objects": "go_gc_heap_tiny_allocs_objects_total", - "/gc/limiter/last-enabled:gc-cycle": "go_gc_limiter_last_enabled_gc_cycle", - "/gc/pauses:seconds": "go_gc_pauses_seconds", - "/gc/stack/starting-size:bytes": 
"go_gc_stack_starting_size_bytes", - "/memory/classes/heap/free:bytes": "go_memory_classes_heap_free_bytes", - "/memory/classes/heap/objects:bytes": "go_memory_classes_heap_objects_bytes", - "/memory/classes/heap/released:bytes": "go_memory_classes_heap_released_bytes", - "/memory/classes/heap/stacks:bytes": "go_memory_classes_heap_stacks_bytes", - "/memory/classes/heap/unused:bytes": "go_memory_classes_heap_unused_bytes", - "/memory/classes/metadata/mcache/free:bytes": "go_memory_classes_metadata_mcache_free_bytes", - "/memory/classes/metadata/mcache/inuse:bytes": "go_memory_classes_metadata_mcache_inuse_bytes", - "/memory/classes/metadata/mspan/free:bytes": "go_memory_classes_metadata_mspan_free_bytes", - "/memory/classes/metadata/mspan/inuse:bytes": "go_memory_classes_metadata_mspan_inuse_bytes", - "/memory/classes/metadata/other:bytes": "go_memory_classes_metadata_other_bytes", - "/memory/classes/os-stacks:bytes": "go_memory_classes_os_stacks_bytes", - "/memory/classes/other:bytes": "go_memory_classes_other_bytes", - "/memory/classes/profiling/buckets:bytes": "go_memory_classes_profiling_buckets_bytes", - "/memory/classes/total:bytes": "go_memory_classes_total_bytes", - "/sched/gomaxprocs:threads": "go_sched_gomaxprocs_threads", - "/sched/goroutines:goroutines": "go_sched_goroutines_goroutines", - "/sched/latencies:seconds": "go_sched_latencies_seconds", - "/sync/mutex/wait/total:seconds": "go_sync_mutex_wait_total_seconds_total", - } - - expMetrics = map[string]string{ - "/sched/gomaxprocs:threads": "go_sched_gomaxprocs_threads", - } -) - -const expectedRuntimeMetricsCardinality = 89 diff --git a/prometheus/go_collector_metrics_go121_test.go b/prometheus/go_collector_metrics_go121_test.go deleted file mode 100644 index 217b04fc7..000000000 --- a/prometheus/go_collector_metrics_go121_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Code generated by gen_go_collector_metrics_set.go; DO NOT EDIT. 
-//go:generate go run gen_go_collector_metrics_set.go go1.21 - -//go:build go1.21 && !go1.22 -// +build go1.21,!go1.22 - -package prometheus - -var ( - expectedRuntimeMetrics = map[string]string{ - "/cgo/go-to-c-calls:calls": "go_cgo_go_to_c_calls_calls_total", - "/cpu/classes/gc/mark/assist:cpu-seconds": "go_cpu_classes_gc_mark_assist_cpu_seconds_total", - "/cpu/classes/gc/mark/dedicated:cpu-seconds": "go_cpu_classes_gc_mark_dedicated_cpu_seconds_total", - "/cpu/classes/gc/mark/idle:cpu-seconds": "go_cpu_classes_gc_mark_idle_cpu_seconds_total", - "/cpu/classes/gc/pause:cpu-seconds": "go_cpu_classes_gc_pause_cpu_seconds_total", - "/cpu/classes/gc/total:cpu-seconds": "go_cpu_classes_gc_total_cpu_seconds_total", - "/cpu/classes/idle:cpu-seconds": "go_cpu_classes_idle_cpu_seconds_total", - "/cpu/classes/scavenge/assist:cpu-seconds": "go_cpu_classes_scavenge_assist_cpu_seconds_total", - "/cpu/classes/scavenge/background:cpu-seconds": "go_cpu_classes_scavenge_background_cpu_seconds_total", - "/cpu/classes/scavenge/total:cpu-seconds": "go_cpu_classes_scavenge_total_cpu_seconds_total", - "/cpu/classes/total:cpu-seconds": "go_cpu_classes_total_cpu_seconds_total", - "/cpu/classes/user:cpu-seconds": "go_cpu_classes_user_cpu_seconds_total", - "/gc/cycles/automatic:gc-cycles": "go_gc_cycles_automatic_gc_cycles_total", - "/gc/cycles/forced:gc-cycles": "go_gc_cycles_forced_gc_cycles_total", - "/gc/cycles/total:gc-cycles": "go_gc_cycles_total_gc_cycles_total", - "/gc/gogc:percent": "go_gc_gogc_percent", - "/gc/gomemlimit:bytes": "go_gc_gomemlimit_bytes", - "/gc/heap/allocs-by-size:bytes": "go_gc_heap_allocs_by_size_bytes", - "/gc/heap/allocs:bytes": "go_gc_heap_allocs_bytes_total", - "/gc/heap/allocs:objects": "go_gc_heap_allocs_objects_total", - "/gc/heap/frees-by-size:bytes": "go_gc_heap_frees_by_size_bytes", - "/gc/heap/frees:bytes": "go_gc_heap_frees_bytes_total", - "/gc/heap/frees:objects": "go_gc_heap_frees_objects_total", - "/gc/heap/goal:bytes": "go_gc_heap_goal_bytes", - "/gc/heap/live:bytes": "go_gc_heap_live_bytes", - "/gc/heap/objects:objects": "go_gc_heap_objects_objects", - "/gc/heap/tiny/allocs:objects": "go_gc_heap_tiny_allocs_objects_total", - "/gc/limiter/last-enabled:gc-cycle": "go_gc_limiter_last_enabled_gc_cycle", - "/gc/pauses:seconds": "go_gc_pauses_seconds", - "/gc/scan/globals:bytes": "go_gc_scan_globals_bytes", - "/gc/scan/heap:bytes": "go_gc_scan_heap_bytes", - "/gc/scan/stack:bytes": "go_gc_scan_stack_bytes", - "/gc/scan/total:bytes": "go_gc_scan_total_bytes", - "/gc/stack/starting-size:bytes": "go_gc_stack_starting_size_bytes", - "/godebug/non-default-behavior/execerrdot:events": "go_godebug_non_default_behavior_execerrdot_events_total", - "/godebug/non-default-behavior/gocachehash:events": "go_godebug_non_default_behavior_gocachehash_events_total", - "/godebug/non-default-behavior/gocachetest:events": "go_godebug_non_default_behavior_gocachetest_events_total", - "/godebug/non-default-behavior/gocacheverify:events": "go_godebug_non_default_behavior_gocacheverify_events_total", - "/godebug/non-default-behavior/http2client:events": "go_godebug_non_default_behavior_http2client_events_total", - "/godebug/non-default-behavior/http2server:events": "go_godebug_non_default_behavior_http2server_events_total", - "/godebug/non-default-behavior/installgoroot:events": "go_godebug_non_default_behavior_installgoroot_events_total", - "/godebug/non-default-behavior/jstmpllitinterp:events": "go_godebug_non_default_behavior_jstmpllitinterp_events_total", - 
"/godebug/non-default-behavior/multipartmaxheaders:events": "go_godebug_non_default_behavior_multipartmaxheaders_events_total", - "/godebug/non-default-behavior/multipartmaxparts:events": "go_godebug_non_default_behavior_multipartmaxparts_events_total", - "/godebug/non-default-behavior/multipathtcp:events": "go_godebug_non_default_behavior_multipathtcp_events_total", - "/godebug/non-default-behavior/netedns0:events": "go_godebug_non_default_behavior_netedns0_events_total", - "/godebug/non-default-behavior/panicnil:events": "go_godebug_non_default_behavior_panicnil_events_total", - "/godebug/non-default-behavior/randautoseed:events": "go_godebug_non_default_behavior_randautoseed_events_total", - "/godebug/non-default-behavior/tarinsecurepath:events": "go_godebug_non_default_behavior_tarinsecurepath_events_total", - "/godebug/non-default-behavior/tlsmaxrsasize:events": "go_godebug_non_default_behavior_tlsmaxrsasize_events_total", - "/godebug/non-default-behavior/x509sha1:events": "go_godebug_non_default_behavior_x509sha1_events_total", - "/godebug/non-default-behavior/x509usefallbackroots:events": "go_godebug_non_default_behavior_x509usefallbackroots_events_total", - "/godebug/non-default-behavior/zipinsecurepath:events": "go_godebug_non_default_behavior_zipinsecurepath_events_total", - "/memory/classes/heap/free:bytes": "go_memory_classes_heap_free_bytes", - "/memory/classes/heap/objects:bytes": "go_memory_classes_heap_objects_bytes", - "/memory/classes/heap/released:bytes": "go_memory_classes_heap_released_bytes", - "/memory/classes/heap/stacks:bytes": "go_memory_classes_heap_stacks_bytes", - "/memory/classes/heap/unused:bytes": "go_memory_classes_heap_unused_bytes", - "/memory/classes/metadata/mcache/free:bytes": "go_memory_classes_metadata_mcache_free_bytes", - "/memory/classes/metadata/mcache/inuse:bytes": "go_memory_classes_metadata_mcache_inuse_bytes", - "/memory/classes/metadata/mspan/free:bytes": "go_memory_classes_metadata_mspan_free_bytes", - "/memory/classes/metadata/mspan/inuse:bytes": "go_memory_classes_metadata_mspan_inuse_bytes", - "/memory/classes/metadata/other:bytes": "go_memory_classes_metadata_other_bytes", - "/memory/classes/os-stacks:bytes": "go_memory_classes_os_stacks_bytes", - "/memory/classes/other:bytes": "go_memory_classes_other_bytes", - "/memory/classes/profiling/buckets:bytes": "go_memory_classes_profiling_buckets_bytes", - "/memory/classes/total:bytes": "go_memory_classes_total_bytes", - "/sched/gomaxprocs:threads": "go_sched_gomaxprocs_threads", - "/sched/goroutines:goroutines": "go_sched_goroutines_goroutines", - "/sched/latencies:seconds": "go_sched_latencies_seconds", - "/sync/mutex/wait/total:seconds": "go_sync_mutex_wait_total_seconds_total", - } - - expMetrics = map[string]string{ - "/gc/gogc:percent": "go_gc_gogc_percent", - "/gc/gomemlimit:bytes": "go_gc_gomemlimit_bytes", - "/sched/gomaxprocs:threads": "go_sched_gomaxprocs_threads", - } -) - -const expectedRuntimeMetricsCardinality = 115 diff --git a/prometheus/go_collector_metrics_go123_test.go b/prometheus/go_collector_metrics_go123_test.go index cb9d12dd2..0a0de1856 100644 --- a/prometheus/go_collector_metrics_go123_test.go +++ b/prometheus/go_collector_metrics_go123_test.go @@ -42,6 +42,7 @@ var ( "/gc/scan/stack:bytes": "go_gc_scan_stack_bytes", "/gc/scan/total:bytes": "go_gc_scan_total_bytes", "/gc/stack/starting-size:bytes": "go_gc_stack_starting_size_bytes", + "/godebug/non-default-behavior/allowmultiplevcs:events": "go_godebug_non_default_behavior_allowmultiplevcs_events_total", 
"/godebug/non-default-behavior/asynctimerchan:events": "go_godebug_non_default_behavior_asynctimerchan_events_total", "/godebug/non-default-behavior/execerrdot:events": "go_godebug_non_default_behavior_execerrdot_events_total", "/godebug/non-default-behavior/gocachehash:events": "go_godebug_non_default_behavior_gocachehash_events_total", @@ -105,4 +106,4 @@ var ( } ) -const expectedRuntimeMetricsCardinality = 168 +const expectedRuntimeMetricsCardinality = 169 diff --git a/prometheus/go_collector_metrics_go124_test.go b/prometheus/go_collector_metrics_go124_test.go new file mode 100644 index 000000000..6c5d124e1 --- /dev/null +++ b/prometheus/go_collector_metrics_go124_test.go @@ -0,0 +1,112 @@ +// Code generated by gen_go_collector_metrics_set.go; DO NOT EDIT. +//go:generate go run gen_go_collector_metrics_set.go go1.24 + +//go:build go1.24 && !go1.25 +// +build go1.24,!go1.25 + +package prometheus + +var ( + expectedRuntimeMetrics = map[string]string{ + "/cgo/go-to-c-calls:calls": "go_cgo_go_to_c_calls_calls_total", + "/cpu/classes/gc/mark/assist:cpu-seconds": "go_cpu_classes_gc_mark_assist_cpu_seconds_total", + "/cpu/classes/gc/mark/dedicated:cpu-seconds": "go_cpu_classes_gc_mark_dedicated_cpu_seconds_total", + "/cpu/classes/gc/mark/idle:cpu-seconds": "go_cpu_classes_gc_mark_idle_cpu_seconds_total", + "/cpu/classes/gc/pause:cpu-seconds": "go_cpu_classes_gc_pause_cpu_seconds_total", + "/cpu/classes/gc/total:cpu-seconds": "go_cpu_classes_gc_total_cpu_seconds_total", + "/cpu/classes/idle:cpu-seconds": "go_cpu_classes_idle_cpu_seconds_total", + "/cpu/classes/scavenge/assist:cpu-seconds": "go_cpu_classes_scavenge_assist_cpu_seconds_total", + "/cpu/classes/scavenge/background:cpu-seconds": "go_cpu_classes_scavenge_background_cpu_seconds_total", + "/cpu/classes/scavenge/total:cpu-seconds": "go_cpu_classes_scavenge_total_cpu_seconds_total", + "/cpu/classes/total:cpu-seconds": "go_cpu_classes_total_cpu_seconds_total", + "/cpu/classes/user:cpu-seconds": "go_cpu_classes_user_cpu_seconds_total", + "/gc/cycles/automatic:gc-cycles": "go_gc_cycles_automatic_gc_cycles_total", + "/gc/cycles/forced:gc-cycles": "go_gc_cycles_forced_gc_cycles_total", + "/gc/cycles/total:gc-cycles": "go_gc_cycles_total_gc_cycles_total", + "/gc/gogc:percent": "go_gc_gogc_percent", + "/gc/gomemlimit:bytes": "go_gc_gomemlimit_bytes", + "/gc/heap/allocs-by-size:bytes": "go_gc_heap_allocs_by_size_bytes", + "/gc/heap/allocs:bytes": "go_gc_heap_allocs_bytes_total", + "/gc/heap/allocs:objects": "go_gc_heap_allocs_objects_total", + "/gc/heap/frees-by-size:bytes": "go_gc_heap_frees_by_size_bytes", + "/gc/heap/frees:bytes": "go_gc_heap_frees_bytes_total", + "/gc/heap/frees:objects": "go_gc_heap_frees_objects_total", + "/gc/heap/goal:bytes": "go_gc_heap_goal_bytes", + "/gc/heap/live:bytes": "go_gc_heap_live_bytes", + "/gc/heap/objects:objects": "go_gc_heap_objects_objects", + "/gc/heap/tiny/allocs:objects": "go_gc_heap_tiny_allocs_objects_total", + "/gc/limiter/last-enabled:gc-cycle": "go_gc_limiter_last_enabled_gc_cycle", + "/gc/pauses:seconds": "go_gc_pauses_seconds", + "/gc/scan/globals:bytes": "go_gc_scan_globals_bytes", + "/gc/scan/heap:bytes": "go_gc_scan_heap_bytes", + "/gc/scan/stack:bytes": "go_gc_scan_stack_bytes", + "/gc/scan/total:bytes": "go_gc_scan_total_bytes", + "/gc/stack/starting-size:bytes": "go_gc_stack_starting_size_bytes", + "/godebug/non-default-behavior/allowmultiplevcs:events": "go_godebug_non_default_behavior_allowmultiplevcs_events_total", + "/godebug/non-default-behavior/asynctimerchan:events": 
"go_godebug_non_default_behavior_asynctimerchan_events_total", + "/godebug/non-default-behavior/execerrdot:events": "go_godebug_non_default_behavior_execerrdot_events_total", + "/godebug/non-default-behavior/gocachehash:events": "go_godebug_non_default_behavior_gocachehash_events_total", + "/godebug/non-default-behavior/gocachetest:events": "go_godebug_non_default_behavior_gocachetest_events_total", + "/godebug/non-default-behavior/gocacheverify:events": "go_godebug_non_default_behavior_gocacheverify_events_total", + "/godebug/non-default-behavior/gotestjsonbuildtext:events": "go_godebug_non_default_behavior_gotestjsonbuildtext_events_total", + "/godebug/non-default-behavior/gotypesalias:events": "go_godebug_non_default_behavior_gotypesalias_events_total", + "/godebug/non-default-behavior/http2client:events": "go_godebug_non_default_behavior_http2client_events_total", + "/godebug/non-default-behavior/http2server:events": "go_godebug_non_default_behavior_http2server_events_total", + "/godebug/non-default-behavior/httplaxcontentlength:events": "go_godebug_non_default_behavior_httplaxcontentlength_events_total", + "/godebug/non-default-behavior/httpmuxgo121:events": "go_godebug_non_default_behavior_httpmuxgo121_events_total", + "/godebug/non-default-behavior/httpservecontentkeepheaders:events": "go_godebug_non_default_behavior_httpservecontentkeepheaders_events_total", + "/godebug/non-default-behavior/installgoroot:events": "go_godebug_non_default_behavior_installgoroot_events_total", + "/godebug/non-default-behavior/multipartmaxheaders:events": "go_godebug_non_default_behavior_multipartmaxheaders_events_total", + "/godebug/non-default-behavior/multipartmaxparts:events": "go_godebug_non_default_behavior_multipartmaxparts_events_total", + "/godebug/non-default-behavior/multipathtcp:events": "go_godebug_non_default_behavior_multipathtcp_events_total", + "/godebug/non-default-behavior/netedns0:events": "go_godebug_non_default_behavior_netedns0_events_total", + "/godebug/non-default-behavior/panicnil:events": "go_godebug_non_default_behavior_panicnil_events_total", + "/godebug/non-default-behavior/randautoseed:events": "go_godebug_non_default_behavior_randautoseed_events_total", + "/godebug/non-default-behavior/randseednop:events": "go_godebug_non_default_behavior_randseednop_events_total", + "/godebug/non-default-behavior/rsa1024min:events": "go_godebug_non_default_behavior_rsa1024min_events_total", + "/godebug/non-default-behavior/tarinsecurepath:events": "go_godebug_non_default_behavior_tarinsecurepath_events_total", + "/godebug/non-default-behavior/tls10server:events": "go_godebug_non_default_behavior_tls10server_events_total", + "/godebug/non-default-behavior/tls3des:events": "go_godebug_non_default_behavior_tls3des_events_total", + "/godebug/non-default-behavior/tlsmaxrsasize:events": "go_godebug_non_default_behavior_tlsmaxrsasize_events_total", + "/godebug/non-default-behavior/tlsrsakex:events": "go_godebug_non_default_behavior_tlsrsakex_events_total", + "/godebug/non-default-behavior/tlsunsafeekm:events": "go_godebug_non_default_behavior_tlsunsafeekm_events_total", + "/godebug/non-default-behavior/winreadlinkvolume:events": "go_godebug_non_default_behavior_winreadlinkvolume_events_total", + "/godebug/non-default-behavior/winsymlink:events": "go_godebug_non_default_behavior_winsymlink_events_total", + "/godebug/non-default-behavior/x509keypairleaf:events": "go_godebug_non_default_behavior_x509keypairleaf_events_total", + "/godebug/non-default-behavior/x509negativeserial:events": 
"go_godebug_non_default_behavior_x509negativeserial_events_total", + "/godebug/non-default-behavior/x509rsacrt:events": "go_godebug_non_default_behavior_x509rsacrt_events_total", + "/godebug/non-default-behavior/x509usefallbackroots:events": "go_godebug_non_default_behavior_x509usefallbackroots_events_total", + "/godebug/non-default-behavior/x509usepolicies:events": "go_godebug_non_default_behavior_x509usepolicies_events_total", + "/godebug/non-default-behavior/zipinsecurepath:events": "go_godebug_non_default_behavior_zipinsecurepath_events_total", + "/memory/classes/heap/free:bytes": "go_memory_classes_heap_free_bytes", + "/memory/classes/heap/objects:bytes": "go_memory_classes_heap_objects_bytes", + "/memory/classes/heap/released:bytes": "go_memory_classes_heap_released_bytes", + "/memory/classes/heap/stacks:bytes": "go_memory_classes_heap_stacks_bytes", + "/memory/classes/heap/unused:bytes": "go_memory_classes_heap_unused_bytes", + "/memory/classes/metadata/mcache/free:bytes": "go_memory_classes_metadata_mcache_free_bytes", + "/memory/classes/metadata/mcache/inuse:bytes": "go_memory_classes_metadata_mcache_inuse_bytes", + "/memory/classes/metadata/mspan/free:bytes": "go_memory_classes_metadata_mspan_free_bytes", + "/memory/classes/metadata/mspan/inuse:bytes": "go_memory_classes_metadata_mspan_inuse_bytes", + "/memory/classes/metadata/other:bytes": "go_memory_classes_metadata_other_bytes", + "/memory/classes/os-stacks:bytes": "go_memory_classes_os_stacks_bytes", + "/memory/classes/other:bytes": "go_memory_classes_other_bytes", + "/memory/classes/profiling/buckets:bytes": "go_memory_classes_profiling_buckets_bytes", + "/memory/classes/total:bytes": "go_memory_classes_total_bytes", + "/sched/gomaxprocs:threads": "go_sched_gomaxprocs_threads", + "/sched/goroutines:goroutines": "go_sched_goroutines_goroutines", + "/sched/latencies:seconds": "go_sched_latencies_seconds", + "/sched/pauses/stopping/gc:seconds": "go_sched_pauses_stopping_gc_seconds", + "/sched/pauses/stopping/other:seconds": "go_sched_pauses_stopping_other_seconds", + "/sched/pauses/total/gc:seconds": "go_sched_pauses_total_gc_seconds", + "/sched/pauses/total/other:seconds": "go_sched_pauses_total_other_seconds", + "/sync/mutex/wait/total:seconds": "go_sync_mutex_wait_total_seconds_total", + } + + expMetrics = map[string]string{ + "/gc/gogc:percent": "go_gc_gogc_percent", + "/gc/gomemlimit:bytes": "go_gc_gomemlimit_bytes", + "/sched/gomaxprocs:threads": "go_sched_gomaxprocs_threads", + } +) + +const expectedRuntimeMetricsCardinality = 172 diff --git a/prometheus/graphite/bridge.go b/prometheus/graphite/bridge.go index 84eac0de9..c763797ca 100644 --- a/prometheus/graphite/bridge.go +++ b/prometheus/graphite/bridge.go @@ -307,7 +307,8 @@ func replaceInvalidRune(c rune) rune { if c == ' ' { return '.' } - if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || c == '-' || (c >= '0' && c <= '9')) { + // TODO: Apply De Morgan's law to the condition. Make sure to test the condition first. 
+ if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || c == '-' || (c >= '0' && c <= '9')) { //nolint:staticcheck return '_' } return c diff --git a/prometheus/histogram.go b/prometheus/histogram.go index 1a279035b..c453b754a 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -1647,9 +1647,13 @@ func waitForCooldown(count uint64, counts *histogramCounts) { // atomicAddFloat adds the provided float atomically to another float // represented by the bit pattern the bits pointer is pointing to. func atomicAddFloat(bits *uint64, v float64) { - atomicUpdateFloat(bits, func(oldVal float64) float64 { - return oldVal + v - }) + for { + loadedBits := atomic.LoadUint64(bits) + newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) + if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { + break + } + } } // atomicDecUint32 atomically decrements the uint32 p points to. See diff --git a/prometheus/internal/difflib.go b/prometheus/internal/difflib.go index 8b016355a..7bac0da33 100644 --- a/prometheus/internal/difflib.go +++ b/prometheus/internal/difflib.go @@ -453,7 +453,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { } group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') { groups = append(groups, group) } return groups @@ -568,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) + _, err := fmt.Fprintf(buf, format, args...) return err } ws := func(s string) error { diff --git a/prometheus/internal/difflib_test.go b/prometheus/internal/difflib_test.go index e99c7cbfc..2279f4d8f 100644 --- a/prometheus/internal/difflib_test.go +++ b/prometheus/internal/difflib_test.go @@ -114,7 +114,7 @@ group } } -func ExampleGetUnifiedDiffCode() { +func ExampleGetUnifiedDiffString() { a := `one two three diff --git a/prometheus/internal/go_runtime_metrics.go b/prometheus/internal/go_runtime_metrics.go index f7f97ef92..d273b6640 100644 --- a/prometheus/internal/go_runtime_metrics.go +++ b/prometheus/internal/go_runtime_metrics.go @@ -67,7 +67,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) } // Our current conversion moves to legacy naming, so use legacy validation. - valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) + valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/prometheus/labels.go b/prometheus/labels.go index c21911f29..5fe8d3b4d 100644 --- a/prometheus/labels.go +++ b/prometheus/labels.go @@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error { } func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. 
+ return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix) } diff --git a/prometheus/metric.go b/prometheus/metric.go index 592eec3e2..76e59f128 100644 --- a/prometheus/metric.go +++ b/prometheus/metric.go @@ -186,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error { case pb.Counter != nil: pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] case pb.Histogram != nil: + h := pb.Histogram for _, e := range m.exemplars { - // pb.Histogram.Bucket are sorted by UpperBound. - i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { - return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 || + len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) && + e.GetTimestamp() != nil { + h.Exemplars = append(h.Exemplars, e) + if len(h.Bucket) == 0 { + // Don't proceed to classic buckets if there are none. + continue + } + } + // h.Bucket are sorted by UpperBound. + i := sort.Search(len(h.Bucket), func(i int) bool { + return h.Bucket[i].GetUpperBound() >= e.GetValue() }) - if i < len(pb.Histogram.Bucket) { - pb.Histogram.Bucket[i].Exemplar = e + if i < len(h.Bucket) { + h.Bucket[i].Exemplar = e } else { // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. b := &dto.Bucket{ - CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), + CumulativeCount: proto.Uint64(h.GetSampleCount()), UpperBound: proto.Float64(math.Inf(1)), Exemplar: e, } - pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + h.Bucket = append(h.Bucket, b) } } default: @@ -227,6 +237,7 @@ type Exemplar struct { // Only last applicable exemplar is injected from the list. // For example for Counter it means last exemplar is injected. // For Histogram, it means last applicable exemplar for each bucket is injected. +// For a Native Histogram, all valid exemplars are injected. // // NewMetricWithExemplars works best with MustNewConstMetric and // MustNewConstHistogram, see example. diff --git a/prometheus/metric_test.go b/prometheus/metric_test.go index 629aa4f84..f6553c332 100644 --- a/prometheus/metric_test.go +++ b/prometheus/metric_test.go @@ -14,12 +14,16 @@ package prometheus import ( + "errors" + "fmt" "math" "testing" + "time" dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) func TestBuildFQName(t *testing.T) { @@ -90,3 +94,264 @@ func TestWithExemplarsMetric(t *testing.T) { } }) } + +func TestWithExemplarsNativeHistogramMetric(t *testing.T) { + t.Run("native histogram single exemplar", func(t *testing.T) { + // Create a constant histogram from values we got from a 3rd party telemetry system. 
+ h := MustNewConstNativeHistogram( + NewDesc("http_request_duration_seconds", "A histogram of the HTTP request durations.", nil, nil), + 10, 12.1, map[int]int64{1: 7, 2: 1, 3: 2}, map[int]int64{}, 0, 2, 0.2, time.Date( + 2009, 11, 17, 20, 34, 58, 651387237, time.UTC)) + m := &withExemplarsMetric{Metric: h, exemplars: []*dto.Exemplar{ + {Value: proto.Float64(2000.0), Timestamp: timestamppb.New(time.Date(2009, 11, 17, 20, 34, 58, 3243244, time.UTC))}, + }} + metric := dto.Metric{} + if err := m.Write(&metric); err != nil { + t.Fatal(err) + } + if want, got := 1, len(metric.GetHistogram().Exemplars); want != got { + t.Errorf("want %v, got %v", want, got) + } + + for _, b := range metric.GetHistogram().Bucket { + if b.Exemplar != nil { + t.Error("Not expecting exemplar for bucket") + } + } + }) + t.Run("native histogram multiple exemplar", func(t *testing.T) { + // Create a constant histogram from values we got from a 3rd party telemetry system. + h := MustNewConstNativeHistogram( + NewDesc("http_request_duration_seconds", "A histogram of the HTTP request durations.", nil, nil), + 10, 12.1, map[int]int64{1: 7, 2: 1, 3: 2}, map[int]int64{}, 0, 2, 0.2, time.Date( + 2009, 11, 17, 20, 34, 58, 651387237, time.UTC)) + m := &withExemplarsMetric{Metric: h, exemplars: []*dto.Exemplar{ + {Value: proto.Float64(2000.0), Timestamp: timestamppb.New(time.Date(2009, 11, 17, 20, 34, 58, 3243244, time.UTC))}, + {Value: proto.Float64(1000.0), Timestamp: timestamppb.New(time.Date(2009, 11, 17, 20, 34, 59, 3243244, time.UTC))}, + }} + metric := dto.Metric{} + if err := m.Write(&metric); err != nil { + t.Fatal(err) + } + if want, got := 2, len(metric.GetHistogram().Exemplars); want != got { + t.Errorf("want %v, got %v", want, got) + } + + for _, b := range metric.GetHistogram().Bucket { + if b.Exemplar != nil { + t.Error("Not expecting exemplar for bucket") + } + } + }) + t.Run("native histogram exemplar without timestamp", func(t *testing.T) { + // Create a constant histogram from values we got from a 3rd party telemetry system. 
+ h := MustNewConstNativeHistogram( + NewDesc("http_request_duration_seconds", "A histogram of the HTTP request durations.", nil, nil), + 10, 12.1, map[int]int64{1: 7, 2: 1, 3: 2}, map[int]int64{}, 0, 2, 0.2, time.Date( + 2009, 11, 17, 20, 34, 58, 651387237, time.UTC)) + m := MustNewMetricWithExemplars(h, Exemplar{ + Value: 1000.0, + }) + metric := dto.Metric{} + if err := m.Write(&metric); err != nil { + t.Fatal(err) + } + if want, got := 1, len(metric.GetHistogram().Exemplars); want != got { + t.Errorf("want %v, got %v", want, got) + } + if got := metric.GetHistogram().Exemplars[0].Timestamp; got == nil { + t.Errorf("Got nil timestamp") + } + + for _, b := range metric.GetHistogram().Bucket { + if b.Exemplar != nil { + t.Error("Not expecting exemplar for bucket") + } + } + }) + t.Run("nativehistogram metric exemplars should be available in both buckets and exemplars", func(t *testing.T) { + now := time.Now() + tcs := []struct { + Name string + Count uint64 + Sum float64 + PositiveBuckets map[int]int64 + NegativeBuckets map[int]int64 + ZeroBucket uint64 + NativeHistogramSchema int32 + NativeHistogramZeroThreshold float64 + CreatedTimestamp time.Time + Bucket []*dto.Bucket + Exemplars []Exemplar + Want *dto.Metric + }{ + { + Name: "test_metric", + Count: 6, + Sum: 7.4, + PositiveBuckets: map[int]int64{ + 0: 1, 2: 2, 4: 2, + }, + NegativeBuckets: map[int]int64{}, + ZeroBucket: 1, + + NativeHistogramSchema: 2, + NativeHistogramZeroThreshold: 2.938735877055719e-39, + CreatedTimestamp: now, + Bucket: []*dto.Bucket{ + { + CumulativeCount: PointOf(uint64(6)), + UpperBound: PointOf(float64(1)), + }, + { + CumulativeCount: PointOf(uint64(8)), + UpperBound: PointOf(float64(2)), + }, + { + CumulativeCount: PointOf(uint64(11)), + UpperBound: PointOf(float64(5)), + }, + { + CumulativeCount: PointOf(uint64(13)), + UpperBound: PointOf(float64(10)), + }, + }, + Exemplars: []Exemplar{ + { + Timestamp: now, + Value: 10, + }, + }, + Want: &dto.Metric{ + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(6), + SampleSum: proto.Float64(7.4), + Schema: proto.Int32(2), + ZeroThreshold: proto.Float64(2.938735877055719e-39), + ZeroCount: proto.Uint64(1), + PositiveSpan: []*dto.BucketSpan{ + {Offset: proto.Int32(0), Length: proto.Uint32(5)}, + }, + PositiveDelta: []int64{1, -1, 2, -2, 2}, + Exemplars: []*dto.Exemplar{ + { + Value: PointOf(float64(10)), + Timestamp: timestamppb.New(now), + }, + }, + Bucket: []*dto.Bucket{ + { + CumulativeCount: PointOf(uint64(6)), + UpperBound: PointOf(float64(1)), + }, + { + CumulativeCount: PointOf(uint64(8)), + UpperBound: PointOf(float64(2)), + }, + { + CumulativeCount: PointOf(uint64(11)), + UpperBound: PointOf(float64(5)), + }, + { + CumulativeCount: PointOf(uint64(13)), + UpperBound: PointOf(float64(10)), + Exemplar: &dto.Exemplar{ + Timestamp: timestamppb.New(now), + Value: PointOf(float64(10)), + }, + }, + }, + CreatedTimestamp: timestamppb.New(now), + }, + }, + }, + } + + for _, tc := range tcs { + m, err := newNativeHistogramWithClassicBuckets(NewDesc(tc.Name, "None", []string{}, map[string]string{}), tc.Count, tc.Sum, tc.PositiveBuckets, tc.NegativeBuckets, tc.ZeroBucket, tc.NativeHistogramSchema, tc.NativeHistogramZeroThreshold, tc.CreatedTimestamp, tc.Bucket) + if err != nil { + t.Fail() + } + metricWithExemplar, err := NewMetricWithExemplars(m, tc.Exemplars[0]) + if err != nil { + t.Fail() + } + got := &dto.Metric{} + err = metricWithExemplar.Write(got) + if err != nil { + t.Fail() + } + + if !proto.Equal(tc.Want, got) { + t.Errorf("want histogram %q, got %q", 
tc.Want, got) + } + + } + }) +} + +func PointOf[T any](value T) *T { + return &value +} + +// newNativeHistogramWithClassicBuckets returns a Metric representing +// a native histogram that also has classic buckets. This is for testing purposes. +func newNativeHistogramWithClassicBuckets( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + schema int32, + zeroThreshold float64, + createdTimestamp time.Time, + // DummyNativeHistogram also defines buckets in the metric for testing + buckets []*dto.Bucket, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + fmt.Println("error", desc.err) + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum { + return nil, errors.New("invalid native histogram schema") + } + if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil { + return nil, err + } + + NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets) + PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets) + ret := &constNativeHistogram{ + desc: desc, + Histogram: dto.Histogram{ + CreatedTimestamp: timestamppb.New(createdTimestamp), + Schema: &schema, + ZeroThreshold: &zeroThreshold, + SampleCount: &count, + SampleSum: &sum, + + NegativeSpan: NegativeSpan, + NegativeDelta: NegativeDelta, + + PositiveSpan: PositiveSpan, + PositiveDelta: PositiveDelta, + + ZeroCount: proto.Uint64(zeroBucket), + + // DummyNativeHistogram also defines buckets in the metric + Bucket: buckets, + }, + labelPairs: MakeLabelPairs(desc, labelValues), + } + if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 { + ret.PositiveSpan = []*dto.BucketSpan{{ + Offset: proto.Int32(0), + Length: proto.Uint32(0), + }} + } + return ret, nil +} diff --git a/prometheus/process_collector_darwin.go b/prometheus/process_collector_darwin.go index 50eb860a6..b32c95fa3 100644 --- a/prometheus/process_collector_darwin.go +++ b/prometheus/process_collector_darwin.go @@ -11,6 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build darwin && !ios + package prometheus import ( @@ -23,9 +25,9 @@ import ( "golang.org/x/sys/unix" ) -// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// errNotImplemented is returned by stub functions that replace cgo functions, when cgo // isn't available. -var notImplementedErr = errors.New("not implemented") +var errNotImplemented = errors.New("not implemented") type memoryInfo struct { vsize uint64 // Virtual memory size in bytes @@ -99,7 +101,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if memInfo, err := getMemory(); err == nil { ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) - } else if !errors.Is(err, notImplementedErr) { + } else if !errors.Is(err, errNotImplemented) { // Don't report an error when support is not compiled in. 
c.reportError(ch, c.rss, err) c.reportError(ch, c.vsize, err) diff --git a/prometheus/process_collector_cgo_darwin.c b/prometheus/process_collector_mem_cgo_darwin.c similarity index 98% rename from prometheus/process_collector_cgo_darwin.c rename to prometheus/process_collector_mem_cgo_darwin.c index 1554f674d..d00a24315 100644 --- a/prometheus/process_collector_cgo_darwin.c +++ b/prometheus/process_collector_mem_cgo_darwin.c @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build darwin && cgo +//go:build darwin && !ios && cgo #include #include diff --git a/prometheus/process_collector_cgo_darwin.go b/prometheus/process_collector_mem_cgo_darwin.go similarity index 97% rename from prometheus/process_collector_cgo_darwin.go rename to prometheus/process_collector_mem_cgo_darwin.go index b375c3a77..9ac53f999 100644 --- a/prometheus/process_collector_cgo_darwin.go +++ b/prometheus/process_collector_mem_cgo_darwin.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build darwin && cgo +//go:build darwin && !ios && cgo package prometheus diff --git a/prometheus/process_collector_nocgo_darwin.go b/prometheus/process_collector_mem_nocgo_darwin.go similarity index 95% rename from prometheus/process_collector_nocgo_darwin.go rename to prometheus/process_collector_mem_nocgo_darwin.go index 516504731..378865129 100644 --- a/prometheus/process_collector_nocgo_darwin.go +++ b/prometheus/process_collector_mem_nocgo_darwin.go @@ -11,12 +11,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build darwin && !cgo +//go:build darwin && !ios && !cgo package prometheus func getMemory() (*memoryInfo, error) { - return nil, notImplementedErr + return nil, errNotImplemented } // describe returns all descriptions of the collector for Darwin. diff --git a/prometheus/process_collector_wasip1_js.go b/prometheus/process_collector_not_supported.go similarity index 95% rename from prometheus/process_collector_wasip1_js.go rename to prometheus/process_collector_not_supported.go index c68f7f851..7732b7f37 100644 --- a/prometheus/process_collector_wasip1_js.go +++ b/prometheus/process_collector_not_supported.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build wasip1 || js -// +build wasip1 js +//go:build wasip1 || js || ios +// +build wasip1 js ios package prometheus diff --git a/prometheus/process_collector_other.go b/prometheus/process_collector_procfsenabled.go similarity index 94% rename from prometheus/process_collector_other.go rename to prometheus/process_collector_procfsenabled.go index 9f4b130be..8074f70f5 100644 --- a/prometheus/process_collector_other.go +++ b/prometheus/process_collector_procfsenabled.go @@ -66,11 +66,11 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if netstat, err := p.Netstat(); err == nil { var inOctets, outOctets float64 - if netstat.IpExt.InOctets != nil { - inOctets = *netstat.IpExt.InOctets + if netstat.InOctets != nil { + inOctets = *netstat.InOctets } - if netstat.IpExt.OutOctets != nil { - outOctets = *netstat.IpExt.OutOctets + if netstat.OutOctets != nil { + outOctets = *netstat.OutOctets } ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) diff --git a/prometheus/promhttp/http.go b/prometheus/promhttp/http.go index 28eed2672..fd3c76342 100644 --- a/prometheus/promhttp/http.go +++ b/prometheus/promhttp/http.go @@ -41,11 +41,11 @@ import ( "sync" "time" - "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp/internal" ) const ( @@ -65,7 +65,13 @@ const ( Zstd Compression = "zstd" ) -var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} +func defaultCompressionFormats() []Compression { + if internal.NewZstdWriter != nil { + return []Compression{Identity, Gzip, Zstd} + } else { + return []Compression{Identity, Gzip} + } +} var gzipPool = sync.Pool{ New: func() interface{} { @@ -138,7 +144,7 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO // Select compression formats to offer based on default or user choice. var compressions []string if !opts.DisableCompression { - offers := defaultCompressionFormats + offers := defaultCompressionFormats() if len(opts.OfferedCompressions) > 0 { offers = opts.OfferedCompressions } @@ -454,7 +460,7 @@ func httpError(rsp http.ResponseWriter, err error) { // negotiateEncodingWriter reads the Accept-Encoding header from a request and // selects the right compression based on an allow-list of supported -// compressions. It returns a writer implementing the compression and an the +// compressions. It returns a writer implementing the compression and the // correct value that the caller can set in the response header. func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) { if len(compressions) == 0 { @@ -466,14 +472,12 @@ func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []strin switch selected { case "zstd": - // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. - z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) - if err != nil { - return nil, "", func() {}, err + if internal.NewZstdWriter == nil { + // The content encoding was not implemented yet. 
+ return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats()) } - - z.Reset(rw) - return z, selected, func() { _ = z.Close() }, nil + writer, closeWriter, err := internal.NewZstdWriter(rw) + return writer, selected, closeWriter, err case "gzip": gz := gzipPool.Get().(*gzip.Writer) gz.Reset(rw) @@ -483,6 +487,6 @@ func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []strin return rw, selected, func() {}, nil default: // The content encoding was not implemented yet. - return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats()) } } diff --git a/prometheus/promhttp/http_test.go b/prometheus/promhttp/http_test.go index a763da469..189ee357c 100644 --- a/prometheus/promhttp/http_test.go +++ b/prometheus/promhttp/http_test.go @@ -30,6 +30,7 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/client_golang/prometheus" + _ "github.com/prometheus/client_golang/prometheus/promhttp/zstd" ) type errorCollector struct{} @@ -484,7 +485,7 @@ func TestInstrumentMetricHandlerWithCompression(t *testing.T) { func TestNegotiateEncodingWriter(t *testing.T) { var defaultCompressions []string - for _, comp := range defaultCompressionFormats { + for _, comp := range defaultCompressionFormats() { defaultCompressions = append(defaultCompressions, string(comp)) } diff --git a/prometheus/promhttp/instrument_server.go b/prometheus/promhttp/instrument_server.go index 356edb786..9332b0249 100644 --- a/prometheus/promhttp/instrument_server.go +++ b/prometheus/promhttp/instrument_server.go @@ -392,7 +392,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool { func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { labels := prometheus.Labels{} - if !(code || method) { + if !code && !method { return labels } diff --git a/prometheus/promhttp/internal/compression.go b/prometheus/promhttp/internal/compression.go new file mode 100644 index 000000000..c5039590f --- /dev/null +++ b/prometheus/promhttp/internal/compression.go @@ -0,0 +1,21 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "io" +) + +// NewZstdWriter enables zstd write support if non-nil. 
+var NewZstdWriter func(rw io.Writer) (_ io.Writer, closeWriter func(), _ error) diff --git a/prometheus/promhttp/option_test.go b/prometheus/promhttp/option_test.go index 158271857..1e0c47509 100644 --- a/prometheus/promhttp/option_test.go +++ b/prometheus/promhttp/option_test.go @@ -27,7 +27,7 @@ const ( CtxResolverKey key = iota ) -func ExampleInstrumentHandlerWithExtraMethods() { +func ExampleWithExtraMethods() { counter := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "api_requests_total", @@ -70,7 +70,7 @@ func ExampleInstrumentHandlerWithExtraMethods() { } } -func ExampleInstrumentHandlerWithLabelResolver() { +func ExampleWithLabelFromCtx() { counter := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "api_requests_total", diff --git a/prometheus/promhttp/zstd/zstd.go b/prometheus/promhttp/zstd/zstd.go new file mode 100644 index 000000000..7e355e338 --- /dev/null +++ b/prometheus/promhttp/zstd/zstd.go @@ -0,0 +1,47 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package zstd activates support for zstd compression. +// To enable zstd compression support, import like this: +// +// import ( +// _ "github.com/prometheus/client_golang/prometheus/promhttp/zstd" +// ) +// +// This support is currently implemented via the github.com/klauspost/compress library, +// so importing this package requires linking and building that library. +// Once stdlib support is added to the Go standard library (https://github.com/golang/go/issues/62513), +// this package is expected to become a no-op, and zstd support will be enabled by default. +package zstd + +import ( + "io" + + "github.com/klauspost/compress/zstd" + + "github.com/prometheus/client_golang/prometheus/promhttp/internal" +) + +func init() { + // Enable zstd support + internal.NewZstdWriter = func(rw io.Writer) (_ io.Writer, closeWriter func(), _ error) { + // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented, and move this package to be a no-op / backfill for older go versions. + z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) + if err != nil { + return nil, func() {}, err + } + + z.Reset(rw) + return z, func() { _ = z.Close() }, nil + } +} diff --git a/prometheus/push/push.go b/prometheus/push/push.go index e524aa130..1eb35ac9a 100644 --- a/prometheus/push/push.go +++ b/prometheus/push/push.go @@ -182,7 +182,8 @@ func (p *Pusher) Error() error { // For convenience, this method returns a pointer to the Pusher itself. func (p *Pusher) Grouping(name, value string) *Pusher { if p.error == nil { - if !model.LabelName(name).IsValid() { + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. 
+ if !model.NameValidationScheme.IsValidLabelName(name) { p.error = fmt.Errorf("grouping label has invalid name: %s", name) return p } diff --git a/prometheus/registry_test.go b/prometheus/registry_test.go index b8505c7d2..12b09d623 100644 --- a/prometheus/registry_test.go +++ b/prometheus/registry_test.go @@ -37,6 +37,7 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + "go.uber.org/goleak" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/timestamppb" ) @@ -1339,3 +1340,28 @@ func TestCheckMetricConsistency(t *testing.T) { } reg.Unregister(invalidCollector) } + +func TestGatherDoesNotLeakGoroutines(t *testing.T) { + // Use goleak to verify that no unexpected goroutines are leaked during the test. + defer goleak.VerifyNone(t) + + // Create a new Prometheus registry without any default collectors. + reg := prometheus.NewRegistry() + + // Register 100 simple Gauge metrics with distinct names and constant labels. + for i := 0; i < 100; i++ { + reg.MustRegister(prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "test_metric_" + string(rune(i)), + Help: "Test metric", + ConstLabels: prometheus.Labels{"id": string(rune(i))}, + })) + } + + // Call Gather repeatedly to simulate stress and check for potential goroutine leaks. + for i := 0; i < 1000; i++ { + _, err := reg.Gather() + if err != nil { + t.Fatalf("unexpected error from Gather: %v", err) + } + } +} diff --git a/prometheus/summary.go b/prometheus/summary.go index 76a9e12f4..ac5203c6f 100644 --- a/prometheus/summary.go +++ b/prometheus/summary.go @@ -471,9 +471,13 @@ func (s *noObjectivesSummary) Observe(v float64) { n := atomic.AddUint64(&s.countAndHotIdx, 1) hotCounts := s.counts[n>>63] - atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 { - return oldVal + v - }) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } // Increment count last as we take it as a signal that the observation // is complete. atomic.AddUint64(&hotCounts.count, 1) @@ -515,13 +519,14 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error { // Finally add all the cold counts to the new hot counts and reset the cold counts. atomic.AddUint64(&hotCounts.count, count) atomic.StoreUint64(&coldCounts.count, 0) - - // Use atomicUpdateFloat to update hotCounts.sumBits atomically. - atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 { - return oldVal + sum.GetSampleSum() - }) - atomic.StoreUint64(&coldCounts.sumBits, 0) - + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } return nil } diff --git a/prometheus/testutil/testutil.go b/prometheus/testutil/testutil.go index 1258508e4..80a4d7c35 100644 --- a/prometheus/testutil/testutil.go +++ b/prometheus/testutil/testutil.go @@ -262,7 +262,7 @@ func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNa // convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of // dto.MetricFamily. 
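The summary hunk above re-inlines the lock-free float update: sync/atomic has no atomic float64, so the sum is stored as its IEEE-754 bit pattern and updated with a compare-and-swap loop. The underlying pattern as a standalone sketch, with an illustrative helper name:

	package main

	import (
		"fmt"
		"math"
		"sync/atomic"
	)

	// addFloat64 atomically adds v to the float64 whose IEEE-754 bits are stored in *bits.
	func addFloat64(bits *uint64, v float64) {
		for {
			oldBits := atomic.LoadUint64(bits)
			newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
			if atomic.CompareAndSwapUint64(bits, oldBits, newBits) {
				return
			}
		}
	}

	func main() {
		var sumBits uint64 // the zero value is the bit pattern of 0.0
		addFloat64(&sumBits, 1.5)
		addFloat64(&sumBits, 2.25)
		fmt.Println(math.Float64frombits(atomic.LoadUint64(&sumBits))) // 3.75
	}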
func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { - var tp expfmt.TextParser + tp := expfmt.NewTextParser(model.UTF8Validation) notNormalized, err := tp.TextToMetricFamilies(reader) if err != nil { return nil, fmt.Errorf("converting reader to metric families failed: %w", err) diff --git a/prometheus/vec.go b/prometheus/vec.go index 2c808eece..487b46656 100644 --- a/prometheus/vec.go +++ b/prometheus/vec.go @@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { return false } - return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) + return m.deleteByHashWithLabelValues(h, lvs, m.curry) } // Delete deletes the metric where the variable labels are the same as those @@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool { return false } - return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) + return m.deleteByHashWithLabels(h, labels, m.curry) } // DeletePartialMatch deletes all metrics where the variable labels contain all of those @@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int { labels, closer := constrainLabels(m.desc, labels) defer closer() - return m.metricMap.deleteByLabels(labels, m.curry) + return m.deleteByLabels(labels, m.curry) } // Without explicit forwarding of Describe, Collect, Reset, those methods won't @@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil + return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil } // GetMetricWith returns the Metric for the given Labels map (the label names @@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil + return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil } func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { diff --git a/prometheus/vec_test.go b/prometheus/vec_test.go index fdd3abc36..03223f2f6 100644 --- a/prometheus/vec_test.go +++ b/prometheus/vec_test.go @@ -183,7 +183,7 @@ func TestDeletePartialMatchWithConstraints(t *testing.T) { func testDeletePartialMatch(t *testing.T, baseVec *GaugeVec) { assertNoMetric := func(t *testing.T) { - if n := len(baseVec.metricMap.metrics); n != 0 { + if n := len(baseVec.metrics); n != 0 { t.Error("expected no metrics, got", n) } } @@ -301,7 +301,7 @@ func testMetricVec(t *testing.T, vec *GaugeVec) { } var total int - for _, metrics := range vec.metricMap.metrics { + for _, metrics := range vec.metrics { for _, metric := range metrics { total++ copy(pair[:], metric.values) @@ -336,7 +336,7 @@ func testMetricVec(t *testing.T, vec *GaugeVec) { vec.Reset() - if len(vec.metricMap.metrics) > 0 { + if len(vec.metrics) > 0 { t.Fatalf("reset failed") } } @@ -373,7 +373,7 @@ func testConstrainedMetricVec(t *testing.T, vec *GaugeVec, constrain func(string } var total int - for _, metrics := range vec.metricMap.metrics { + for _, metrics := range vec.metrics { for _, metric := range metrics { total++ copy(pair[:], metric.values) @@ -408,7 +408,7 @@ func testConstrainedMetricVec(t *testing.T, vec *GaugeVec, constrain func(string vec.Reset() - if len(vec.metricMap.metrics) > 0 { + if len(vec.metrics) > 0 { t.Fatalf("reset failed") } } @@ -506,7 +506,7 @@ func TestCurryVecWithConstraints(t *testing.T) { func testCurryVec(t *testing.T, vec *CounterVec) { assertMetrics := func(t *testing.T) { n := 0 - for _, 
m := range vec.metricMap.metrics { + for _, m := range vec.metrics { n += len(m) } if n != 2 { @@ -533,7 +533,7 @@ func testCurryVec(t *testing.T, vec *CounterVec) { } assertNoMetric := func(t *testing.T) { - if n := len(vec.metricMap.metrics); n != 0 { + if n := len(vec.metrics); n != 0 { t.Error("expected no metrics, got", n) } } @@ -703,7 +703,7 @@ func testCurryVec(t *testing.T, vec *CounterVec) { func testConstrainedCurryVec(t *testing.T, vec *CounterVec, constraint func(string) string) { assertMetrics := func(t *testing.T) { n := 0 - for _, m := range vec.metricMap.metrics { + for _, m := range vec.metrics { n += len(m) } if n != 2 { @@ -744,7 +744,7 @@ func testConstrainedCurryVec(t *testing.T, vec *CounterVec, constraint func(stri } assertNoMetric := func(t *testing.T) { - if n := len(vec.metricMap.metrics); n != 0 { + if n := len(vec.metrics); n != 0 { t.Error("expected no metrics, got", n) } } diff --git a/prometheus/wrap.go b/prometheus/wrap.go index 25da157f1..2ed128506 100644 --- a/prometheus/wrap.go +++ b/prometheus/wrap.go @@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer { // metric names that are standardized across applications, as that would break // horizontal monitoring, for example the metrics provided by the Go collector // (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with “go_” or “process_”, +// fact, those metrics are already prefixed with "go_" or "process_", // respectively.) // // Conflicts between Collectors registered through the original Registerer with @@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { } } +// WrapCollectorWith returns a Collector wrapping the provided Collector. The +// wrapped Collector will add the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. +// +// WrapCollectorWith can be useful to work with multiple instances of a third +// party library that does not expose enough flexibility on the lifecycle of its +// registered metrics. +// For example, let's say you have a foo.New(reg Registerer) constructor that +// registers metrics but never unregisters them, and you want to create multiple +// instances of foo.Foo with different labels. +// The way to achieve that, is to create a new Registry, pass it to foo.New, +// then use WrapCollectorWith to wrap that Registry with the desired labels and +// register that as a collector in your main Registry. +// Then you can un-register the wrapped collector effectively un-registering the +// metrics registered by foo.New. +func WrapCollectorWith(labels Labels, c Collector) Collector { + return &wrappingCollector{ + wrappedCollector: c, + labels: labels, + } +} + +// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The +// wrapped Collector will add the provided prefix to the name of all Metrics it collects. +// +// See the documentation of WrapCollectorWith for more details on the use case. 
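Both wrappers are easiest to picture with the use case from the WrapCollectorWith comment above: give the library its own Registry (which itself implements Collector), then expose that Registry through a wrapper. A runnable sketch; the gauge stands in for whatever the hypothetical foo.New(inner) constructor would register, and the label name is illustrative.

	package main

	import (
		"fmt"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		inner := prometheus.NewRegistry() // hand this to the third-party constructor, e.g. foo.New(inner)
		g := prometheus.NewGauge(prometheus.GaugeOpts{Name: "foo_queue_length"})
		g.Set(42)
		inner.MustRegister(g) // stands in for what foo.New would have registered

		wrapped := prometheus.WrapCollectorWith(prometheus.Labels{"instance_name": "a"}, inner)
		mainReg := prometheus.NewRegistry()
		mainReg.MustRegister(wrapped)

		mfs, err := mainReg.Gather()
		if err != nil {
			panic(err)
		}
		fmt.Println(len(mfs), "metric family, now carrying the instance_name label")

		// Dropping everything the library registered is a single call:
		mainReg.Unregister(wrapped)
	}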
+func WrapCollectorWithPrefix(prefix string, c Collector) Collector { + return &wrappingCollector{ + wrappedCollector: c, + prefix: prefix, + } +} + type wrappingRegisterer struct { wrappedRegisterer Registerer prefix string diff --git a/prometheus/wrap_test.go b/prometheus/wrap_test.go index f4a5cb392..36a309e4b 100644 --- a/prometheus/wrap_test.go +++ b/prometheus/wrap_test.go @@ -43,7 +43,7 @@ func toMetricFamilies(cs ...Collector) []*dto.MetricFamily { return out } -func TestWrap(t *testing.T) { +func TestWrapRegisterer(t *testing.T) { now := time.Now() nowFn := func() time.Time { return now } simpleCnt := NewCounter(CounterOpts{ @@ -306,28 +306,34 @@ func TestWrap(t *testing.T) { if !s.gatherFails && err != nil { t.Fatal("gathering failed:", err) } - if len(wantMF) != len(gotMF) { - t.Fatalf("Expected %d metricFamilies, got %d", len(wantMF), len(gotMF)) + assertEqualMFs(t, wantMF, gotMF) + }) + } +} + +func assertEqualMFs(t *testing.T, wantMF, gotMF []*dto.MetricFamily) { + t.Helper() + + if len(wantMF) != len(gotMF) { + t.Fatalf("Expected %d metricFamilies, got %d", len(wantMF), len(gotMF)) + } + for i := range gotMF { + if !proto.Equal(gotMF[i], wantMF[i]) { + var want, got []string + + for i, mf := range wantMF { + want = append(want, fmt.Sprintf("%3d: %s", i, mf)) } - for i := range gotMF { - if !proto.Equal(gotMF[i], wantMF[i]) { - var want, got []string - - for i, mf := range wantMF { - want = append(want, fmt.Sprintf("%3d: %s", i, mf)) - } - for i, mf := range gotMF { - got = append(got, fmt.Sprintf("%3d: %s", i, mf)) - } - - t.Fatalf( - "unexpected output of gathering:\n\nWANT:\n%s\n\nGOT:\n%s\n", - strings.Join(want, "\n"), - strings.Join(got, "\n"), - ) - } + for i, mf := range gotMF { + got = append(got, fmt.Sprintf("%3d: %s", i, mf)) } - }) + + t.Fatalf( + "unexpected output of gathering:\n\nWANT:\n%s\n\nGOT:\n%s\n", + strings.Join(want, "\n"), + strings.Join(got, "\n"), + ) + } } } @@ -339,3 +345,124 @@ func TestNil(t *testing.T) { t.Fatal("registering failed:", err) } } + +func TestWrapCollector(t *testing.T) { + t.Run("can be registered and un-registered", func(t *testing.T) { + inner := NewPedanticRegistry() + g := NewGauge(GaugeOpts{Name: "testing"}) + g.Set(42) + err := inner.Register(g) + if err != nil { + t.Fatal("registering failed:", err) + } + + wrappedWithLabels := WrapCollectorWith(Labels{"lbl": "1"}, inner) + wrappedWithPrefix := WrapCollectorWithPrefix("prefix", inner) + reg := NewPedanticRegistry() + err = reg.Register(wrappedWithLabels) + if err != nil { + t.Fatal("registering failed:", err) + } + err = reg.Register(wrappedWithPrefix) + if err != nil { + t.Fatal("registering failed:", err) + } + + gathered, err := reg.Gather() + if err != nil { + t.Fatal("gathering failed:", err) + } + + lg := NewGauge(GaugeOpts{Name: "testing", ConstLabels: Labels{"lbl": "1"}}) + lg.Set(42) + pg := NewGauge(GaugeOpts{Name: "prefixtesting"}) + pg.Set(42) + expected := toMetricFamilies(lg, pg) + assertEqualMFs(t, expected, gathered) + + if !reg.Unregister(wrappedWithLabels) { + t.Fatal("unregistering failed") + } + if !reg.Unregister(wrappedWithPrefix) { + t.Fatal("unregistering failed") + } + + gathered, err = reg.Gather() + if err != nil { + t.Fatal("gathering failed:", err) + } + if len(gathered) != 0 { + t.Fatalf("expected 0 metric families, got %d", len(gathered)) + } + }) + + t.Run("can wrap same collector twice", func(t *testing.T) { + inner := NewPedanticRegistry() + g := NewGauge(GaugeOpts{Name: "testing"}) + g.Set(42) + err := inner.Register(g) + if err != nil { + 
t.Fatal("registering failed:", err) + } + + wrapped := WrapCollectorWith(Labels{"lbl": "1"}, inner) + reg := NewPedanticRegistry() + err = reg.Register(wrapped) + if err != nil { + t.Fatal("registering failed:", err) + } + + wrapped2 := WrapCollectorWith(Labels{"lbl": "2"}, inner) + err = reg.Register(wrapped2) + if err != nil { + t.Fatal("registering failed:", err) + } + + gathered, err := reg.Gather() + if err != nil { + t.Fatal("gathering failed:", err) + } + + lg := NewGauge(GaugeOpts{Name: "testing", ConstLabels: Labels{"lbl": "1"}}) + lg.Set(42) + lg2 := NewGauge(GaugeOpts{Name: "testing", ConstLabels: Labels{"lbl": "2"}}) + lg2.Set(42) + expected := toMetricFamilies(lg, lg2) + assertEqualMFs(t, expected, gathered) + }) + + t.Run("can be registered again after un-registering", func(t *testing.T) { + inner := NewPedanticRegistry() + g := NewGauge(GaugeOpts{Name: "testing"}) + g.Set(42) + err := inner.Register(g) + if err != nil { + t.Fatal("registering failed:", err) + } + + wrapped := WrapCollectorWith(Labels{"lbl": "1"}, inner) + reg := NewPedanticRegistry() + err = reg.Register(wrapped) + if err != nil { + t.Fatal("registering failed:", err) + } + + if !reg.Unregister(wrapped) { + t.Fatal("unregistering failed") + } + err = reg.Register(wrapped) + if err != nil { + t.Fatal("registering failed:", err) + } + + gathered, err := reg.Gather() + if err != nil { + t.Fatal("gathering failed:", err) + } + + lg := NewGauge(GaugeOpts{Name: "testing", ConstLabels: Labels{"lbl": "1"}}) + lg.Set(42) + expected := toMetricFamilies(lg) + assertEqualMFs(t, expected, gathered) + }) +} diff --git a/supported_go_versions.txt b/supported_go_versions.txt index fc1f801ec..e0e8312dd 100644 --- a/supported_go_versions.txt +++ b/supported_go_versions.txt @@ -1,3 +1,2 @@ +1.24 1.23 -1.22 -1.21 diff --git a/tutorials/whatsup/go.mod b/tutorials/whatsup/go.mod index 5147c6572..45d4134d0 100644 --- a/tutorials/whatsup/go.mod +++ b/tutorials/whatsup/go.mod @@ -1,18 +1,14 @@ module github.com/prometheus/client_golang/tutorials/whatsup -go 1.22.0 - -toolchain go1.22.6 +go 1.23.0 require ( github.com/bwplotka/tracing-go v0.0.0-20230421061608-abdf862ceccd github.com/efficientgo/core v1.0.0-rc.3 github.com/efficientgo/e2e v0.14.1-0.20230421070206-d72d43f3b937 github.com/oklog/run v1.1.0 - github.com/prometheus/client_golang v1.20.5 - github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.62.0 - github.com/stretchr/testify v1.10.0 + github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/common v0.64.0 gopkg.in/yaml.v2 v2.4.0 ) @@ -25,18 +21,17 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/procfs v0.15.1 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect @@ -48,13 +43,12 @@ require ( go.opentelemetry.io/otel/sdk v1.34.0 // indirect go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect - golang.org/x/net v0.34.0 // indirect - golang.org/x/oauth2 v0.25.0 // indirect - golang.org/x/sys v0.29.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/net v0.40.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.25.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect google.golang.org/grpc v1.69.4 // indirect - google.golang.org/protobuf v1.36.3 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect + google.golang.org/protobuf v1.36.6 // indirect ) diff --git a/tutorials/whatsup/go.sum b/tutorials/whatsup/go.sum index 4b39b343e..44ec46613 100644 --- a/tutorials/whatsup/go.sum +++ b/tutorials/whatsup/go.sum @@ -26,8 +26,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -37,8 +37,8 @@ github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2E github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= 
github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -71,12 +71,12 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= +github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -118,22 +118,22 @@ go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= -golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/oauth2 v0.25.0 
h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -143,8 +143,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= -google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= -google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=