diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index 6331986054..0000000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,178 +0,0 @@
-version: 2.1
-
-orbs:
- aws-cli: circleci/aws-cli@1.3.0
- docker: circleci/docker@1.3.0
-
-executors:
- linuxgo:
- parameters:
- docker:
- - image: cimg/go:1.15
- - image: redis:6
-
-commands:
- go-build:
- parameters:
- os:
- description: Target operating system
- type: enum
- enum: ["linux", "darwin"]
- default: "linux"
- arch:
- description: Target architecture
- type: enum
- enum: ["amd64", "arm64"]
- default: "amd64"
- steps:
- - run: |
- GOOS=<< parameters.os >> \
- GOARCH=<< parameters.arch >> \
- go build -ldflags "-X main.BuildID=${CIRCLE_TAG:-${CIRCLE_SHA1:0:7}}" \
- -o $GOPATH/bin/refinery-<< parameters.os >>-<< parameters.arch >> \
- ./cmd/refinery
-
-jobs:
- test:
- executor: linuxgo
- steps:
- - checkout
- - run:
- name: install dockerize
- command: wget https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && sudo tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz
- environment:
- DOCKERIZE_VERSION: v0.3.0
- - run:
- name: Wait for redis
- command: dockerize -wait tcp://localhost:6379 -timeout 1m
- - run:
- name: go_test with race
- command: go test -tags race --race --timeout 60s -v ./...
- - run:
- name: go_test
- command: go test -tags all --timeout 60s -v ./...
-
- build:
- executor: linuxgo
- steps:
- - checkout
- - go-build:
- os: linux
- arch: amd64
- - go-build:
- os: linux
- arch: arm64
- - go-build:
- os: darwin
- arch: amd64
- - run:
- name: apt_get_update
- command: sudo apt-get -qq update
- - run:
- name: apt_get_install
- command: sudo apt-get install -y build-essential rpm ruby ruby-dev
- - run:
- name: gem_install
- command: sudo gem install fpm
- - run: mkdir -p ~/artifacts
- - run:
- name: build_deb_amd64
- command: ./build-pkg.sh -m amd64 -v "${CIRCLE_TAG:-${CIRCLE_SHA1:0:7}}" -t deb && mv *.deb ~/artifacts
- - run:
- name: build_deb_arm64
- command: ./build-pkg.sh -m arm64 -v "${CIRCLE_TAG:-${CIRCLE_SHA1:0:7}}" -t deb && mv *.deb ~/artifacts
- - run:
- name: build_rpm_amd64
- command: ./build-pkg.sh -m amd64 -v "${CIRCLE_TAG:-${CIRCLE_SHA1:0:7}}" -t rpm && mv *.rpm ~/artifacts
- - run:
- name: copy_binaries
- command: cp $GOPATH/bin/refinery-* ~/artifacts
- - run: echo "finished builds" && find ~/artifacts -ls
- - persist_to_workspace:
- root: ~/
- paths:
- - artifacts
- - store_artifacts:
- path: ~/artifacts
-
- publish_github:
- docker:
- - image: cibuilds/github:0.13.0
- steps:
- - attach_workspace:
- at: ~/
- - run:
- name: "Publish Release on GitHub"
- command: |
- echo "about to publish to tag ${CIRCLE_TAG}"
- ls -l ~/artifacts/*
- ghr -draft -n ${CIRCLE_TAG} -t ${GITHUB_TOKEN} -u ${CIRCLE_PROJECT_USERNAME} -r ${CIRCLE_PROJECT_REPONAME} -c ${CIRCLE_SHA1} ${CIRCLE_TAG} ~/artifacts
-
- publish_s3:
- executor: aws-cli/default
- steps:
- - attach_workspace:
- at: ~/
- - aws-cli/setup:
- aws-access-key-id: AWS_ACCESS_KEY_ID
- aws-secret-access-key: AWS_SECRET_ACCESS_KEY
- aws-region: AWS_REGION
- - run:
- name: sync_s3_artifacts
- command: |
- version=${CIRCLE_TAG:1}
- if [[ -z "$version" ]] ; then version=${CIRCLE_SHA1:0:7}; fi
- aws s3 sync ~/artifacts s3://honeycomb-builds/honeycombio/refinery/$version/
-
-workflows:
- build:
- jobs:
- - test:
- filters:
- tags:
- only: /.*/
- - build:
- requires:
- - test
- filters:
- tags:
- only: /.*/
- - publish_github:
- context: Honeycomb Secrets for Public Repos
- requires:
- - build
- filters:
- tags:
- only: /^v.*/
- branches:
- ignore: /.*/
- - publish_s3:
- context: Honeycomb Secrets for Public Repos
- requires:
- - build
- filters:
- tags:
- only: /.*/
- branches:
- # Forked pull requests have CIRCLE_BRANCH set to pull/XXX
- ignore: /pull\/[0-9]+/
- - docker/publish:
- tag: latest
- extra_build_args: --build-arg BUILD_ID=${CIRCLE_SHA1:0:7}
- image: $CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
- requires:
- - build
- filters:
- branches:
- only: main
- - docker/publish:
- tag: latest,${CIRCLE_TAG:1}
- extra_build_args: --build-arg BUILD_ID=${CIRCLE_TAG:1}
- image: $CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
- requires:
- - build
- filters:
- tags:
- only: /^v.*/
- branches:
- ignore: /.*/
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
deleted file mode 100644
index 8dcecb488f..0000000000
--- a/.github/CODEOWNERS
+++ /dev/null
@@ -1 +0,0 @@
-* @honeycombio/integrations-team
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 0000000000..1b338b0d9d
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,76 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ "main" ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ "main" ]
+ schedule:
+ - cron: '28 11 * * 2'
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'go' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+ # Use only 'java' to analyze code written in Java, Kotlin or both
+ # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both
+ # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v2
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+
+ # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
+ # queries: security-extended,security-and-quality
+
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v2
+
+ # âšī¸ Command-line programs to run using the OS shell.
+ # đ See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
+
+ # If the Autobuild fails above, remove it and uncomment the following three lines.
+  # modify them (or add more) to build your code; if your project needs custom build steps, please refer to the EXAMPLE below for guidance.
+
+ # - run: |
+ # echo "Run, Build Application using script"
+ # ./location_of_script_within_repo/buildscript.sh
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v2
+ with:
+ category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/notify.yml b/.github/workflows/notify.yml
new file mode 100644
index 0000000000..920520f108
--- /dev/null
+++ b/.github/workflows/notify.yml
@@ -0,0 +1,22 @@
+name: Notify
+on:
+ push:
+ branches: [ "main" ]
+ pull_request:
+ branches: [ "main" ]
+ release:
+ types: [ published ]
+
+jobs:
+ notify:
+ name: Notify via Google Chat
+ runs-on: ubuntu-latest
+ steps:
+ - name: Google Chat Notification
+ uses: nakamuraos/google-chat-notifications@v2.0.1
+ with:
+ title: ${{ github.event_name }}
+ subtitle: ${{ github.event.head_commit.message }}
+ webhookUrl: ${{ secrets.GOOGLE_CHAT_WEBHOOK }}
+ status: ${{ job.status }}
+ if: always()
\ No newline at end of file
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000000..6454ef1fd4
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,74 @@
+on:
+ release:
+ types: [ created ]
+name: Handle Release
+jobs:
+ generate-deb:
+ name: Create debian package
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout the repository
+ uses: actions/checkout@v3
+ - uses: addnab/docker-run-action@v3
+ with:
+ image: golang:1.20.4-buster
+ options: -v ${{ github.workspace }}:${{ github.workspace }} --env IS_GITHUB_ACTION=true --env VERSION_TAG=${{ github.event.release.tag_name }}
+ run: |
+ cd ${{ github.workspace }}
+ /bin/bash build/vm/tracing-deb/script.sh
+ - name: Upload the artifacts
+ uses: skx/github-action-publish-binaries@master
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ args: "build/vm/tracing-deb/output/*"
+
+ generate-rpm:
+ name: Create RPM package
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout the repository
+ uses: actions/checkout@v3
+ - uses: addnab/docker-run-action@v3
+ with:
+ image: rockylinux:8
+ options: -v ${{ github.workspace }}:${{ github.workspace }} --env IS_GITHUB_ACTION=true --env VERSION_TAG=${{ github.event.release.tag_name }}
+ run: |
+ curl -L -O https://go.dev/dl/go1.20.4.linux-amd64.tar.gz
+ rm -rf /usr/local/go && tar -C /usr/local -xzf go1.20.4.linux-amd64.tar.gz
+ cd ${{ github.workspace }}
+ PATH=$PATH:/usr/local/go/bin /bin/bash build/vm/tracing-rpm/script.sh
+ mkdir -p ${{ github.workspace }}/output
+ cp -r /root/rpmbuild/RPMS/x86_64/* ${{ github.workspace }}/output
+ - name: Upload the artifacts
+ uses: skx/github-action-publish-binaries@master
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ args: "${{ github.workspace }}/output/*"
+
+ oci-container-image:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v2
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+ - name: Login to GitHub Container Registry
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: LokeshOpsramp
+ password: ${{ secrets.SUPERSECRETPASSWORD }}
+ - name: Docker meta
+ id: meta
+ uses: docker/metadata-action@v4
+ with:
+ images: ghcr.io/LokeshOpsramp/trace-proxy
+ tags: |
+ type=semver,pattern=${{ github.event.release.tag_name }}
+ - name: Build and push
+ uses: docker/build-push-action@v4
+ with:
+ push: true
+ tags: ${{ steps.meta.outputs.tags }}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 76638ae4d0..e38a4ffbde 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,7 +11,10 @@
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
-refinery
-!/cmd/refinery
-test_redimem
-!/cmd/test_redimem
+dockerize*
+
+# IDE configs
+.idea/
+.vscode/*
+.history/
+*.vsix
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..bf46524ab7
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,5 @@
+# Code of Conduct
+
+This project has adopted the Honeycomb User Community Code of Conduct to clarify expected behavior in our community.
+
+https://www.honeycomb.io/honeycomb-user-community-code-of-conduct/
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..9af4c28183
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,3 @@
+# Contributing Guide
+
+Please see our [general guide for OSS lifecycle and practices](https://github.com/jirs5/home/blob/main/honeycomb-oss-lifecycle-and-practices.md).
diff --git a/Dockerfile b/Dockerfile
index 0474fa666f..23ca560256 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,9 +1,32 @@
-FROM golang:alpine as builder
+FROM --platform=$BUILDPLATFORM golang:alpine as builder
+ARG TARGETOS
+ARG TARGETARCH
-RUN apk update && apk add --no-cache git ca-certificates && update-ca-certificates
+RUN apk update && apk add --no-cache git ca-certificates curl && update-ca-certificates
-ARG BUILD_ID=dev
+#Setting up tini
+ENV TINI_URL_ARM="https://coreupdate.central.arubanetworks.com/packages/tini-arm64"
+ENV TINI_ESUM_ARM="c3c8377b2b6bd62e8086be40ce967dd4a6910cec69b475992eff1800ec44b08e"
+ENV TINI_ESUM_AMD="57a120ebc06d16b3fae6a60b6b16da5a20711db41f8934c2089dea0d3eaa4f70"
+ENV TINI_URL_AMD="https://coreupdate.central.arubanetworks.com/packages/tini-amd64"
+RUN set -eux; \
+ case "${TARGETARCH}" in \
+ aarch64|arm64) \
+ ESUM=$TINI_ESUM_ARM; \
+ BINARY_URL=$TINI_URL_ARM; \
+ ;; \
+ amd64|x86_64) \
+ ESUM=$TINI_ESUM_AMD; \
+ BINARY_URL=$TINI_URL_AMD; \
+ ;; \
+ esac; \
+ \
+ curl -fL -o /usr/local/bin/tini "${BINARY_URL}"; \
+ echo "${ESUM} /usr/local/bin/tini" | sha256sum -c -; \
+ chmod +x /usr/local/bin/tini
+
+ARG BUILD_ID="15.0.0"
WORKDIR /app
ADD go.mod go.sum ./
@@ -14,14 +37,28 @@ RUN go mod verify
ADD . .
RUN CGO_ENABLED=0 \
- GOOS=linux \
- GOARCH=amd64 \
+ GOOS=${TARGETOS} \
+ GOARCH=${TARGETARCH} \
go build -ldflags "-X main.BuildID=${BUILD_ID}" \
- -o refinery \
- ./cmd/refinery
+ -o tracing-proxy \
+ ./cmd/tracing-proxy
+
+FROM --platform=$BUILDPLATFORM alpine:3.18
+
+RUN apk update && apk add --no-cache bash jq ca-certificates && update-ca-certificates
+
+COPY --from=builder /app/config_complete.yaml /etc/tracing-proxy/config.yaml
+COPY --from=builder /app/rules_complete.yaml /etc/tracing-proxy/rules.yaml
+
+COPY --from=builder /app/tracing-proxy /usr/bin/tracing-proxy
+COPY --from=builder /usr/local/bin/tini /usr/local/bin/tini
-FROM scratch
+COPY --from=builder /app/start.sh /usr/bin/start.sh
-COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
+#ENTRYPOINT ["tini", \
+# "-F", "/config/data/infra_elasticache.json", \
+# "-F", "/config/data/infra_clusterinfo.json", \
+# "-F", "/config/data/config_tracing-proxy.json", \
+# "--"]
-COPY --from=builder /app/refinery /usr/bin/refinery
+CMD ["/usr/bin/start.sh"]
\ No newline at end of file
diff --git a/Dockerfile_debian b/Dockerfile_debian
new file mode 100644
index 0000000000..b47e5d07dc
--- /dev/null
+++ b/Dockerfile_debian
@@ -0,0 +1,64 @@
+FROM --platform=$BUILDPLATFORM golang:1.20.7-bullseye as builder
+ARG TARGETOS
+ARG TARGETARCH
+
+RUN apt update -y && apt install git ca-certificates curl -y && update-ca-certificates
+
+#Setting up tini
+ENV TINI_URL_ARM="https://coreupdate.central.arubanetworks.com/packages/tini-arm64"
+ENV TINI_ESUM_ARM="c3c8377b2b6bd62e8086be40ce967dd4a6910cec69b475992eff1800ec44b08e"
+ENV TINI_ESUM_AMD="57a120ebc06d16b3fae6a60b6b16da5a20711db41f8934c2089dea0d3eaa4f70"
+ENV TINI_URL_AMD="https://coreupdate.central.arubanetworks.com/packages/tini-amd64"
+
+RUN set -eux; \
+ case "${TARGETARCH}" in \
+ aarch64|arm64) \
+ ESUM=$TINI_ESUM_ARM; \
+ BINARY_URL=$TINI_URL_ARM; \
+ ;; \
+ amd64|x86_64) \
+ ESUM=$TINI_ESUM_AMD; \
+ BINARY_URL=$TINI_URL_AMD; \
+ ;; \
+ esac; \
+ \
+ curl -fL -o /usr/local/bin/tini "${BINARY_URL}"; \
+ echo "${ESUM} /usr/local/bin/tini" | sha256sum -c -; \
+ chmod +x /usr/local/bin/tini
+
+ARG BUILD_ID="15.0.0"
+WORKDIR /app
+
+ADD go.mod go.sum ./
+
+RUN go mod download
+RUN go mod verify
+
+ADD . .
+
+RUN CGO_ENABLED=0 \
+ GOOS=${TARGETOS} \
+ GOARCH=${TARGETARCH} \
+ go build -ldflags "-X main.BuildID=${BUILD_ID}" \
+ -o tracing-proxy \
+ ./cmd/tracing-proxy
+
+FROM --platform=$BUILDPLATFORM debian:bullseye-slim
+
+RUN apt update -y && apt install bash jq ca-certificates -y && update-ca-certificates
+
+COPY --from=builder /app/config_complete.yaml /etc/tracing-proxy/config.yaml
+COPY --from=builder /app/rules_complete.yaml /etc/tracing-proxy/rules.yaml
+
+COPY --from=builder /app/tracing-proxy /usr/bin/tracing-proxy
+COPY --from=builder /usr/local/bin/tini /usr/local/bin/tini
+
+COPY --from=builder /app/start.sh /usr/bin/start.sh
+
+ENTRYPOINT ["tini", \
+ "-F", "/config/data/infra_elasticache.json", \
+ "-F", "/config/data/infra_clusterinfo.json", \
+ "-F", "/config/data/config_tracing-proxy.json", \
+ "--"]
+
+CMD ["/usr/bin/start.sh"]
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000000..d5ae8318b2
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,58 @@
+MAKEFLAGS += --warn-undefined-variables
+MAKEFLAGS += --no-builtin-rules
+MAKEFLAGS += --no-builtin-variables
+
+.PHONY: test
+#: run all tests
+test: test_with_race test_all
+
+.PHONY: test_with_race
+#: run only tests tagged with potential race conditions
+test_with_race: wait_for_redis
+ @echo
+ @echo "+++ testing - race conditions?"
+ @echo
+ go test -tags race --race --timeout 60s -v ./...
+
+.PHONY: test_all
+#: run all tests, but with no race condition detection
+test_all: wait_for_redis
+ @echo
+ @echo "+++ testing - all the tests"
+ @echo
+ go test -tags all --timeout 60s -v ./...
+
+.PHONY: wait_for_redis
+# wait for Redis to become available for test suite
+wait_for_redis: dockerize
+ @echo
+ @echo "+++ We need a Redis running to run the tests."
+ @echo
+ @echo "Checking with dockerize $(shell ./dockerize --version)"
+ @./dockerize -wait tcp://localhost:6379 -timeout 30s
+
+# ensure the dockerize command is available
+dockerize: dockerize.tar.gz
+ tar xzvmf dockerize.tar.gz
+
+HOST_OS := $(shell uname -s | tr A-Z a-z)
+# You can override this version from an environment variable.
+DOCKERIZE_VERSION ?= v0.6.1
+DOCKERIZE_RELEASE_ASSET := dockerize-${HOST_OS}-amd64-${DOCKERIZE_VERSION}.tar.gz
+
+dockerize.tar.gz:
+ @echo
+ @echo "+++ Retrieving dockerize tool for Redis readiness check."
+ @echo
+# make sure that file is available
+ sudo apt-get update
+ sudo apt-get -y install file
+ curl --location --silent --show-error \
+ --output dockerize.tar.gz \
+ https://github.com/jwilder/dockerize/releases/download/${DOCKERIZE_VERSION}/${DOCKERIZE_RELEASE_ASSET} \
+ && file dockerize.tar.gz | grep --silent gzip
+
+.PHONY: clean
+clean:
+ rm -f dockerize.tar.gz
+ rm -f dockerize
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000000..9b25a98729
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,13 @@
+Copyright (c) 2016-Present Honeycomb, Hound Technology, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/OSSMETADATA b/OSSMETADATA
new file mode 100755
index 0000000000..b96d4a4dfa
--- /dev/null
+++ b/OSSMETADATA
@@ -0,0 +1 @@
+osslifecycle=active
diff --git a/README.md b/README.md
index ec797cffcb..8cc1db892d 100644
--- a/README.md
+++ b/README.md
@@ -1,121 +1,46 @@
-# Refinery - the Honeycomb Sampling Proxy
-
-
-
-**Beta Release** This is the initial draft. Please expect and help find bugs! :) Refinery [](https://circleci.com/gh/honeycombio/refinery)
+# Tracing-Proxy - Sampling Proxy For OpenTelemetry Traces
+
+[](https://goreportcard.com/report/github.com/opsramp/tracing-proxy)
+
+
+
+
+
+
+
+
## Purpose
-Refinery is a trace-aware sampling proxy. It collects spans emitted by your application, gathers them into traces, and examines them as a whole. This enables Refinery to make an intelligent sampling decision (whether to keep or discard) based on the entire trace. Buffering the spans allows you to use fields that might be present in different spans within the trace to influence the sampling decision. For example, the root span might have HTTP status code, whereas another span might have information on whether the request was served from a cache. Using Refinery, you can choose to keep only traces that had a 500 status code and were also served from a cache.
-
-## Setting up Refinery
-
-Refinery is designed to sit within your infrastructure where all sources of Honeycomb events (aka spans if you're doing tracing) can reach it. A standard deployment will have a cluster of servers running Refinery accessible via a load balancer. Refinery instances must be able to communicate with each other to concentrate traces on single servers.
-
-Within your application (or other Honeycomb event sources) you would configure the `API Host` to be http(s)://load-balancer/. Everything else remains the same (api key, dataset name, etc. - all that lives with the originating client).
+Tracing-Proxy is a trace-aware sampling proxy. It collects spans emitted by your application, gathers them into traces,
+and examines them as a whole. This enables the proxy to make an intelligent sampling decision (whether to keep or
+discard) based on the entire trace. Buffering the spans allows you to use fields that might be present in different
+spans within the trace to influence the sampling decision. For example, the root span might have HTTP status code,
+whereas another span might have information on whether the request was served from a cache. Using this proxy, you can
+choose to keep only traces that had a 500 status code and were also served from a cache.
### Minimum configuration
-The Refinery cluster should have at least 2 servers with 2GB RAM and access to 2 cores each.
-
-Additional RAM and CPU can be used by increasing configuration values to have a larger `CacheCapacity`. The cluster should be monitored for panics caused by running out of memory and scaled up (with either more servers or more RAM per server) when they occur.
-
-### Builds
-
-Refinery is built by [CircleCI](https://circleci.com/gh/honeycombio/refinery). Released versions of Refinery are available via Github under the Releases tab.
-
-## Configuration
-
-Configuration is done in one of two ways, either entirely by the config file or a combination of the config file and a Redis backend for managing the list of peers in the cluster. When using Redis, it only manages peers - everything else is managed by the config file.
-
-There are a few vital configuration options; read through this list and make sure all the variables are set.
-
-### File-based Config
-
-- API Keys: Refinery itself needs to be configured with a list of your API keys. This lets it respond with a 401/Unauthorized if an unexpected API key is used. You can configure Refinery to accept all API keys by setting it to `*` but then you will lose the authentication feedback to your application. Refinery will accept all events even if those events will eventually be rejected by the Honeycomb API due to an API key issue.
-
-- Goal Sample Rate and the list of fields you'd like to use to generate the keys off which sample rate is chosen. This is where the power of the proxy comes in - being able to dynamically choose sample rates based on the contents of the traces as they go by. There is an overall default and dataset-specific sections for this configuration, so that different datasets can have different sets of fields and goal sample rates.
-
-- Trace timeout - it should be set higher (maybe double?) the longest expected trace. If all of your traces complete in under 10 seconds, 30 is a good value here. If you have traces that can last minutes, it should be raised accordingly. Note that the trace doesn't _have_ to complete before this timer expires - but the sampling decision will be made at that time. So any spans that contain fields that you want to use to compute the sample rate should arrive before this timer expires. Additional spans that arrive after the timer has expired will be sent or dropped according to the sampling decision made when the timer expired.
-
-- Peer list: this is a list of all the other servers participating in this Refinery cluster. Traces are evenly distributed across all available servers, and any one trace must be concentrated on one server, regardless of which server handled the incoming spans. The peer list lets the cluster move spans around to the server that is handling the trace. (Not used in the Redis-based config.)
-
-- Buffer size: The `InMemCollector`'s `CacheCapacity` setting determines how many in-flight traces you can have. This should be large enough to avoid overflow. Some multiple (2x, 3x) the total number of in-flight traces you expect is a good place to start. If it's too low you will see the `collect_cache_buffer_overrun` metric increment. If you see that, you should increase the size of the buffer.
-
-There are a few components of Refinery with multiple implementations; the config file lets you choose which you'd like. As an example, there are two logging implementations - one that uses `logrus` and sends logs to STDOUT and a `honeycomb` implementation that sends the log messages to a Honeycomb dataset instead. Components with multiple implementations have one top level config item that lets you choose which implementation to use and then a section further down with additional config options for that choice (for example, the Honeycomb logger requires an API key).
-
-When configuration changes, send Refinery a USR1 signal and it will re-read the configuration.
-
-### Redis-based Config
-
-In the Redis-based config mode, all config options _except_ peer management are still handled by the config file. Only coordinating the list of peers in the Refinery cluster is managed with Redis.
-
-To enable the redis-based config:
-
-- set PeerManagement.Type in the config file to "redis"
-
-When launched in redis-config mode, Refinery needs a redis host to use for managing the list of peers in the Refinery cluster. This hostname and port can be specified in one of two ways:
-
-- set the `REFINERY_REDIS_HOST` environment variable
-- set the `RedisHost` field in the config file
-
-The redis host should be a hostname and a port, for example `redis.mydomain.com:6379`. The example config file has `localhost:6379` which obviously will not work with more than one host.
+The Tracing-Proxy cluster should have at least 2 servers with 2GB RAM and access to 2 cores each.
-## How sampling decisions are made
-
-In the configuration file, you can choose from a few sampling methods and specify options for each. The `DynamicSampler` is the most interesting and most commonly used. It uses the `AvgSampleRate` algorithm from the [`dynsampler-go`](https://github.com/honeycombio/dynsampler-go) package. Briefly described, you configure Refinery to examine the trace for a set of fields (for example, `request.status_code` and `request.method`). It collects all the values found in those fields anywhere in the trace (eg "200" and "GET") together into a key it hands to the dynsampler. The dynsampler code will look at the frequency that key appears during the previous 30 seconds (or other value set by the `ClearFrequencySec` setting) and use that to hand back a desired sample rate. More frequent keys are sampled more heavily, so that an even distribution of traffic across the keyspace is represented in Honeycomb.
-
-By selecting fields well, you can drop significant amounts of traffic while still retaining good visibility into the areas of traffic that interest you. For example, if you want to make sure you have a complete list of all URL handlers invoked, you would add the URL (or a normalized form) as one of the fields to include. Be careful in your selection though, because if the combination of fields cretes a unique key each time, you won't sample out any traffic. Because of this it is not effective to use fields that have unique values (like a UUID) as one of the sampling fields. Each field included should ideally have values that appear many times within any given 30 second window in order to effectively turn in to a sample rate.
-
-For more detail on how this algorithm works, please refer to the `dynsampler` package itself.
-
-## Dry Run Mode
-
-When getting started with Refinery or when updating sampling rules, it may be helpful to verify that the rules are working as expected before you start dropping traffic. By enabling dry run mode, all spans in each trace will be marked with the sampling decision in a field called `refinery_kept`. All traces will be sent to Honeycomb regardless of the sampling decision. You can then run queries in Honeycomb on this field to check your results and verify that the rules are working as intended. Enable dry run mode by adding `DryRun = true` in your configuration, as noted in `rules_complete.toml`.
-
-When dry run mode is enabled, the metric `trace_send_kept` will increment for each trace, and the metric for `trace_send_dropped` will remain 0, reflecting that we are sending all traces to Honeycomb.
+Additional RAM and CPU can be used by increasing configuration values to have a larger `CacheCapacity`. The cluster
+should be monitored for panics caused by running out of memory and scaled up (with either more servers or more RAM per
+server) when they occur.
## Scaling Up
-Refinery uses bounded queues and circular buffers to manage allocating traces, so even under high volume memory use shouldn't expand dramatically. However, given that traces are stored in a circular buffer, when the throughput of traces exceeds the size of the buffer, things will start to go wrong. If you have stastics configured, a counter named `collect_cache_buffer_overrun` will be incremented each time this happens. The symptoms of this will be that traces will stop getting accumulated together, and instead spans that should be part of the same trace will be treated as two separate traces. All traces will continue to be sent (and sampled) but the sampling decisions will be inconsistent so you'll wind up with partial traces making it through the sampler and it will be very confusing. The size of the circular buffer is a configuration option named `CacheCapacity`. To choose a good value, you should consider the throughput of traces (eg traces / second started) and multiply that by the maximum duration of a trace (say, 3 seconds), then multiply that by some large buffer (maybe 10x). This will give you good headroom.
-
-Determining the number of machines necessary in the cluster is not an exact science, and is best influenced by watching for buffer overruns. But for a rough heuristic, count on a single machine using about 2G of memory to handle 5000 incoming events and tracking 500 sub-second traces per second (for each full trace lasting less than a second and an average size of 10 spans per trace).
-
-## Understanding Regular Operation
-
-Refinery emits a number of metrics to give some indication about the health of the process. These metrics can be exposed to Prometheus or sent up to Honeycomb. The interesting ones to watch are:
-
-- Sample rates: how many traces are kept / dropped, and what does the sample rate distribution look like?
-- [incoming|peer]_router_\*: how many events (no trace info) vs. spans (have trace info) have been accepted, and how many sent on to peers?
-- collect_cache_buffer_overrun: this should remain zero; a positive value indicates the need to grow the size of the collector's circular buffer (via configuration `CacheCapacity`).
-- process_uptime_seconds: records the uptime of each process; look for unexpected restarts as a key towards memory constraints.
-
-## Troubleshooting
-
-The default logging level of `warn` is almost entirely silent. The `debug` level emits too much data to be used in production, but contains excellent information in a pre-production enviromnent. Setting the logging level to `debug` during initial configuration will help understand what's working and what's not, but when traffic volumes increase it should be set to `warn`.
-
-## Restarts
-
-Refinery does not yet buffer traces or sampling decisions to disk. When you restart the process all in-flight traces will be flushed (sent upstream to Honeycomb), but you will lose the record of past trace decisions. When started back up, it will start with a clean slate.
-
-## Architecture of Refinery itself (for contributors)
-
-Within each directory, the interface the dependency exports is in the file with the same name as the directory and then (for the most part) each of the other files are alternative implementations of that interface. For example, in `logger`, `/logger/logger.go` contains the interface definition and `logger/honeycomb.go` contains the implementation of the `logger` interface that will send logs to Honeycomb.
-
-`main.go` sets up the app and makes choices about which versions of dependency implementations to use (eg which logger, which sampler, etc.) It starts up everything and then launches `App`
-
-`app/app.go` is the main control point. When its `Start` function ends, the program shuts down. It launches two `Router`s which listen for incoming events.
-
-`route/route.go` listens on the network for incoming traffic. There are two routers running and they handle different types of incoming traffic: events coming from the outside world (the `incoming` router) and events coming from another member of the Refinery cluster (`peer` traffic). Once it gets an event, it decides where it should go next: is this incoming request an event (or batch of events), and if so, does it have a trace ID? Everything that is not an event or an event that does not have a trace ID is immediately handed to `transmission` to be forwarded on to Honeycomb. If it is an event with a trace ID, the router extracts the trace ID and then uses the `sharder` to decide which member of the Refinery cluster should handle this trace. If it's a peer, the event will be forwarded to that peer. If it's us, the event will be transformed into an internal representation and handed to the `collector` to bundle spans into traces.
-
-`collect/collect.go` the collector is responsible for bundling spans together into traces and deciding when to send them to Honeycomb or if they should be dropped. The first time a trace ID is seen, the collector starts a timer. If the root span (aka a span with a trace ID and no parent ID) arrives before the timer expires, then the trace is considered complete. The trace is sent and the timer is canceled. If the timer expires before the root span arrives, the trace will be sent whether or not it is complete. Just before sending, the collector asks the `sampler` for a sample rate and whether or not to keep the trace. The collector obeys this sampling decision and records it (the record is applied to any spans that may come in as part of the trace after the decision has been made). After making the sampling decision, if the trace is to be kept, it is passed along to the `transmission` for actual sending.
-
-`transmit/transmit.go` is a wrapper around the HTTP interactions with the Honeycomb API. It handles batching events together and sending them upstream.
-
-`logger` and `metrics` are for managing the logs and metrics that Refinery itself produces.
-
-`sampler` contains algorithms to compute sample rates based on the traces provided.
-
-`sharder` determines which peer in a clustered Refinery config is supposed to handle and individual trace.
-
-`types` contains a few type definitions that are used to hand data in between packages.
+Tracing-Proxy uses bounded queues and circular buffers to manage allocating traces, so even under high volume memory use
+shouldn't expand dramatically. However, given that traces are stored in a circular buffer, when the throughput of traces
+exceeds the size of the buffer, things will start to go wrong. If you have statistics configured, a counter
+named `collector_cache_buffer_overrun` will be incremented each time this happens. The symptoms of this will be that
+traces will stop getting accumulated together, and instead spans that should be part of the same trace will be treated
+as two separate traces. All traces will continue to be sent (and sampled) but the sampling decisions will be
+inconsistent so you'll wind up with partial traces making it through the sampler and it will be very confusing. The size
+of the circular buffer is a configuration option named `CacheCapacity`. To choose a good value, you should consider the
+throughput of traces (e.g. traces / second started) and multiply that by the maximum duration of a trace (say, 3
+seconds), then multiply that by some large buffer (maybe 10x). This will give you good headroom.
+
+Determining the number of machines necessary in the cluster is not an exact science, and is best influenced by watching
+for buffer overruns. But for a rough heuristic, count on a single machine using about 2G of memory to handle 5000
+incoming events and tracking 500 sub-second traces per second (for each full trace lasting less than a second and an
+average size of 10 spans per trace).
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000000..c0ce73b5ca
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,3 @@
+# Reporting Security Issues
+
+If you discover a security vulnerability, please open an issue with label `type: security`.
diff --git a/SUPPORT.md b/SUPPORT.md
new file mode 100644
index 0000000000..9164e0642d
--- /dev/null
+++ b/SUPPORT.md
@@ -0,0 +1,3 @@
+# How to Get Help
+
+This project uses GitHub issues to track bugs, feature requests, and questions about using the project. Please search for existing issues before filing a new one.
diff --git a/app/app.go b/app/app.go
index d6731d14e3..07e4342917 100644
--- a/app/app.go
+++ b/app/app.go
@@ -1,22 +1,25 @@
package app
import (
- "github.com/honeycombio/refinery/collect"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/route"
+ "github.com/opsramp/tracing-proxy/collect"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/route"
+ "net/http"
)
+var OpsrampToken string
+
type App struct {
Config config.Config `inject:""`
Logger logger.Logger `inject:""`
IncomingRouter route.Router `inject:"inline"`
PeerRouter route.Router `inject:"inline"`
Collector collect.Collector `inject:""`
- Metrics metrics.Metrics `inject:""`
-
- // Version is the build ID for Refinery so that the running process may answer
+ Metrics metrics.Metrics `inject:"metrics"`
+ Client http.Client
+ // Version is the build ID for tracing-proxy so that the running process may answer
// requests for the version
Version string
}
@@ -26,7 +29,6 @@ type App struct {
// program will exit.
func (a *App) Start() error {
a.Logger.Debug().Logf("Starting up App...")
-
a.IncomingRouter.SetVersion(a.Version)
a.PeerRouter.SetVersion(a.Version)
@@ -34,7 +36,10 @@ func (a *App) Start() error {
// and external sources
a.IncomingRouter.LnS("incoming")
a.PeerRouter.LnS("peer")
-
+ a.Metrics.RegisterWithDescriptionLabels("collector_info", "gauge", "Version Of Tracing-Proxy Running", []string{"version"})
+ a.Metrics.GaugeWithLabels("collector_info", map[string]string{
+ "version": a.Version,
+ }, 1)
return nil
}
diff --git a/app/app_test.go b/app/app_test.go
deleted file mode 100644
index eb89bb6ac2..0000000000
--- a/app/app_test.go
+++ /dev/null
@@ -1,646 +0,0 @@
-// +build all race
-
-package app
-
-import (
- "bytes"
- "compress/gzip"
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/http/httptest"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/facebookgo/inject"
- "github.com/facebookgo/startstop"
- "github.com/klauspost/compress/zstd"
- "github.com/stretchr/testify/assert"
- "gopkg.in/alexcesaro/statsd.v2"
-
- "github.com/honeycombio/libhoney-go"
- "github.com/honeycombio/libhoney-go/transmission"
- "github.com/honeycombio/refinery/collect"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/internal/peer"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/sample"
- "github.com/honeycombio/refinery/sharder"
- "github.com/honeycombio/refinery/transmit"
-)
-
-type countingWriterSender struct {
- transmission.WriterSender
-
- count int
- target int
- ch chan struct{}
- mutex sync.Mutex
-}
-
-func (w *countingWriterSender) Add(ev *transmission.Event) {
- w.WriterSender.Add(ev)
-
- w.mutex.Lock()
- defer w.mutex.Unlock()
-
- w.count++
- if w.ch != nil && w.count >= w.target {
- close(w.ch)
- w.ch = nil
- }
-}
-
-func (w *countingWriterSender) resetCount() {
- w.mutex.Lock()
- w.count = 0
- w.mutex.Unlock()
-}
-
-func (w *countingWriterSender) waitForCount(t testing.TB, target int) {
- w.mutex.Lock()
- if w.count >= target {
- w.mutex.Unlock()
- return
- }
-
- ch := make(chan struct{})
- w.ch = ch
- w.target = target
- w.mutex.Unlock()
-
- select {
- case <-ch:
- case <-time.After(10 * time.Second):
- t.Errorf("timed out waiting for %d events", target)
- }
-}
-
-type testPeers struct {
- peers []string
-}
-
-func (p *testPeers) GetPeers() ([]string, error) {
- return p.peers, nil
-}
-
-func (p *testPeers) RegisterUpdatedPeersCallback(callback func()) {
-}
-
-func newStartedApp(
- t testing.TB,
- libhoneyT transmission.Sender,
- basePort int,
- peers peer.Peers,
-) (*App, inject.Graph) {
- c := &config.MockConfig{
- GetSendDelayVal: 0,
- GetTraceTimeoutVal: 10 * time.Millisecond,
- GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1},
- SendTickerVal: 2 * time.Millisecond,
- PeerManagementType: "file",
- GetUpstreamBufferSizeVal: 10000,
- GetPeerBufferSizeVal: 10000,
- GetListenAddrVal: "127.0.0.1:" + strconv.Itoa(basePort),
- GetPeerListenAddrVal: "127.0.0.1:" + strconv.Itoa(basePort+1),
- GetAPIKeysVal: []string{"KEY"},
- GetHoneycombAPIVal: "http://api.honeycomb.io",
- GetInMemoryCollectorCacheCapacityVal: config.InMemoryCollectorCacheCapacity{CacheCapacity: 10000},
- }
-
- var err error
- if peers == nil {
- peers, err = peer.NewPeers(c)
- assert.NoError(t, err)
- }
-
- a := App{}
-
- lgr := &logger.LogrusLogger{
- Config: c,
- }
- lgr.SetLevel("error")
- lgr.Start()
-
- // TODO use real metrics
- metricsr := &metrics.MockMetrics{}
- metricsr.Start()
-
- collector := &collect.InMemCollector{
- BlockOnAddSpan: true,
- }
-
- peerList, err := peers.GetPeers()
- assert.NoError(t, err)
-
- var shrdr sharder.Sharder
- if len(peerList) > 1 {
- shrdr = &sharder.DeterministicSharder{}
- } else {
- shrdr = &sharder.SingleServerSharder{}
- }
-
- samplerFactory := &sample.SamplerFactory{}
-
- upstreamClient, err := libhoney.NewClient(libhoney.ClientConfig{
- Transmission: libhoneyT,
- })
- assert.NoError(t, err)
-
- sdPeer, _ := statsd.New(statsd.Prefix("refinery.peer"))
- peerClient, err := libhoney.NewClient(libhoney.ClientConfig{
- Transmission: &transmission.Honeycomb{
- MaxBatchSize: 500,
- BatchTimeout: libhoney.DefaultBatchTimeout,
- MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches,
- PendingWorkCapacity: uint(c.GetPeerBufferSize()),
- Transport: &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: 3 * time.Second,
- }).Dial,
- },
- BlockOnSend: true,
- DisableGzipCompression: true,
- EnableMsgpackEncoding: true,
- Metrics: sdPeer,
- },
- })
- assert.NoError(t, err)
-
- var g inject.Graph
- err = g.Provide(
- &inject.Object{Value: c},
- &inject.Object{Value: peers},
- &inject.Object{Value: lgr},
- &inject.Object{Value: http.DefaultTransport, Name: "upstreamTransport"},
- &inject.Object{Value: &transmit.DefaultTransmission{LibhClient: upstreamClient, Name: "upstream_"}, Name: "upstreamTransmission"},
- &inject.Object{Value: &transmit.DefaultTransmission{LibhClient: peerClient, Name: "peer_"}, Name: "peerTransmission"},
- &inject.Object{Value: shrdr},
- &inject.Object{Value: collector},
- &inject.Object{Value: metricsr},
- &inject.Object{Value: "test", Name: "version"},
- &inject.Object{Value: samplerFactory},
- &inject.Object{Value: &a},
- )
- assert.NoError(t, err)
-
- err = g.Populate()
- assert.NoError(t, err)
-
- err = startstop.Start(g.Objects(), nil)
- assert.NoError(t, err)
-
- // Racy: wait just a moment for ListenAndServe to start up.
- time.Sleep(10 * time.Millisecond)
- return &a, g
-}
-
-func post(t testing.TB, req *http.Request) {
- resp, err := httpClient.Do(req)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusOK, resp.StatusCode)
- io.Copy(ioutil.Discard, resp.Body)
- resp.Body.Close()
-}
-
-func TestAppIntegration(t *testing.T) {
- t.Parallel()
-
- var out bytes.Buffer
- _, graph := newStartedApp(t, &transmission.WriterSender{W: &out}, 10000, nil)
-
- // Send a root span, it should be sent in short order.
- req := httptest.NewRequest(
- "POST",
- "http://localhost:10000/1/batch/dataset",
- strings.NewReader(`[{"data":{"trace.trace_id":"1","foo":"bar"}}]`),
- )
- req.Header.Set("X-Honeycomb-Team", "KEY")
- req.Header.Set("Content-Type", "application/json")
-
- resp, err := http.DefaultTransport.RoundTrip(req)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusOK, resp.StatusCode)
- resp.Body.Close()
-
- err = startstop.Stop(graph.Objects(), nil)
- assert.NoError(t, err)
-
- // Wait for span to be sent.
- deadline := time.After(time.Second)
- for {
- if out.Len() > 62 {
- break
- }
- select {
- case <-deadline:
- t.Error("timed out waiting for output")
- return
- case <-time.After(time.Millisecond):
- }
- }
- assert.Equal(t, `{"data":{"foo":"bar","trace.trace_id":"1"},"dataset":"dataset"}`+"\n", out.String())
-}
-
-func TestPeerRouting(t *testing.T) {
- t.Parallel()
-
- peers := &testPeers{
- peers: []string{
- "http://localhost:11001",
- "http://localhost:11003",
- },
- }
-
- var apps [2]*App
- var addrs [2]string
- var senders [2]*transmission.MockSender
- for i := range apps {
- var graph inject.Graph
- basePort := 11000 + (i * 2)
- senders[i] = &transmission.MockSender{}
- apps[i], graph = newStartedApp(t, senders[i], basePort, peers)
- defer startstop.Stop(graph.Objects(), nil)
-
- addrs[i] = "localhost:" + strconv.Itoa(basePort)
- }
-
- // Deliver to host 1, it should be passed to host 0 and emitted there.
- req, err := http.NewRequest(
- "POST",
- "http://localhost:11002/1/batch/dataset",
- nil,
- )
- assert.NoError(t, err)
- req.Header.Set("X-Honeycomb-Team", "KEY")
- req.Header.Set("Content-Type", "application/json")
-
- blob := `[` + string(spans[0]) + `]`
- req.Body = ioutil.NopCloser(strings.NewReader(blob))
- post(t, req)
- assert.Eventually(t, func() bool {
- return len(senders[0].Events()) == 1
- }, 2*time.Second, 2*time.Millisecond)
-
- expectedEvent := &transmission.Event{
- APIKey: "KEY",
- Dataset: "dataset",
- SampleRate: 2,
- APIHost: "http://api.honeycomb.io",
- Timestamp: now,
- Data: map[string]interface{}{
- "trace.trace_id": "1",
- "trace.span_id": "0",
- "trace.parent_id": "0000000000",
- "key": "value",
- "field0": float64(0),
- "field1": float64(1),
- "field2": float64(2),
- "field3": float64(3),
- "field4": float64(4),
- "field5": float64(5),
- "field6": float64(6),
- "field7": float64(7),
- "field8": float64(8),
- "field9": float64(9),
- "field10": float64(10),
- "long": "this is a test of the emergency broadcast system",
- "foo": "bar",
- },
- }
- assert.Equal(t, expectedEvent, senders[0].Events()[0])
-
- // Repeat, but deliver to host 1 on the peer channel, it should not be
- // passed to host 0.
- req, err = http.NewRequest(
- "POST",
- "http://localhost:11003/1/batch/dataset",
- nil,
- )
- assert.NoError(t, err)
- req.Header.Set("X-Honeycomb-Team", "KEY")
- req.Header.Set("Content-Type", "application/json")
-
- req.Body = ioutil.NopCloser(strings.NewReader(blob))
- post(t, req)
- assert.Eventually(t, func() bool {
- return len(senders[1].Events()) == 1
- }, 2*time.Second, 2*time.Millisecond)
- assert.Equal(t, expectedEvent, senders[0].Events()[0])
-}
-
-func TestEventsEndpoint(t *testing.T) {
- t.Parallel()
-
- peers := &testPeers{
- peers: []string{
- "http://localhost:13001",
- "http://localhost:13003",
- },
- }
-
- var apps [2]*App
- var addrs [2]string
- var senders [2]*transmission.MockSender
- for i := range apps {
- var graph inject.Graph
- basePort := 13000 + (i * 2)
- senders[i] = &transmission.MockSender{}
- apps[i], graph = newStartedApp(t, senders[i], basePort, peers)
- defer startstop.Stop(graph.Objects(), nil)
-
- addrs[i] = "localhost:" + strconv.Itoa(basePort)
- }
-
- // Deliver to host 1, it should be passed to host 0 and emitted there.
- zEnc, _ := zstd.NewWriter(nil)
- blob := zEnc.EncodeAll([]byte(`{"foo":"bar","trace.trace_id":"1"}`), nil)
- req, err := http.NewRequest(
- "POST",
- "http://localhost:13002/1/events/dataset",
- bytes.NewReader(blob),
- )
- assert.NoError(t, err)
- req.Header.Set("X-Honeycomb-Team", "KEY")
- req.Header.Set("Content-Type", "application/json")
- req.Header.Set("Content-Encoding", "zstd")
- req.Header.Set("X-Honeycomb-Event-Time", now.Format(time.RFC3339Nano))
- req.Header.Set("X-Honeycomb-Samplerate", "10")
-
- post(t, req)
- assert.Eventually(t, func() bool {
- return len(senders[0].Events()) == 1
- }, 2*time.Second, 2*time.Millisecond)
-
- assert.Equal(
- t,
- &transmission.Event{
- APIKey: "KEY",
- Dataset: "dataset",
- SampleRate: 10,
- APIHost: "http://api.honeycomb.io",
- Timestamp: now,
- Data: map[string]interface{}{
- "trace.trace_id": "1",
- "foo": "bar",
- },
- },
- senders[0].Events()[0],
- )
-
- // Repeat, but deliver to host 1 on the peer channel, it should not be
- // passed to host 0.
-
- blob = blob[:0]
- buf := bytes.NewBuffer(blob)
- gz := gzip.NewWriter(buf)
- gz.Write([]byte(`{"foo":"bar","trace.trace_id":"1"}`))
- gz.Close()
-
- req, err = http.NewRequest(
- "POST",
- "http://localhost:13003/1/events/dataset",
- buf,
- )
- assert.NoError(t, err)
- req.Header.Set("X-Honeycomb-Team", "KEY")
- req.Header.Set("Content-Type", "application/json")
- req.Header.Set("Content-Encoding", "gzip")
- req.Header.Set("X-Honeycomb-Event-Time", now.Format(time.RFC3339Nano))
- req.Header.Set("X-Honeycomb-Samplerate", "10")
-
- post(t, req)
- assert.Eventually(t, func() bool {
- return len(senders[1].Events()) == 1
- }, 2*time.Second, 2*time.Millisecond)
-
- assert.Equal(
- t,
- &transmission.Event{
- APIKey: "KEY",
- Dataset: "dataset",
- SampleRate: 10,
- APIHost: "http://api.honeycomb.io",
- Timestamp: now,
- Data: map[string]interface{}{
- "trace.trace_id": "1",
- "foo": "bar",
- },
- },
- senders[1].Events()[0],
- )
-}
-
-var (
- now = time.Now().UTC()
- nowString = now.Format(time.RFC3339Nano)
- spanFormat = `{"data":{` +
- `"trace.trace_id":"%d",` +
- `"trace.span_id":"%d",` +
- `"trace.parent_id":"0000000000",` +
- `"key":"value",` +
- `"field0":0,` +
- `"field1":1,` +
- `"field2":2,` +
- `"field3":3,` +
- `"field4":4,` +
- `"field5":5,` +
- `"field6":6,` +
- `"field7":7,` +
- `"field8":8,` +
- `"field9":9,` +
- `"field10":10,` +
- `"long":"this is a test of the emergency broadcast system",` +
- `"foo":"bar"` +
- `},"dataset":"dataset",` +
- `"time":"` + nowString + `",` +
- `"samplerate":2` +
- `}`
- spans [][]byte
-
- httpClient = &http.Client{Transport: &http.Transport{
- DialContext: (&net.Dialer{
- Timeout: 1 * time.Second,
- KeepAlive: 30 * time.Second,
- DualStack: true,
- }).DialContext,
- MaxIdleConns: 100,
- MaxConnsPerHost: 100,
- IdleConnTimeout: 90 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- }}
-)
-
-// Pre-build spans to send, none are root spans
-func init() {
- var tid int
- spans = make([][]byte, 100000)
- for i := range spans {
- if i%10 == 0 {
- tid++
- }
- spans[i] = []byte(fmt.Sprintf(spanFormat, tid, i))
- }
-}
-
-func BenchmarkTraces(b *testing.B) {
- ctx := context.Background()
-
- sender := &countingWriterSender{
- WriterSender: transmission.WriterSender{
- W: ioutil.Discard,
- },
- }
- _, graph := newStartedApp(b, sender, 11000, nil)
-
- req, err := http.NewRequest(
- "POST",
- "http://localhost:11000/1/batch/dataset",
- nil,
- )
- assert.NoError(b, err)
- req.Header.Set("X-Honeycomb-Team", "KEY")
- req.Header.Set("Content-Type", "application/json")
-
- b.Run("single", func(b *testing.B) {
- sender.resetCount()
- for n := 0; n < b.N; n++ {
- blob := `[` + string(spans[n%len(spans)]) + `]`
- req.Body = ioutil.NopCloser(strings.NewReader(blob))
- post(b, req)
- }
- sender.waitForCount(b, b.N)
- })
-
- b.Run("batch", func(b *testing.B) {
- sender.resetCount()
-
- // over-allocate blob for 50 spans
- blob := make([]byte, 0, len(spanFormat)*100)
- for n := 0; n < (b.N/50)+1; n++ {
- blob = append(blob[:0], '[')
- for i := 0; i < 50; i++ {
- blob = append(blob, spans[((n*50)+i)%len(spans)]...)
- blob = append(blob, ',')
- }
- blob[len(blob)-1] = ']'
- req.Body = ioutil.NopCloser(bytes.NewReader(blob))
-
- post(b, req)
- }
- sender.waitForCount(b, b.N)
- })
-
- b.Run("multi", func(b *testing.B) {
- sender.resetCount()
- var wg sync.WaitGroup
- for i := 0; i < 10; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- req := req.Clone(ctx)
- blob := make([]byte, 0, len(spanFormat)*100)
- for n := 0; n < (b.N/500)+1; n++ {
- blob = append(blob[:0], '[')
- for i := 0; i < 50; i++ {
- blob = append(blob, spans[((n*50)+i)%len(spans)]...)
- blob = append(blob, ',')
- }
- blob[len(blob)-1] = ']'
- req.Body = ioutil.NopCloser(bytes.NewReader(blob))
-
- resp, err := httpClient.Do(req)
- assert.NoError(b, err)
- if resp != nil {
- assert.Equal(b, http.StatusOK, resp.StatusCode)
- io.Copy(ioutil.Discard, resp.Body)
- resp.Body.Close()
- }
- }
- }()
- }
- wg.Wait()
- sender.waitForCount(b, b.N)
- })
-
- err = startstop.Stop(graph.Objects(), nil)
- assert.NoError(b, err)
-}
-
-func BenchmarkDistributedTraces(b *testing.B) {
- sender := &countingWriterSender{
- WriterSender: transmission.WriterSender{
- W: ioutil.Discard,
- },
- }
-
- peers := &testPeers{
- peers: []string{
- "http://localhost:12001",
- "http://localhost:12003",
- "http://localhost:12005",
- "http://localhost:12007",
- "http://localhost:12009",
- },
- }
-
- var apps [5]*App
- var addrs [5]string
- for i := range apps {
- var graph inject.Graph
- basePort := 12000 + (i * 2)
- apps[i], graph = newStartedApp(b, sender, basePort, peers)
- defer startstop.Stop(graph.Objects(), nil)
-
- addrs[i] = "localhost:" + strconv.Itoa(basePort)
- }
-
- req, err := http.NewRequest(
- "POST",
- "http://localhost:12000/1/batch/dataset",
- nil,
- )
- assert.NoError(b, err)
- req.Header.Set("X-Honeycomb-Team", "KEY")
- req.Header.Set("Content-Type", "application/json")
-
- b.Run("single", func(b *testing.B) {
- sender.resetCount()
- for n := 0; n < b.N; n++ {
- blob := `[` + string(spans[n%len(spans)]) + `]`
- req.Body = ioutil.NopCloser(strings.NewReader(blob))
- req.URL.Host = addrs[n%len(addrs)]
- post(b, req)
- }
- sender.waitForCount(b, b.N)
- })
-
- b.Run("batch", func(b *testing.B) {
- sender.resetCount()
-
- // over-allocate blob for 50 spans
- blob := make([]byte, 0, len(spanFormat)*100)
- for n := 0; n < (b.N/50)+1; n++ {
- blob = append(blob[:0], '[')
- for i := 0; i < 50; i++ {
- blob = append(blob, spans[((n*50)+i)%len(spans)]...)
- blob = append(blob, ',')
- }
- blob[len(blob)-1] = ']'
- req.Body = ioutil.NopCloser(bytes.NewReader(blob))
- req.URL.Host = addrs[n%len(addrs)]
-
- post(b, req)
- }
- sender.waitForCount(b, b.N)
- })
-}
diff --git a/build-docker.sh b/build-docker.sh
new file mode 100755
index 0000000000..4a6324a1db
--- /dev/null
+++ b/build-docker.sh
@@ -0,0 +1,24 @@
+set -o nounset
+set -o pipefail
+set -o xtrace
+
+TAGS="latest"
+VERSION="dev"
+if [[ -n ${CIRCLE_TAG:-} ]]; then
+ # trim 'v' prefix if present
+ VERSION=${CIRCLE_TAG#"v"}
+ # append version to image tags
+ TAGS+=",$VERSION"
+fi
+
+unset GOOS
+unset GOARCH
+export KO_DOCKER_REPO=${KO_DOCKER_REPO:-ko.local}
+export GOFLAGS="-ldflags=-X=main.BuildID=$VERSION"
+export SOURCE_DATE_EPOCH=$(date +%s)
+# shellcheck disable=SC2086
+ko publish \
+ --tags "${TAGS}" \
+ --base-import-paths \
+ --platform "linux/amd64,linux/arm64" \
+ ./cmd/tracing-proxy
diff --git a/build-pkg.sh b/build-pkg.sh
index c26cc0a6cd..1cd44f1111 100755
--- a/build-pkg.sh
+++ b/build-pkg.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-# Build deb or rpm packages for Refinery.
+# Build deb or rpm packages for tracing-proxy.
set -e
function usage() {
@@ -30,14 +30,14 @@ if [ -z "$version" ]; then
version=v0.0.0-dev
fi
-fpm -s dir -n refinery \
- -m "Honeycomb " \
+fpm -s dir -n tracing-proxy \
+ -m "Opsramp " \
-v ${version#v} \
-t $pkg_type \
-a $arch \
--pre-install=./preinstall \
- $GOPATH/bin/refinery-linux-${arch}=/usr/bin/refinery \
- ./refinery.upstart=/etc/init/refinery.conf \
- ./refinery.service=/lib/systemd/system/refinery.service \
- ./config.toml=/etc/refinery/refinery.toml \
- ./rules.toml=/etc/refinery/rules.toml
+ $GOPATH/bin/tracing-proxy-linux-${arch}=/usr/bin/tracing-proxy \
+ ./tracing-proxy.upstart=/etc/init/tracing-proxy.conf \
+ ./tracing-proxy.service=/lib/systemd/system/tracing-proxy.service \
+ ./config.toml=/etc/tracing-proxy/tracing-proxy.toml \
+ ./rules.toml=/etc/tracing-proxy/rules.toml
diff --git a/build/README.md b/build/README.md
new file mode 100644
index 0000000000..a7ad4b4923
--- /dev/null
+++ b/build/README.md
@@ -0,0 +1,81 @@
+# Publishing Helm Chart
+
+## Packaging the Chart
+
+```shell
+$ helm package CHART-PATH
+```
+
+Replace CHART-PATH with the path to the directory that contains your Chart.yaml file.
+
+Helm uses the chart name and version for the archive file name. In case of opsramp-tracing-proxy it would be similar to
+opsramp-tracing-proxy-0.1.0.tgz
+
+## Pushing the Chart to Google Artifact Repository
+
+### Install and Initialize Google Cloud CLI
+
+**Link:** https://cloud.google.com/sdk/docs/install-sdk
+
+### Configure Docker Config for Push
+
+```shell
+$ gcloud auth configure-docker REPO-LOCATION
+```
+
+Valid values for REPO-LOCATION are listed in the [repository locations](https://cloud.google.com/artifact-registry/docs/repositories/repo-locations) documentation.
+
+### Pushing the Chart
+
+```shell
+$ helm push opsramp-tracing-proxy-0.1.0.tgz oci://LOCATION-docker.pkg.dev/PROJECT/REPOSITORY
+```
+
+Replace the following values:
+
+**LOCATION** is the regional or
+multi-regional [location](https://cloud.google.com/artifact-registry/docs/repositories/repo-locations) of the
+repository.
+
+**PROJECT** is your Google
+Cloud [project ID](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects). If
+your project ID contains a colon (:), see Domain-scoped projects.
+
+**REPOSITORY** is the name of the repository.
+
+### Verify that the push operation was successful
+
+```shell
+$ gcloud artifacts docker images list LOCATION-docker.pkg.dev/PROJECT/REPOSITORY
+```
+
+## Installing the Helm Chart
+
+### Installing the Chart
+
+```shell
+$ helm pull oci://LOCATION-docker.pkg.dev/PROJECT/REPOSITORY/IMAGE \
+ --version VERSION \
+ --untar
+$ cd opsramp-tracing-proxy
+
+$ kubectl create ns NAMESPACE
+$ helm install opsramp-tracing-proxy -n opsramp-tracing-proxy .
+```
+
+Replace the following values:
+
+**LOCATION** is the regional or
+multi-regional [location](https://cloud.google.com/artifact-registry/docs/repositories/repo-locations) of the
+repository.
+
+**PROJECT** is your Google Cloud project ID. If
+your [project ID](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects)
+contains a colon (:), see [Domain-scoped](https://cloud.google.com/artifact-registry/docs/docker/names#domain) projects.
+
+**REPOSITORY** is the name of the repository where the image is stored.
+
+**IMAGE** is the name of the image in the repository.
+
+**VERSION** is the semantic version of the chart. This flag is required. Helm does not support pulling a chart using a tag.
+
diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/.helmignore b/build/kubernetes/helm/opsramp-tracing-proxy/.helmignore
new file mode 100644
index 0000000000..0e8a0eb36f
--- /dev/null
+++ b/build/kubernetes/helm/opsramp-tracing-proxy/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml
new file mode 100644
index 0000000000..a4706d35f3
--- /dev/null
+++ b/build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml
@@ -0,0 +1,16 @@
+apiVersion: v2
+name: opsramp-tracing-proxy
+description: A Helm chart for OpsRamp Tracing Proxy
+
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 15.0.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "15.0.0"
\ No newline at end of file
diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/templates/_helpers.tpl b/build/kubernetes/helm/opsramp-tracing-proxy/templates/_helpers.tpl
new file mode 100644
index 0000000000..37cab6a7ba
--- /dev/null
+++ b/build/kubernetes/helm/opsramp-tracing-proxy/templates/_helpers.tpl
@@ -0,0 +1,95 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "opsramp-tracing-proxy.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "opsramp-tracing-proxy.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "opsramp-tracing-proxy.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "opsramp-tracing-proxy.labels" -}}
+helm.sh/chart: {{ include "opsramp-tracing-proxy.chart" . }}
+{{ include "opsramp-tracing-proxy.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "opsramp-tracing-proxy.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "opsramp-tracing-proxy.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Service Ports
+*/}}
+{{- define "httpPort" -}}
+{{ if .Values.service }} {{ default 8082 .Values.service.http }} {{ else }} 8082 {{ end }}
+{{- end }}
+{{- define "grpcPort" -}}
+{{ if .Values.service }} {{ default 9090 .Values.service.grpc }} {{ else }} 9090 {{ end }}
+{{- end }}
+{{- define "httpPeerPort" -}}
+{{ if .Values.service }} {{ default 8081 .Values.service.peer }} {{ else }} 8081 {{ end }}
+{{- end }}
+{{- define "grpcPeerPort" -}}
+{{ if .Values.service }} {{ default 8084 .Values.service.grpcPeer }} {{ else }} 8084 {{ end }}
+{{- end }}
+
+
+{{/*
+Image Defaults
+*/}}
+{{- define "imagePullPolicy" -}}
+{{ if .Values.image }} {{ default "Always" .Values.image.pullPolicy | quote }} {{ else }} "Always" {{ end }}
+{{- end }}
+
+
+{{/*
+Redis Defaults
+*/}}
+{{- define "opsramp-tracing-proxy.redis.fullname" -}}
+{{ include "opsramp-tracing-proxy.fullname" . }}-redis
+{{- end }}
+{{- define "opsramp-tracing-proxy.redis.labels" -}}
+helm.sh/chart: {{ include "opsramp-tracing-proxy.chart" . }}
+{{ include "opsramp-tracing-proxy.redis.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+{{- define "opsramp-tracing-proxy.redis.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "opsramp-tracing-proxy.name" . }}-redis
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
\ No newline at end of file
diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/templates/deployment-redis.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/templates/deployment-redis.yaml
new file mode 100644
index 0000000000..925f537abd
--- /dev/null
+++ b/build/kubernetes/helm/opsramp-tracing-proxy/templates/deployment-redis.yaml
@@ -0,0 +1,49 @@
+{{- if .Values.redis.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "opsramp-tracing-proxy.redis.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "opsramp-tracing-proxy.redis.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "opsramp-tracing-proxy.redis.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "opsramp-tracing-proxy.redis.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ containers:
+ - name: redis
+ image: "{{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }}"
+ imagePullPolicy: {{ .Values.redis.image.pullPolicy }}
+ ports:
+ - name: redis
+ containerPort: 6379
+ protocol: TCP
+ {{- with .Values.redis.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.redis.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.redis.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- end }}
\ No newline at end of file
diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/templates/deployment.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/templates/deployment.yaml
new file mode 100644
index 0000000000..4abe0ea96f
--- /dev/null
+++ b/build/kubernetes/helm/opsramp-tracing-proxy/templates/deployment.yaml
@@ -0,0 +1,69 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "opsramp-tracing-proxy.fullname" . }}
+ labels:
+ {{- include "opsramp-tracing-proxy.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "opsramp-tracing-proxy.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "opsramp-tracing-proxy.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{- include "imagePullPolicy" . }}
+ command:
+ - "/usr/bin/tracing-proxy"
+ - "-c"
+ - "/etc/tracing-proxy/config.yaml"
+ - "-r"
+ - "/etc/tracing-proxy/rules.yaml"
+ ports:
+ - name: http
+ containerPort: {{include "httpPort" . | trim }}
+ protocol: TCP
+ - name: peer
+ containerPort: {{include "httpPeerPort" . | trim }}
+ protocol: TCP
+ - containerPort: {{include "grpcPort" . | trim }}
+ name: grpc
+ - containerPort: {{include "grpcPeerPort" . | trim }}
+ name: grpc-peer
+ {{- with .Values.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: {{ include "opsramp-tracing-proxy.fullname" . }}-rules
+ mountPath: /etc/tracing-proxy/rules.yaml
+ subPath: rules.yaml
+ readOnly: true
+ - name: {{ include "opsramp-tracing-proxy.fullname" . }}-config
+ mountPath: /etc/tracing-proxy/config.yaml
+ subPath: config.yaml
+ readOnly: true
+ volumes:
+ - configMap:
+ name: {{ include "opsramp-tracing-proxy.fullname" . }}-rules
+ name: {{ include "opsramp-tracing-proxy.fullname" . }}-rules
+ - configMap:
+ name: {{ include "opsramp-tracing-proxy.fullname" . }}-config
+ name: {{ include "opsramp-tracing-proxy.fullname" . }}-config
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/templates/k8s-config-cm.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/templates/k8s-config-cm.yaml
new file mode 100644
index 0000000000..865274a11f
--- /dev/null
+++ b/build/kubernetes/helm/opsramp-tracing-proxy/templates/k8s-config-cm.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "opsramp-tracing-proxy.fullname" . }}-config
+ labels:
+ {{- include "opsramp-tracing-proxy.labels" . | nindent 4 }}
+data:
+ config.yaml: |-
+ {{- tpl (toYaml .Values.config) . | nindent 4 }}
\ No newline at end of file
diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml
new file mode 100644
index 0000000000..584cf7b035
--- /dev/null
+++ b/build/kubernetes/helm/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "opsramp-tracing-proxy.fullname" . }}-rules
+ labels:
+ {{- include "opsramp-tracing-proxy.labels" . | nindent 4 }}
+data:
+ rules.yaml: |-
+ {{- toYaml .Values.rules | nindent 4 }}
\ No newline at end of file
diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/templates/service-redis.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/templates/service-redis.yaml
new file mode 100644
index 0000000000..835a3bd372
--- /dev/null
+++ b/build/kubernetes/helm/opsramp-tracing-proxy/templates/service-redis.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.redis.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "opsramp-tracing-proxy.redis.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "opsramp-tracing-proxy.redis.labels" . | nindent 4 }}
+spec:
+ type: ClusterIP
+ ports:
+ - name: tcp-redis
+ port: 6379
+ protocol: TCP
+ targetPort: redis
+ selector:
+ {{- include "opsramp-tracing-proxy.redis.selectorLabels" . | nindent 4 }}
+{{- end}}
\ No newline at end of file
diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/templates/service.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/templates/service.yaml
new file mode 100644
index 0000000000..64b557ee66
--- /dev/null
+++ b/build/kubernetes/helm/opsramp-tracing-proxy/templates/service.yaml
@@ -0,0 +1,31 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "opsramp-tracing-proxy.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "opsramp-tracing-proxy.labels" . | nindent 4 }}
+ {{- with .Values.service.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{include "httpPort" . | trim }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ - port: {{include "httpPeerPort" . | trim }}
+ targetPort: peer
+ protocol: TCP
+ name: peer
+ - port: {{include "grpcPort" . | trim }}
+ targetPort: grpc
+ name: grpc
+ - port: {{include "grpcPeerPort" . | trim }}
+ targetPort: grpc-peer
+ name: grpc-peer
+
+ selector:
+ {{- include "opsramp-tracing-proxy.selectorLabels" . | nindent 4 }}
\ No newline at end of file
diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml
new file mode 100644
index 0000000000..1f537c638d
--- /dev/null
+++ b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml
@@ -0,0 +1,491 @@
+# use replicaCount to specify the size of the trace-proxy cluster
+replicaCount: 3
+
+# configure the cpu and memory limits for each node in the cluster
+#resources:
+# limits:
+# cpu: "2000m"
+# memory: "4Gi"
+# requests:
+# cpu: "500m"
+# memory: "1Gi"
+
+
+image:
+ repository: us-docker.pkg.dev/opsramp-registry/agent-images/trace-proxy
+ pullPolicy: Always # use "IfNotPresent" to avoid pulling the image every time
+ tag: "latest" # if empty, then defaults to the chart appVersion.
+
+
+podAnnotations: { }
+imagePullSecrets: [ ]
+nameOverride: ""
+fullnameOverride: ""
+
+service:
+ type: ClusterIP
+ http: 8082
+ peer: 8081
+ grpc: 9090
+ grpcPeer: 8084
+ annotations: { }
+
+config:
+ ########################
+ ## Trace Proxy Config ##
+ ########################
+
+ # ListenAddr is the IP and port on which to listen for incoming events. Incoming
+ # traffic is expected to be HTTP, so if using SSL put something like nginx in
+ # front to do the TLS Termination.
+ ListenAddr: 0.0.0.0:{{include "httpPort" . | trim }}
+
+ # GRPCListenAddr is the IP and port on which to listen for incoming events over
+ # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put something like nginx in
+ # front to do the TLS Termination.
+ GRPCListenAddr: 0.0.0.0:{{include "grpcPort" . | trim }}
+
+ # PeerListenAddr is the IP and port on which to listen for traffic being
+ # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL
+ # put something like nginx in front to do the decryption. Must be different from
+ # ListenAddr
+ PeerListenAddr: 0.0.0.0:{{include "httpPeerPort" . | trim }}
+
+ GRPCPeerListenAddr: 0.0.0.0:{{include "grpcPeerPort" . | trim }}
+
+ # CompressPeerCommunication determines whether to compress span data
+ # it forwards to peers. If it costs money to transmit data between different
+ # instances (e.g. they're spread across AWS availability zones), then you
+ # almost certainly want compression enabled to reduce your bill. The option to
+ # disable it is provided as an escape hatch for deployments that value lower CPU
+ # utilization over data transfer costs.
+ CompressPeerCommunication: true
+
+ # OpsrampAPI is the URL for the upstream Opsramp API.
+ OpsrampAPI: ""
+
+ # Dataset you want to use for sampling
+ Dataset: "ds"
+
+ #Tls Options
+ UseTls: true
+ UseTlsInsecure: false
+
+ # LoggingLevel valid options are "debug", "info", "error", and "panic".
+ LoggingLevel: error
+
+ # SendDelay is a short timer that will be triggered when a trace is complete.
+ # Trace Proxy will wait for this duration before actually sending the trace. The
+ # reason for this short delay is to allow for small network delays or clock
+ # jitters to elapse and any final spans to arrive before actually sending the
+ # trace. This supports duration strings with supplied units. Set to 0 for
+ # immediate sends.
+ SendDelay: 2s
+
+ # BatchTimeout dictates how frequently to send unfulfilled batches. By default
+ # this will use the DefaultBatchTimeout in libtrace as its value, which is 100ms.
+ # Eligible for live reload.
+ BatchTimeout: 1s
+
+ # TraceTimeout is a long timer; it represents the outside boundary of how long
+ # to wait before sending an incomplete trace. Normally traces are sent when the
+ # root span arrives. Sometimes the root span never arrives (due to crashes or
+ # whatever), and this timer will send a trace even without having received the
+ # root span. If you have particularly long-lived traces you should increase this
+ # timer. This supports duration strings with supplied units.
+ TraceTimeout: 60s
+
+ # MaxBatchSize is the number of events to be included in the batch for sending
+ MaxBatchSize: 500
+
+ # SendTicker is a short timer; it determines the duration to use to check for traces to send
+ SendTicker: 100ms
+
+ # UpstreamBufferSize and PeerBufferSize control how large of an event queue to use
+ # when buffering events that will be forwarded to peers or the upstream API.
+ UpstreamBufferSize: 1000
+ PeerBufferSize: 1000
+
+ # AddHostMetadataToTrace determines whether to add information about
+ # the host that tracing proxy is running on to the spans that it processes.
+ # If enabled, information about the host will be added to each span with the
+ # key 'meta.local_hostname'.
+ AddHostMetadataToTrace: false
+
+ # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics
+ # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"}
+  # max number of additional keys supported is 5; if the limit is exceeded, then only the first 5 are considered
+ # based on sorted order of keys
+ # "app" label is mandatory
+ AddAdditionalMetadata: { "app": "default" }
+
+ # EnvironmentCacheTTL is the amount of time a cache entry will live that associates
+ # an API key with an environment name.
+  # Cache misses look up the environment name using the OpsRampAPI config value.
+ # Default is 1 hour ("1h").
+ EnvironmentCacheTTL: "1h"
+
+ # QueryAuthToken, if specified, provides a token that must be specified with
+ # the header "X-OpsRamp-Tracing-Proxy-Query" in order for a /query request to succeed.
+ # These /query requests are intended for debugging OpsRamp-Tracing-Proxy installations and
+ # are not typically needed in normal operation.
+ # Can be specified in the environment as TRACING_PROXY_QUERY_AUTH_TOKEN.
+ # If left unspecified, the /query endpoints are inaccessible.
+ # QueryAuthToken: "some-random-value"
+
+ # AddRuleReasonToTrace causes traces that are sent to OpsRamp to include a field which
+ # contains text indicating which rule was evaluated that caused the trace to be included.
+ AddRuleReasonToTrace: true
+
+ # AdditionalErrorFields should be a list of span fields that should be included when logging
+ # errors that happen during ingestion of events (for example, the span too large error).
+ # This is primarily useful in trying to track down misbehaving senders in a large installation.
+ # The fields `dataset`, `apihost`, and `environment` are always included.
+ # If a field is not present in the span, it will not be present in the error log.
+ # Default is ["trace.span_id"].
+ AdditionalErrorFields:
+ - trace.span_id
+
+ # AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate
+ # the number of child spans on the trace at the time the sampling decision was made.
+ # This value is available to the rules-based sampler, making it possible to write rules that
+ # are dependent upon the number of spans in the trace.
+ # Default is false.
+ AddSpanCountToRoot: false
+
+ # CacheOverrunStrategy controls the cache management behavior under memory pressure.
+ # "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again,
+ # which is generally not helpful unless it occurs because of a permanent change in traffic patterns.
+ # In the "impact" strategy, the items having the most impact on the cache size are
+ # ejected from the cache earlier than normal but the cache is not resized.
+ # In all cases, it only applies if MaxAlloc is nonzero.
+ # Default is "resize" for compatibility but "impact" is recommended for most installations.
+ CacheOverrunStrategy: "impact"
+
+ #########################
+ ## Retry Configuration ##
+ #########################
+ RetryConfiguration:
+ # InitialInterval the time to wait after the first failure before retrying.
+ InitialInterval: 500ms
+ # RandomizationFactor is a random factor used to calculate next backoff
+ # Randomized interval = RetryInterval * (1 Âą RandomizationFactor)
+ RandomizationFactor: 0.5
+ # Multiplier is the value multiplied by the backoff interval bounds
+ Multiplier: 1.5
+ # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between
+ # consecutive retries will always be `MaxInterval`.
+ MaxInterval: 60s
+ # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request.
+ # Once this value is reached, the data is discarded.
+ MaxElapsedTime: 15m
+
+ #########################
+ ## Proxy Configuration ##
+ #########################
+ ProxyConfiguration:
+ # Protocol accepts http and https
+ Protocol: "http"
+ # Host takes the proxy server address
+ Host: ""
+ # Port takes the proxy server port
+ Port: 3128
+ # UserName takes the proxy username
+ Username: ""
+ # Password takes the proxy password
+ Password: ""
+
+ ##################################
+ ## Authentication Configuration ##
+ ##################################
+ AuthConfiguration:
+ # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made
+ Endpoint: ""
+ # Key - authentication key provided in OpsRamp Portal
+ Key: ""
+ # Secret - authentication Secret provided in OpsRamp Portal
+ Secret: ""
+ # TenantId - tenant/client id to which the traces are to be posted
+ TenantId: ""
+
+ ############################
+ ## Implementation Choices ##
+ ############################
+ # Each of the config options below chooses an implementation of a Trace Proxy
+ # component to use. Depending on the choice, there may be more configuration
+ # required below in the section for that choice. Changing implementation choices
+ # requires a process restart.
+ # Collector describes which collector to use for collecting traces. The only
+ # current valid option is "InMemCollector". More can be added by adding
+ # implementations of the Collector interface.
+ Collector: "InMemCollector"
+
+ # InMemCollector brings together all the settings that are relevant to
+ # collecting spans together to make traces.
+ InMemCollector:
+
+ # The collection cache is used to collect all spans into a trace as well as
+ # remember the sampling decision for any spans that might come in after the
+ # trace has been marked "complete" (either by timing out or seeing the root
+ # span). The number of traces in the cache should be many multiples (100x to
+ # 1000x) of the total number of concurrently active traces (trace throughput *
+ # trace duration).
+ CacheCapacity: 1000
+
+ # MaxAlloc is optional. If set, it must be an integer >= 0.
+ # If set to a non-zero value, once per tick (see SendTicker) the collector
+ # will compare total allocated bytes to this value. If allocation is too
+ # high, cache capacity will be reduced and an error will be logged.
+ # Useful values for this setting are generally in the range of 75%-90% of
+    # available system memory. Using 80% is recommended.
+    # This value should be set according to the resources.limits.memory
+    # By default that setting is 4GB, and this is set to 80% of that limit
+    # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,836 (rounded down)
+ # MaxAlloc: 3435973836
+ MaxAlloc: 0
+
+ #####################
+ ## Peer Management ##
+ #####################
+
+ # Configure how OpsRamp-Tracing-Proxy peers are discovered and managed
+ PeerManagement:
+ # Strategy controls the way that traces are assigned to Trace Proxy nodes.
+ # The "legacy" strategy uses a simple algorithm that unfortunately causes
+ # 1/2 of the in-flight traces to be assigned to a different node whenever the
+ # number of nodes changes.
+ # The legacy strategy is deprecated and is intended to be removed in a future release.
+ # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the
+ # number of nodes) are disrupted when the node count changes.
+ # Not eligible for live reload.
+ Strategy: "hash"
+
+ ###########################################################
+ ###### Redis (Suitable for all types of deployments) ######
+ ###########################################################
+ # The type should always be redis when deployed to Kubernetes environments
+ Type: "redis"
+
+ # RedisHost is used to connect to redis for peer cluster membership management.
+ # Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes
+ # precedence and this value is ignored.
+ # Not eligible for live reload.
+ # RedisHost will default to the name used for the release or name overrides depending on what is used,
+    # but can be overridden to a specific value.
+ RedisHost: '{{include "opsramp-tracing-proxy.redis.fullname" .}}:6379'
+
+ # RedisUsername is the username used to connect to redis for peer cluster membership management.
+ # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes
+ # precedence and this value is ignored.
+ # Not eligible for live reload.
+ RedisUsername: ""
+
+ # RedisPassword is the password used to connect to redis for peer cluster membership management.
+ # If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes
+ # precedence and this value is ignored.
+ # Not eligible for live reload.
+ RedisPassword: ""
+
+ # RedisPrefix is a string used as a prefix for the keys in redis while storing
+ # the peer membership. It might be useful to set this in any situation where
+ # multiple trace-proxy clusters or multiple applications want to share a single
+ # Redis instance. It may not be blank.
+ RedisPrefix: "tracing-proxy"
+
+ # RedisDatabase is an integer from 0-15 indicating the database number to use
+ # for the Redis instance storing the peer membership. It might be useful to set
+ # this in any situation where multiple trace-proxy clusters or multiple
+ # applications want to share a single Redis instance.
+ RedisDatabase: 0
+
+ # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2.
+ # Not eligible for live reload.
+ UseTLS: false
+
+ # UseTLSInsecure disables certificate checks
+ # Not eligible for live reload.
+ UseTLSInsecure: false
+
+ # IdentifierInterfaceName is optional.
+ # Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name.
+    # When configured, the pod's IP will be used in the peer list
+ IdentifierInterfaceName: eth0
+
+ # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first
+ # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use
+ # the first IPV6 unicast address found.
+ UseIPV6Identifier: false
+ ###########################################################
+
+ # LogrusLogger is a section of the config only used if you are using the
+ # LogrusLogger to send all logs to STDOUT using the logrus package.
+ LogrusLogger:
+ # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"]
+ LogFormatter: 'json'
+    # LogOutput specifies where the logs are supposed to be written. Accepts one of ["stdout", "stderr"]
+ LogOutput: 'stdout'
+
+ MetricsConfig:
+ # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp
+ Enable: true
+
+ # ListenAddr determines the interface and port on which Prometheus will
+ # listen for requests for /metrics. Must be different from the main Trace Proxy
+ # listener.
+ ListenAddr: '0.0.0.0:2112'
+
+ # OpsRampAPI is the URL for the upstream OpsRamp API.
+ OpsRampAPI: ""
+
+ # ReportingInterval is the frequency specified in seconds at which
+ # the metrics are collected and sent to OpsRamp
+ ReportingInterval: 10
+
+ # MetricsList is a list of regular expressions which match the metric
+ # names. Keep the list as small as possible since too many regular expressions can lead to bad performance.
+ # Internally, all the items in the list are concatenated using '|' to make the computation faster.
+ MetricsList: [ ".*" ]
+
+ GRPCServerParameters:
+ # MaxConnectionIdle is a duration for the amount of time after which an
+ # idle connection would be closed by sending a GoAway. Idleness duration is
+ # defined since the most recent time the number of outstanding RPCs became
+ # zero or the connection establishment.
+ # 0s sets duration to infinity which is the default:
+ # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219
+ # MaxConnectionIdle: "1m"
+
+ # MaxConnectionAge is a duration for the maximum amount of time a
+ # connection may exist before it will be closed by sending a GoAway. A
+ # random jitter of +/-10% will be added to MaxConnectionAge to spread out
+ # connection storms.
+ # 0s sets duration to infinity which is the default:
+ # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222
+ # MaxConnectionAge: "0s"
+
+ # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
+ # which the connection will be forcibly closed.
+ # 0s sets duration to infinity which is the default:
+ # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227
+ # MaxConnectionAgeGrace: "0s"
+
+ # After a duration of this time if the server doesn't see any activity it
+ # pings the client to see if the transport is still alive.
+ # If set below 1s, a minimum value of 1s will be used instead.
+ # 0s sets duration to 2 hours which is the default:
+ # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230
+ # Time: "10s"
+
+ # After having pinged for keepalive check, the server waits for a duration
+ # of Timeout and if no activity is seen even after that the connection is
+ # closed.
+ # 0s sets duration to 20 seconds which is the default:
+ # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233
+ # Timeout: "2s"
+
+ ################################
+ ## Sample Cache Configuration ##
+ ################################
+
+ # Sample Cache Configuration controls the sample cache used to retain information about trace
+ # status after the sampling decision has been made.
+ SampleCacheConfig:
+ # Type controls the type of sample cache used.
+ # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is
+ # 5x the size of the trace cache. This is tracing proxy's original sample cache strategy.
+ # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember
+ # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces.
+ # It is also more configurable. The cuckoo filter is recommended for most installations.
+ # Default is "legacy".
+ # Type: "cuckoo"
+
+ # KeptSize controls the number of traces preserved in the cuckoo kept traces cache.
+ # tracing proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some
+ # statistical information. This is most useful in cases where the trace was sent before sending
+ # the root span, so that the root span can be decorated with accurate metadata.
+ # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes).
+    # It does not apply to the "legacy" type of cache.
+ # KeptSize: 10_000
+
+ # DroppedSize controls the size of the cuckoo dropped traces cache.
+ # This cache consumes 4-6 bytes per trace at a scale of millions of traces.
+ # Changing its size with live reload sets a future limit, but does not have an immediate effect.
+ # Default is 1_000_000 traces.
+    # It does not apply to the "legacy" type of cache.
+ # DroppedSize: 1_000_000
+
+ # SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates
+ # the remaining capacity of its dropped traces cache and possibly cycles it.
+ # This cache is quite resilient so it doesn't need to happen very often, but the
+ # operation is also inexpensive.
+ # Default is 10 seconds.
+    # It does not apply to the "legacy" type of cache.
+ # SizeCheckInterval: "10s"
+
+
+rules:
+ # DryRun - If enabled, marks traces that would be dropped given current sampling rules,
+ # and sends all traces regardless
+ DryRun: true
+ # lb:
+ # This is the default sampler used.
+ # Any traces received that are not for a defined dataset will use this sampler.
+ # Deterministic Sampler implementation. This is the simplest sampling algorithm
+ # - it is a static sample rate, choosing traces randomly to either keep or send
+ # (at the appropriate rate). It is not influenced by the contents of the trace.
+ Sampler: DeterministicSampler
+
+ # SampleRate is the rate at which to sample. It indicates a ratio, where one
+ # sample trace is kept for every n traces seen. For example, a SampleRate of 30
+ # will keep 1 out of every 30 traces.
+ SampleRate: 1
+
+ ## Dataset sampling rules ##
+ # Specify dataset rules by creating an object for each dataset
+ # Note: If your dataset name contains a space, you will have to escape the dataset name
+ # using single quotes, such as "dataset 1":
+ #
+ # This example creates a sampling definition for a dataset called: test-dataset
+ # test-dataset:
+ # Sampler: EMADynamicSampler
+ # GoalSampleRate: 5
+ # FieldList:
+ # - request.method
+ # - response.status_code
+
+ # LiveReload - If disabled, triggers a rolling restart of the cluster whenever
+ # the Rules configmap changes
+ LiveReload: true
+
+
+# Redis configuration
+redis:
+ # To install a simple single pod Redis deployment set this to true.
+ # If false, you must specify a value for existingHost
+ # For production, it is recommended to set this to false and provide
+ # a highly available Redis configuration using redis.existingHost
+ enabled: true
+
+ # If redis.enabled is false this needs to be specified.
+ # This needs to be the name:port of a Redis configuration
+ # existingHost:
+
+  # If redis.enabled is true, this is the image that will be used to create
+ # the Redis deployment
+ image:
+ repository: redis
+ tag: 6.2.5
+ pullPolicy: IfNotPresent
+
+ # Node selector specific to installed Redis configuration. Requires redis.enabled to be true
+ nodeSelector: { }
+
+ # Tolerations specific to installed Redis configuration. Requires redis.enabled to be true
+ tolerations: [ ]
+
+ # Affinity specific to installed Redis configuration. Requires redis.enabled to be true
+ affinity: { }
+
+
+nodeSelector: { }
+
diff --git a/build/kubernetes/yaml/k8s-config-cm.yaml b/build/kubernetes/yaml/k8s-config-cm.yaml
new file mode 100644
index 0000000000..ca4a809bb2
--- /dev/null
+++ b/build/kubernetes/yaml/k8s-config-cm.yaml
@@ -0,0 +1,420 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: opsramp-tracing-proxy
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: opsramp-tracing-proxy-config
+ labels:
+ name: opsramp-tracing-proxy-config
+ namespace: opsramp-tracing-proxy
+data:
+ config.yaml: |-
+ ########################
+ ## Trace Proxy Config ##
+ ########################
+
+ # ListenAddr is the IP and port on which to listen for incoming events. Incoming
+ # traffic is expected to be HTTP, so if using SSL put something like nginx in
+ # front to do the TLS Termination.
+ ListenAddr: 0.0.0.0:8082
+
+ # GRPCListenAddr is the IP and port on which to listen for incoming events over
+ # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put something like nginx in
+ # front to do the TLS Termination.
+ GRPCListenAddr: 0.0.0.0:9090
+
+ # PeerListenAddr is the IP and port on which to listen for traffic being
+ # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL
+ # put something like nginx in front to do the decryption. Must be different from
+ # ListenAddr
+ PeerListenAddr: 0.0.0.0:8083
+
+ GRPCPeerListenAddr: 0.0.0.0:8084
+
+ # CompressPeerCommunication determines whether to compress span data
+ # it forwards to peers. If it costs money to transmit data between different
+ # instances (e.g. they're spread across AWS availability zones), then you
+ # almost certainly want compression enabled to reduce your bill. The option to
+ # disable it is provided as an escape hatch for deployments that value lower CPU
+ # utilization over data transfer costs.
+ CompressPeerCommunication: true
+
+ # OpsrampAPI is the URL for the upstream Opsramp API.
+ OpsrampAPI: ""
+
+ # Dataset you want to use for sampling
+ Dataset: "ds"
+
+ #Tls Options
+ UseTls: true
+ UseTlsInsecure: false
+
+ # LoggingLevel valid options are "debug", "info", "error", and "panic".
+ LoggingLevel: error
+
+ # SendDelay is a short timer that will be triggered when a trace is complete.
+ # Trace Proxy will wait for this duration before actually sending the trace. The
+ # reason for this short delay is to allow for small network delays or clock
+ # jitters to elapse and any final spans to arrive before actually sending the
+ # trace. This supports duration strings with supplied units. Set to 0 for
+ # immediate sends.
+ SendDelay: 2s
+
+ # BatchTimeout dictates how frequently to send unfulfilled batches. By default
+ # this will use the DefaultBatchTimeout in libtrace as its value, which is 100ms.
+ # Eligible for live reload.
+ BatchTimeout: 1s
+
+ # TraceTimeout is a long timer; it represents the outside boundary of how long
+ # to wait before sending an incomplete trace. Normally traces are sent when the
+ # root span arrives. Sometimes the root span never arrives (due to crashes or
+ # whatever), and this timer will send a trace even without having received the
+ # root span. If you have particularly long-lived traces you should increase this
+ # timer. This supports duration strings with supplied units.
+ TraceTimeout: 60s
+
+ # MaxBatchSize is the number of events to be included in the batch for sending
+ MaxBatchSize: 500
+
+ # SendTicker is a short timer; it determines the duration to use to check for traces to send
+ SendTicker: 100ms
+
+ # UpstreamBufferSize and PeerBufferSize control how large of an event queue to use
+ # when buffering events that will be forwarded to peers or the upstream API.
+ UpstreamBufferSize: 1000
+ PeerBufferSize: 1000
+
+ # AddHostMetadataToTrace determines whether to add information about
+ # the host that tracing proxy is running on to the spans that it processes.
+ # If enabled, information about the host will be added to each span with the
+ # key 'meta.local_hostname'.
+ AddHostMetadataToTrace: false
+
+ # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics
+ # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"}
+ # max number of additional keys supported is 5, if the limit is exceeded then we consider the first 5
+ # based on sorted order of keys
+ # "app" label is mandatory
+ AddAdditionalMetadata: { "app": "default" }
+
+ # EnvironmentCacheTTL is the amount of time a cache entry will live that associates
+ # an API key with an environment name.
+ # Cache misses lookup the environment name using OpsRampAPI config value.
+ # Default is 1 hour ("1h").
+ EnvironmentCacheTTL: "1h"
+
+ # QueryAuthToken, if specified, provides a token that must be specified with
+ # the header "X-OpsRamp-Tracing-Proxy-Query" in order for a /query request to succeed.
+ # These /query requests are intended for debugging OpsRamp-Tracing-Proxy installations and
+ # are not typically needed in normal operation.
+ # Can be specified in the environment as TRACING_PROXY_QUERY_AUTH_TOKEN.
+ # If left unspecified, the /query endpoints are inaccessible.
+ # QueryAuthToken: "some-random-value"
+
+ # AddRuleReasonToTrace causes traces that are sent to OpsRamp to include a field which
+ # contains text indicating which rule was evaluated that caused the trace to be included.
+ AddRuleReasonToTrace: true
+
+ # AdditionalErrorFields should be a list of span fields that should be included when logging
+ # errors that happen during ingestion of events (for example, the span too large error).
+ # This is primarily useful in trying to track down misbehaving senders in a large installation.
+ # The fields `dataset`, `apihost`, and `environment` are always included.
+ # If a field is not present in the span, it will not be present in the error log.
+ # Default is ["trace.span_id"].
+ AdditionalErrorFields:
+ - trace.span_id
+
+ # AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate
+ # the number of child spans on the trace at the time the sampling decision was made.
+ # This value is available to the rules-based sampler, making it possible to write rules that
+ # are dependent upon the number of spans in the trace.
+ # Default is false.
+ AddSpanCountToRoot: false
+
+ # CacheOverrunStrategy controls the cache management behavior under memory pressure.
+ # "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again,
+ # which is generally not helpful unless it occurs because of a permanent change in traffic patterns.
+ # In the "impact" strategy, the items having the most impact on the cache size are
+ # ejected from the cache earlier than normal but the cache is not resized.
+ # In all cases, it only applies if MaxAlloc is nonzero.
+ # Default is "resize" for compatibility but "impact" is recommended for most installations.
+ CacheOverrunStrategy: "impact"
+
+ #########################
+ ## Retry Configuration ##
+ #########################
+ RetryConfiguration:
+ # InitialInterval is the time to wait after the first failure before retrying.
+ InitialInterval: 500ms
+ # RandomizationFactor is a random factor used to calculate next backoff
+ # Randomized interval = RetryInterval * (1 ± RandomizationFactor)
+ RandomizationFactor: 0.5
+ # Multiplier is the value multiplied by the backoff interval bounds
+ Multiplier: 1.5
+ # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between
+ # consecutive retries will always be `MaxInterval`.
+ MaxInterval: 60s
+ # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request.
+ # Once this value is reached, the data is discarded.
+ MaxElapsedTime: 15m
+
+ #########################
+ ## Proxy Configuration ##
+ #########################
+ ProxyConfiguration:
+ # Protocol accepts http and https
+ Protocol: "http"
+ # Host takes the proxy server address
+ Host: ""
+ # Port takes the proxy server port
+ Port: 3128
+ # UserName takes the proxy username
+ Username: ""
+ # Password takes the proxy password
+ Password: ""
+
+ ##################################
+ ## Authentication Configuration ##
+ ##################################
+ AuthConfiguration:
+ # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made
+ Endpoint: ""
+ # Key - authentication key provided in OpsRamp Portal
+ Key: ""
+ # Secret - authentication Secret provided in OpsRamp Portal
+ Secret: ""
+ # TenantId - tenant/client id to which the traces are to be posted
+ TenantId: ""
+
+ ############################
+ ## Implementation Choices ##
+ ############################
+ # Each of the config options below chooses an implementation of a Trace Proxy
+ # component to use. Depending on the choice, there may be more configuration
+ # required below in the section for that choice. Changing implementation choices
+ # requires a process restart.
+ # Collector describes which collector to use for collecting traces. The only
+ # current valid option is "InMemCollector". More can be added by adding
+ # implementations of the Collector interface.
+ Collector: "InMemCollector"
+
+ # InMemCollector brings together all the settings that are relevant to
+ # collecting spans together to make traces.
+ InMemCollector:
+
+ # The collection cache is used to collect all spans into a trace as well as
+ # remember the sampling decision for any spans that might come in after the
+ # trace has been marked "complete" (either by timing out or seeing the root
+ # span). The number of traces in the cache should be many multiples (100x to
+ # 1000x) of the total number of concurrently active traces (trace throughput *
+ # trace duration).
+ CacheCapacity: 1000
+
+ # MaxAlloc is optional. If set, it must be an integer >= 0.
+ # If set to a non-zero value, once per tick (see SendTicker) the collector
+ # will compare total allocated bytes to this value. If allocation is too
+ # high, cache capacity will be reduced and an error will be logged.
+ # Useful values for this setting are generally in the range of 75%-90% of
+ # available system memory. Using 80% is recommended.
+ # This value should be set according to the resources.limits.memory
+ # By default that setting is 4GB, and this is set to 80% of that limit
+ # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,836
+ # MaxAlloc: 3435973836
+ MaxAlloc: 0
+
+ #####################
+ ## Peer Management ##
+ #####################
+
+ # Configure how OpsRamp-Tracing-Proxy peers are discovered and managed
+ PeerManagement:
+ # Strategy controls the way that traces are assigned to Trace Proxy nodes.
+ # The "legacy" strategy uses a simple algorithm that unfortunately causes
+ # 1/2 of the in-flight traces to be assigned to a different node whenever the
+ # number of nodes changes.
+ # The legacy strategy is deprecated and is intended to be removed in a future release.
+ # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the
+ # number of nodes) are disrupted when the node count changes.
+ # Not eligible for live reload.
+ Strategy: "hash"
+
+ ###########################################################
+ ###### File (Suitable only for VM based deployments ######
+ ###### and single replica k8s deployments) ######
+ ###########################################################
+ Type: "file"
+
+ # Peers is the list of all servers participating in this proxy cluster. Events
+ # will be sharded evenly across all peers based on the Trace ID. Values here
+ # should be the base URL used to access the peer, and should include scheme,
+ # hostname (or ip address) and port. All servers in the cluster should be in
+ # this list, including this host.
+ Peers: [
+ "http://127.0.0.1:8084", #only grpc peer listener used
+ ]
+ ###########################################################
+
+ ###########################################################
+ ###### Redis (Suitable for all types of deployments) ######
+ ###########################################################
+ ## The type should always be redis when deployed to Kubernetes environments
+ #Type: "redis"
+
+ ## RedisHost is used to connect to redis for peer cluster membership management.
+ ## Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes
+ ## precedence and this value is ignored.
+ ## Not eligible for live reload.
+ ## RedisHost will default to the name used for the release or name overrides depending on what is used,
+ ## but can be overridden to a specific value.
+ #RedisHost: '{{include "opsramp-tracing-proxy.redis.fullname" .}}:6379'
+
+ ## RedisUsername is the username used to connect to redis for peer cluster membership management.
+ ## If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes
+ ## precedence and this value is ignored.
+ ## Not eligible for live reload.
+ #RedisUsername: ""
+
+ ## RedisPassword is the password used to connect to redis for peer cluster membership management.
+ ## If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes
+ ## precedence and this value is ignored.
+ ## Not eligible for live reload.
+ #RedisPassword: ""
+
+ ## RedisPrefix is a string used as a prefix for the keys in redis while storing
+ ## the peer membership. It might be useful to set this in any situation where
+ ## multiple trace-proxy clusters or multiple applications want to share a single
+ ## Redis instance. It may not be blank.
+ #RedisPrefix: "tracing-proxy"
+
+ ## RedisDatabase is an integer from 0-15 indicating the database number to use
+ ## for the Redis instance storing the peer membership. It might be useful to set
+ ## this in any situation where multiple trace-proxy clusters or multiple
+ ## applications want to share a single Redis instance.
+ #RedisDatabase: 0
+
+ ## UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2.
+ ## Not eligible for live reload.
+ #UseTLS: false
+
+ ## UseTLSInsecure disables certificate checks
+ ## Not eligible for live reload.
+ #UseTLSInsecure: false
+
+ ## IdentifierInterfaceName is optional.
+ ## Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name.
+ ## When configured the pod's IP will be used in the peer list
+ #IdentifierInterfaceName: eth0
+
+ ## UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first
+ ## IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use
+ ## the first IPV6 unicast address found.
+ #UseIPV6Identifier: false
+ ###########################################################
+
+ # LogrusLogger is a section of the config only used if you are using the
+ # LogrusLogger to send all logs to STDOUT using the logrus package.
+ LogrusLogger:
+ # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"]
+ LogFormatter: 'json'
+ # LogOutput specifies where the logs are supposed to be written. Accepts one of ["stdout", "stderr"]
+ LogOutput: 'stdout'
+
+ MetricsConfig:
+ # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp
+ Enable: true
+
+ # ListenAddr determines the interface and port on which Prometheus will
+ # listen for requests for /metrics. Must be different from the main Trace Proxy
+ # listener.
+ ListenAddr: '0.0.0.0:2112'
+
+ # OpsRampAPI is the URL for the upstream OpsRamp API.
+ OpsRampAPI: ""
+
+ # ReportingInterval is the frequency specified in seconds at which
+ # the metrics are collected and sent to OpsRamp
+ ReportingInterval: 10
+
+ # MetricsList is a list of regular expressions which match the metric
+ # names. Keep the list as small as possible since too many regular expressions can lead to bad performance.
+ # Internally, all the items in the list are concatenated using '|' to make the computation faster.
+ MetricsList: [ ".*" ]
+
+ GRPCServerParameters:
+ # MaxConnectionIdle is a duration for the amount of time after which an
+ # idle connection would be closed by sending a GoAway. Idleness duration is
+ # defined since the most recent time the number of outstanding RPCs became
+ # zero or the connection establishment.
+ # 0s sets duration to infinity which is the default:
+ # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219
+ # MaxConnectionIdle: "1m"
+
+ # MaxConnectionAge is a duration for the maximum amount of time a
+ # connection may exist before it will be closed by sending a GoAway. A
+ # random jitter of +/-10% will be added to MaxConnectionAge to spread out
+ # connection storms.
+ # 0s sets duration to infinity which is the default:
+ # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222
+ # MaxConnectionAge: "0s"
+
+ # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
+ # which the connection will be forcibly closed.
+ # 0s sets duration to infinity which is the default:
+ # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227
+ # MaxConnectionAgeGrace: "0s"
+
+ # After a duration of this time if the server doesn't see any activity it
+ # pings the client to see if the transport is still alive.
+ # If set below 1s, a minimum value of 1s will be used instead.
+ # 0s sets duration to 2 hours which is the default:
+ # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230
+ # Time: "10s"
+
+ # After having pinged for keepalive check, the server waits for a duration
+ # of Timeout and if no activity is seen even after that the connection is
+ # closed.
+ # 0s sets duration to 20 seconds which is the default:
+ # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233
+ # Timeout: "2s"
+
+ ################################
+ ## Sample Cache Configuration ##
+ ################################
+
+ # Sample Cache Configuration controls the sample cache used to retain information about trace
+ # status after the sampling decision has been made.
+ SampleCacheConfig:
+ # Type controls the type of sample cache used.
+ # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is
+ # 5x the size of the trace cache. This is tracing proxy's original sample cache strategy.
+ # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember
+ # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces.
+ # It is also more configurable. The cuckoo filter is recommended for most installations.
+ # Default is "legacy".
+ # Type: "cuckoo"
+
+ # KeptSize controls the number of traces preserved in the cuckoo kept traces cache.
+ # tracing proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some
+ # statistical information. This is most useful in cases where the trace was sent before sending
+ # the root span, so that the root span can be decorated with accurate metadata.
+ # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes).
+ # It does not apply to the "legacy" type of cache.
+ # KeptSize: 10_000
+
+ # DroppedSize controls the size of the cuckoo dropped traces cache.
+ # This cache consumes 4-6 bytes per trace at a scale of millions of traces.
+ # Changing its size with live reload sets a future limit, but does not have an immediate effect.
+ # Default is 1_000_000 traces.
+ # It does not apply to the "legacy" type of cache.
+ # DroppedSize: 1_000_000
+
+ # SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates
+ # the remaining capacity of its dropped traces cache and possibly cycles it.
+ # This cache is quite resilient so it doesn't need to happen very often, but the
+ # operation is also inexpensive.
+ # Default is 10 seconds.
+ # It does not apply to the "legacy" type of cache.
+ # SizeCheckInterval: "10s"
diff --git a/build/kubernetes/yaml/k8s-deployment.yaml b/build/kubernetes/yaml/k8s-deployment.yaml
new file mode 100644
index 0000000000..95f5a91010
--- /dev/null
+++ b/build/kubernetes/yaml/k8s-deployment.yaml
@@ -0,0 +1,94 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: opsramp-tracing-proxy
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: opsramp-tracing-proxy
+ namespace: opsramp-tracing-proxy
+ labels:
+ app: opsramp-tracing-proxy
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: opsramp-tracing-proxy
+ template:
+ metadata:
+ labels:
+ app: opsramp-tracing-proxy
+ spec:
+ containers:
+ - name: opsramp-tracing-proxy
+ image: us-docker.pkg.dev/opsramp-registry/agent-images/trace-proxy
+ imagePullPolicy: Always
+ command:
+ - "/usr/bin/tracing-proxy"
+ - "-c"
+ - "/etc/tracing-proxy/config.yaml"
+ - "-r"
+ - "/etc/tracing-proxy/rules.yaml"
+ ports:
+ - name: http
+ containerPort: 8082
+ protocol: TCP
+ - name: grpc
+ containerPort: 9090
+ protocol: TCP
+ - name: peer
+ containerPort: 8083
+ protocol: TCP
+ - containerPort: 8084
+ name: grpc-peer
+ resources:
+ requests:
+ memory: "2048Mi"
+ cpu: "2"
+ limits:
+ memory: "8096Mi"
+ cpu: "4"
+ volumeMounts:
+ - name: opsramp-tracing-rules
+ mountPath: /etc/tracing-proxy/rules.yaml
+ subPath: rules.yaml
+ readOnly: true
+ - name: opsramp-tracing-config
+ mountPath: /etc/tracing-proxy/config.yaml
+ subPath: config.yaml
+ readOnly: true
+ volumes:
+ - configMap:
+ name: opsramp-tracing-proxy-rules
+ name: opsramp-tracing-rules
+ - configMap:
+ name: opsramp-tracing-proxy-config
+ name: opsramp-tracing-config
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: opsramp-tracing-proxy
+ namespace: opsramp-tracing-proxy
+spec:
+ selector:
+ app: opsramp-tracing-proxy
+ ports:
+ - protocol: TCP
+ port: 9090
+ targetPort: 9090
+ name: grpc
+ - protocol: TCP
+ port: 8082
+ targetPort: 8082
+ name: http
+ - protocol: TCP
+ port: 8083
+ targetPort: 8083
+ name: peer
+ - protocol: TCP
+ port: 8084
+ targetPort: 8084
+ name: grpc-peer
\ No newline at end of file
diff --git a/build/kubernetes/yaml/k8s-rules-cm.yaml b/build/kubernetes/yaml/k8s-rules-cm.yaml
new file mode 100644
index 0000000000..14df8f1cd4
--- /dev/null
+++ b/build/kubernetes/yaml/k8s-rules-cm.yaml
@@ -0,0 +1,225 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: opsramp-tracing-proxy
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: opsramp-tracing-proxy-rules
+ labels:
+ name: opsramp-tracing-proxy-rules
+ namespace: opsramp-tracing-proxy
+data:
+ rules.yaml: |-
+ ############################
+ ## Sampling Rules Config ##
+ ############################
+
+ # DryRun - If enabled, marks traces that would be dropped given current sampling rules,
+ # and sends all traces regardless
+ DryRun: true
+
+ # DryRunFieldName - the key to add to event data when using DryRun mode above, defaults to trace_proxy_kept
+ DryRunFieldName: trace_proxy_kept
+
+ # DeterministicSampler is a section of the config for manipulating the
+ # Deterministic Sampler implementation. This is the simplest sampling algorithm
+ # - it is a static sample rate, choosing traces randomly to either keep or send
+ # (at the appropriate rate). It is not influenced by the contents of the trace.
+ Sampler: DeterministicSampler
+
+ # SampleRate is the rate at which to sample. It indicates a ratio, where one
+ # sample trace is kept for every n traces seen. For example, a SampleRate of 30
+ # will keep 1 out of every 30 traces. The choice on whether to keep any specific
+ # trace is random, so the rate is approximate.
+ # Eligible for live reload.
+ SampleRate: 1
+
+ #dataset1:
+ #
+ # # Note: If your dataset name contains a space, you will have to escape the dataset name
+ # # using single quotes, such as ['dataset 1']
+ #
+ # # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler
+ # # implementation. This sampler collects the values of a number of fields from a
+ # # trace and uses them to form a key. This key is handed to the standard dynamic
+ # # sampler algorithm which generates a sample rate based on the frequency with
+ # # which that key has appeared in the previous ClearFrequencySec seconds. This
+ # # sampler uses the AvgSampleRate algorithm from
+ # # that package.
+ # Sampler: DynamicSampler
+ #
+ # # SampleRate is the goal rate at which to sample. It indicates a ratio, where
+ # # one sample trace is kept for every n traces seen. For example, a SampleRate of
+ # # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic
+ # # sampler, who assigns a sample rate for each trace based on the fields selected
+ # # from that trace.
+ # SampleRate: 2
+ #
+ # # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler.
+ # # The combination of values from all of these fields should reflect how interesting the trace is compared to
+ # # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for
+ # # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent
+ # # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of
+ # # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is
+ # # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a
+ # # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can
+ # # become interesting when indicating an error) as a good set of fields since it will allow proper sampling
+ # # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint.
+ # # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of
+ # # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces.
+ # # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore
+ # # interesting traces, like traces that experienced a `500`, might not be sampled.
+ # # Field names may come from any span in the trace.
+ # FieldList:
+ # - ""
+ #
+ # # UseTraceLength will add the number of spans in the trace in to the dynamic
+ # # sampler as part of the key. The number of spans is exact, so if there are
+ # # normally small variations in trace length you may want to leave this off. If
+ # # traces are consistent lengths and changes in trace length is a useful
+ # # indicator of traces you'd like to see in OpsRamp, set this to true.
+ # UseTraceLength: true
+ #
+ # # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field
+ # # to the root span of the trace containing the key used by the sampler to decide
+ # # the sample rate. This can be helpful in understanding why the sampler is
+ # # making certain decisions about sample rate and help you understand how to
+ # # better choose the sample rate key (aka the FieldList setting above) to use.
+ # AddSampleRateKeyToTrace: true
+ #
+ # # AddSampleRateKeyToTraceField is the name of the field the sampler will use
+ # # when adding the sample rate key to the trace. This setting is only used when
+ # # AddSampleRateKeyToTrace is true.
+ # AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key
+ #
+ # # ClearFrequencySec is the duration (in seconds) over which the sampler will
+ # # calculate the sample rate. This setting defaults
+ # # to 30.
+ # ClearFrequencySec: 60
+ #dataset2:
+ #
+ # # EMADynamicSampler is a section of the config for manipulating the Exponential
+ # # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler,
+ # # it attempts to average a given sample rate, weighting rare traffic and frequent
+ # # traffic differently so as to end up with the correct average.
+ # #
+ # # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended
+ # # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs
+ # # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential
+ # # Moving Average of counts seen per key, and adjusts this average at regular intervals.
+ # # The weight applied to more recent intervals is defined by `weight`, a number between
+ # # (0, 1) - larger values weight the average more toward recent observations. In other words,
+ # # a larger weight will cause sample rates more quickly adapt to traffic patterns,
+ # # while a smaller weight will result in sample rates that are less sensitive to bursts or drops
+ # # in traffic and thus more consistent over time.
+ # #
+ # # Keys that are not found in the EMA will always have a sample
+ # # rate of 1. Keys that occur more frequently will be sampled on a logarithmic
+ # # curve. In other words, every key will be represented at least once in any
+ # # given window and more frequent keys will have their sample rate
+ # # increased proportionally to wind up with the goal sample rate.
+ # Sampler: EMADynamicSampler
+ #
+ # # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where
+ # # one sample trace is kept for every n traces seen. For example, a SampleRate of
+ # # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic
+ # # sampler, who assigns a sample rate for each trace based on the fields selected
+ # # from that trace.
+ # GoalSampleRate: 2
+ #
+ # # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler.
+ # # The combination of values from all of these fields should reflect how interesting the trace is compared to
+ # # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for
+ # # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent
+ # # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of
+ # # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is
+ # # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a
+ # # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can
+ # # become interesting when indicating an error) as a good set of fields since it will allow proper sampling
+ # # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint.
+ # # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of
+ # # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces.
+ # # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore
+ # # interesting traces, like traces that experienced a `500`, might not be sampled.
+ # # Field names may come from any span in the trace.
+ # FieldList: []
+ #
+ # # UseTraceLength will add the number of spans in the trace in to the dynamic
+ # # sampler as part of the key. The number of spans is exact, so if there are
+ # # normally small variations in trace length you may want to leave this off. If
+ # # traces are consistent lengths and changes in trace length is a useful
+ # # indicator of traces you'd like to see in OpsRamp, set this to true.
+ # UseTraceLength: true
+ #
+ # # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field
+ # # to the root span of the trace containing the key used by the sampler to decide
+ # # the sample rate. This can be helpful in understanding why the sampler is
+ # # making certain decisions about sample rate and help you understand how to
+ # # better choose the sample rate key (aka the FieldList setting above) to use.
+ # AddSampleRateKeyToTrace: true
+ #
+ # # AddSampleRateKeyToTraceField is the name of the field the sampler will use
+ # # when adding the sample rate key to the trace. This setting is only used when
+ # # AddSampleRateKeyToTrace is true.
+ # AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key
+ #
+ # # AdjustmentInterval defines how often (in seconds) we adjust the moving average from
+ # # recent observations. Default 15s
+ # AdjustmentInterval: 15
+ #
+ # # Weight is a value between (0, 1) indicating the weighting factor used to adjust
+ # # the EMA. With larger values, newer data will influence the average more, and older
+ # # values will be factored out more quickly. In mathematical literature concerning EMA,
+ # # this is referred to as the `alpha` constant.
+ # # Default is 0.5
+ # Weight: 0.5
+ #
+ # # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA.
+ # # Once MaxKeys is reached, new keys will not be included in the sample rate map, but
+ # # existing keys will continue to be counted. You can use this to keep the sample rate
+ # # map size under control.
+ # MaxKeys: 0
+ #
+ # # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key
+ # # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to
+ # # decide what constitutes "zero". Keys with averages below this threshold will be removed
+ # # from the EMA. Default is the same as Weight, as this prevents a key with the smallest
+ # # integer value (1) from being aged out immediately. This value should generally be <= Weight,
+ # # unless you have very specific reasons to set it higher.
+ # AgeOutValue: 0.5
+ #
+ # # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define
+ # # the burst detection threshold. If total counts observed for a given interval exceed the threshold
+ # # EMA is updated immediately, rather than waiting on the AdjustmentInterval.
+ # # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles,
+ # # burst detection will kick in.
+ # BurstMultiple: 2
+ #
+ # # BurstDetectionDelay indicates the number of intervals to run after Start is called before
+ # # burst detection kicks in.
+ # # Defaults to 3
+ # BurstDetectionDelay: 3
+ #dataset3:
+ # Sampler: DeterministicSampler
+ # SampleRate: 10
+ #dataset4:
+ # Sampler: RulesBasedSampler
+ # CheckNestedFields: false
+ # rule:
+ # # Rule name
+ # - name: ""
+ # # Drop Condition (examples: true, false)
+ # drop:
+ # condition:
+ # # Field Name (example: status_code)
+ # - field: ""
+ # # Operator Value (example: =)
+ # operator: ""
+ # # Field Value (example: 500)
+ # value: ""
+ #dataset5:
+ # Sampler: TotalThroughputSampler
+ # GoalThroughputPerSec: 100
+ # FieldList: ''
diff --git a/build/vm/configure.go b/build/vm/configure.go
new file mode 100644
index 0000000000..630e1b7a5b
--- /dev/null
+++ b/build/vm/configure.go
@@ -0,0 +1,76 @@
+package main
+
+import (
+ "flag"
+ "log"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+)
+
+const ServiceName = "tracing-proxy.service"
+
+// main reads the packaged config template, substitutes the OpsRamp
+// credentials supplied on the command line, writes the result back, and
+// enables/starts the tracing-proxy systemd service.
+func main() {
+	configFile, err := os.ReadFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	api := flag.String("A", "", "API for Authorization")
+	key := flag.String("K", "", "OpsRamp Key")
+	secret := flag.String("S", "", "OpsRamp Secret")
+	tenant := flag.String("T", "", "OpsRamp TenantID")
+	tracesAPI := flag.String("B", "", "API to Send Traces (Defaults to Authorization API specified using -A flag if not set)")
+	metricsAPI := flag.String("M", "", "API To Send Metrics (Defaults to Authorization API specified using -A flag if not set)")
+	flag.Parse()
+
+	if *api == "" {
+		log.Fatal("api cant be empty, please specify using -A flag")
+	}
+	if *key == "" {
+		log.Fatal("key cant be empty, please specify using -K flag")
+	}
+	if *secret == "" {
+		log.Fatal("secret cant be empty, please specify using -S flag")
+	}
+	if *tenant == "" {
+		log.Fatal("tenant cant be empty, please specify using -T flag")
+	}
+	if *tracesAPI == "" {
+		*tracesAPI = *api
+	}
+	if *metricsAPI == "" {
+		*metricsAPI = *api
+	}
+
+	// BUG FIX: the original passed "" as the old string to strings.ReplaceAll;
+	// in Go that inserts the replacement between every character of the file
+	// instead of substituting a placeholder. Substitute explicit placeholder
+	// tokens instead.
+	// NOTE(review): confirm these tokens match the placeholders shipped in
+	// config_complete.yaml — the originals are not visible from this file.
+	fileContent := string(configFile)
+	fileContent = strings.ReplaceAll(fileContent, "<OPSRAMP_API>", *api)
+	fileContent = strings.ReplaceAll(fileContent, "<OPSRAMP_TRACES_API>", *tracesAPI)
+	fileContent = strings.ReplaceAll(fileContent, "<OPSRAMP_METRICS_API>", *metricsAPI)
+	fileContent = strings.ReplaceAll(fileContent, "<OPSRAMP_KEY>", *key)
+	fileContent = strings.ReplaceAll(fileContent, "<OPSRAMP_SECRET>", *secret)
+	fileContent = strings.ReplaceAll(fileContent, "<OPSRAMP_TENANT_ID>", *tenant)
+
+	// BUG FIX: 600 is a decimal literal (octal 1130); the intended mode is
+	// octal 0600 — owner read/write only, since the file now holds secrets.
+	if err = os.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml", []byte(fileContent), 0600); err != nil {
+		log.Fatal(err)
+	}
+
+	// Enable and start in one step, with a fallback for systemd versions
+	// that do not support "enable --now".
+	if err := exec.Command("systemctl", "enable", "--now", ServiceName).Run(); err != nil {
+		_ = exec.Command("systemctl", "start", ServiceName).Run()
+		_ = exec.Command("systemctl", "enable", ServiceName).Run()
+	}
+
+	time.Sleep(5 * time.Second)
+
+	// Verify the service is enabled and active, retrying once if not.
+	// BUG FIX: systemctl output ends with a trailing newline, so the original
+	// exact-equality checks could never match; trim whitespace before comparing.
+	if output, err := exec.Command("systemctl", "is-enabled", ServiceName).Output(); err != nil || strings.TrimSpace(string(output)) != "enabled" {
+		_ = exec.Command("systemctl", "enable", ServiceName).Run()
+	}
+	if output, err := exec.Command("systemctl", "is-active", ServiceName).Output(); err != nil || strings.TrimSpace(string(output)) != "active" {
+		_ = exec.Command("systemctl", "start", ServiceName).Run()
+	} else {
+		log.Println("Tracing-Proxy Started Successfully")
+	}
+}
diff --git a/build/vm/package_directories/etc/systemd/system/tracing-proxy.service b/build/vm/package_directories/etc/systemd/system/tracing-proxy.service
new file mode 100644
index 0000000000..09cffbca6a
--- /dev/null
+++ b/build/vm/package_directories/etc/systemd/system/tracing-proxy.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=tracing-proxy OpsRamp Trace-Aware Sampling Proxy
+After=network.target
+
+[Service]
+ExecStart=/opt/opsramp/tracing-proxy/bin/tracing-proxy -c /opt/opsramp/tracing-proxy/conf/config_complete.yaml -r /opt/opsramp/tracing-proxy/conf/rules_complete.yaml
+KillMode=process
+Restart=on-failure
+LimitNOFILE=infinity
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml
new file mode 100644
index 0000000000..2087f8ba92
--- /dev/null
+++ b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml
@@ -0,0 +1,421 @@
+########################
+## Trace Proxy Config ##
+########################
+
+# ListenAddr is the IP and port on which to listen for incoming events. Incoming
+# traffic is expected to be HTTP, so if using SSL put something like nginx in
+# front to do the TLS Termination.
+ListenAddr: 0.0.0.0:8082
+
+# GRPCListenAddr is the IP and port on which to listen for incoming events over
+# gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put something like nginx in
+# front to do the TLS Termination.
+GRPCListenAddr: 0.0.0.0:9090
+
+# PeerListenAddr is the IP and port on which to listen for traffic being
+# rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL
+# put something like nginx in front to do the decryption. Must be different from
+# ListenAddr
+PeerListenAddr: 0.0.0.0:8083
+
+GRPCPeerListenAddr: 0.0.0.0:8084
+
+# CompressPeerCommunication determines whether to compress span data
+# it forwards to peers. If it costs money to transmit data between different
+# instances (e.g. they're spread across AWS availability zones), then you
+# almost certainly want compression enabled to reduce your bill. The option to
+# disable it is provided as an escape hatch for deployments that value lower CPU
+# utilization over data transfer costs.
+CompressPeerCommunication: true
+
+# OpsrampAPI is the URL for the upstream Opsramp API.
+OpsrampAPI: ""
+
+# Dataset you want to use for sampling
+Dataset: "ds"
+
+#Tls Options
+UseTls: true
+UseTlsInsecure: false
+
+# LoggingLevel valid options are "debug", "info", "error", and "panic".
+LoggingLevel: error
+
+# SendDelay is a short timer that will be triggered when a trace is complete.
+# Trace Proxy will wait for this duration before actually sending the trace. The
+# reason for this short delay is to allow for small network delays or clock
+# jitters to elapse and any final spans to arrive before actually sending the
+# trace. This supports duration strings with supplied units. Set to 0 for
+# immediate sends.
+SendDelay: 2s
+
+# BatchTimeout dictates how frequently to send unfulfilled batches. By default
+# this will use the DefaultBatchTimeout in libtrace as its value, which is 100ms.
+# Eligible for live reload.
+BatchTimeout: 1s
+
+# TraceTimeout is a long timer; it represents the outside boundary of how long
+# to wait before sending an incomplete trace. Normally traces are sent when the
+# root span arrives. Sometimes the root span never arrives (due to crashes or
+# whatever), and this timer will send a trace even without having received the
+# root span. If you have particularly long-lived traces you should increase this
+# timer. This supports duration strings with supplied units.
+TraceTimeout: 60s
+
+# MaxBatchSize is the number of events to be included in the batch for sending
+MaxBatchSize: 500
+
+# SendTicker is a short timer; it determines the duration to use to check for traces to send
+SendTicker: 100ms
+
+# UpstreamBufferSize and PeerBufferSize control how large of an event queue to use
+# when buffering events that will be forwarded to peers or the upstream API.
+UpstreamBufferSize: 1000
+PeerBufferSize: 1000
+
+# AddHostMetadataToTrace determines whether to add information about
+# the host that tracing proxy is running on to the spans that it processes.
+# If enabled, information about the host will be added to each span with the
+# key 'meta.local_hostname'.
+AddHostMetadataToTrace: false
+
+# AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics
+# the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"}
+# max number of additional keys supported is 5; if the limit is exceeded then we consider the first 5
+# based on sorted order of keys
+# "app" label is mandatory
+AddAdditionalMetadata: { "app": "default" }
+
+# EnvironmentCacheTTL is the amount of time a cache entry will live that associates
+# an API key with an environment name.
+# Cache misses lookup the environment name using OpsRampAPI config value.
+# Default is 1 hour ("1h").
+EnvironmentCacheTTL: "1h"
+
+# QueryAuthToken, if specified, provides a token that must be specified with
+# the header "X-OpsRamp-Tracing-Proxy-Query" in order for a /query request to succeed.
+# These /query requests are intended for debugging OpsRamp-Tracing-Proxy installations and
+# are not typically needed in normal operation.
+# Can be specified in the environment as TRACING_PROXY_QUERY_AUTH_TOKEN.
+# If left unspecified, the /query endpoints are inaccessible.
+# QueryAuthToken: "some-random-value"
+
+# AddRuleReasonToTrace causes traces that are sent to OpsRamp to include a field which
+# contains text indicating which rule was evaluated that caused the trace to be included.
+AddRuleReasonToTrace: true
+
+# AdditionalErrorFields should be a list of span fields that should be included when logging
+# errors that happen during ingestion of events (for example, the span too large error).
+# This is primarily useful in trying to track down misbehaving senders in a large installation.
+# The fields `dataset`, `apihost`, and `environment` are always included.
+# If a field is not present in the span, it will not be present in the error log.
+# Default is ["trace.span_id"].
+AdditionalErrorFields:
+ - trace.span_id
+
+# AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate
+# the number of child spans on the trace at the time the sampling decision was made.
+# This value is available to the rules-based sampler, making it possible to write rules that
+# are dependent upon the number of spans in the trace.
+# Default is false.
+AddSpanCountToRoot: false
+
+# CacheOverrunStrategy controls the cache management behavior under memory pressure.
+# "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again,
+# which is generally not helpful unless it occurs because of a permanent change in traffic patterns.
+# In the "impact" strategy, the items having the most impact on the cache size are
+# ejected from the cache earlier than normal but the cache is not resized.
+# In all cases, it only applies if MaxAlloc is nonzero.
+# Default is "resize" for compatibility but "impact" is recommended for most installations.
+CacheOverrunStrategy: "impact"
+
+#########################
+## Retry Configuration ##
+#########################
+RetryConfiguration:
+ # InitialInterval the time to wait after the first failure before retrying.
+ InitialInterval: 500ms
+ # RandomizationFactor is a random factor used to calculate next backoff
+ # Randomized interval = RetryInterval * (1 Âą RandomizationFactor)
+ RandomizationFactor: 0.5
+ # Multiplier is the value multiplied by the backoff interval bounds
+ Multiplier: 1.5
+ # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between
+ # consecutive retries will always be `MaxInterval`.
+ MaxInterval: 60s
+ # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request.
+ # Once this value is reached, the data is discarded.
+ MaxElapsedTime: 15m
+
+#########################
+## Proxy Configuration ##
+#########################
+ProxyConfiguration:
+ # Protocol accepts http and https
+ Protocol: "http"
+ # Host takes the proxy server address
+ Host: ""
+ # Port takes the proxy server port
+ Port: 3128
+ # UserName takes the proxy username
+ Username: ""
+ # Password takes the proxy password
+ Password: ""
+
+##################################
+## Authentication Configuration ##
+##################################
+AuthConfiguration:
+ # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made
+ Endpoint: ""
+ # Key - authentication key provided in OpsRamp Portal
+ Key: ""
+ # Secret - authentication Secret provided in OpsRamp Portal
+ Secret: ""
+ # TenantId - tenant/client id to which the traces are to be posted
+ TenantId: ""
+
+############################
+## Implementation Choices ##
+############################
+# Each of the config options below chooses an implementation of a Trace Proxy
+# component to use. Depending on the choice, there may be more configuration
+# required below in the section for that choice. Changing implementation choices
+# requires a process restart.
+# Collector describes which collector to use for collecting traces. The only
+# current valid option is "InMemCollector". More can be added by adding
+# implementations of the Collector interface.
+Collector: "InMemCollector"
+
+# InMemCollector brings together all the settings that are relevant to
+# collecting spans together to make traces.
+InMemCollector:
+
+ # The collection cache is used to collect all spans into a trace as well as
+ # remember the sampling decision for any spans that might come in after the
+ # trace has been marked "complete" (either by timing out or seeing the root
+ # span). The number of traces in the cache should be many multiples (100x to
+ # 1000x) of the total number of concurrently active traces (trace throughput *
+ # trace duration).
+ CacheCapacity: 1000
+
+ # MaxAlloc is optional. If set, it must be an integer >= 0.
+ # If set to a non-zero value, once per tick (see SendTicker) the collector
+ # will compare total allocated bytes to this value. If allocation is too
+ # high, cache capacity will be reduced and an error will be logged.
+ # Useful values for this setting are generally in the range of 75%-90% of
+ # available system memory. Using 80% is recommended.
+ # This value should be set according to the resources.limits.memory
+ # By default that setting is 4GB, and this is set to 80% of that limit
+ # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,836
+ # MaxAlloc: 3435973836
+ MaxAlloc: 0
+
+#####################
+## Peer Management ##
+#####################
+
+# Configure how OpsRamp-Tracing-Proxy peers are discovered and managed
+PeerManagement:
+ # Strategy controls the way that traces are assigned to Trace Proxy nodes.
+ # The "legacy" strategy uses a simple algorithm that unfortunately causes
+ # 1/2 of the in-flight traces to be assigned to a different node whenever the
+ # number of nodes changes.
+ # The legacy strategy is deprecated and is intended to be removed in a future release.
+ # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the
+ # number of nodes) are disrupted when the node count changes.
+ # Not eligible for live reload.
+ Strategy: "hash"
+
+ ###########################################################
+ ###### File (Suitable only for VM based deployments) ######
+ ###########################################################
+ Type: "file"
+
+ # Peers is the list of all servers participating in this proxy cluster. Events
+ # will be sharded evenly across all peers based on the Trace ID. Values here
+ # should be the base URL used to access the peer, and should include scheme,
+ # hostname (or ip address) and port. All servers in the cluster should be in
+ # this list, including this host.
+ Peers: [
+ "http://127.0.0.1:8084", #only grpc peer listener used
+ # "http://127.0.0.1:8083",
+ # "http://10.1.2.3.4:8080",
+ # "http://tracing-proxy-1231:8080",
+ # "http://peer-3.fqdn" // assumes port 80
+ ]
+ ###########################################################
+
+ ###########################################################
+ ###### Redis (Suitable for all types of deployments) ######
+ ###########################################################
+ # The type should always be redis when deployed to Kubernetes environments
+ # Type: "redis"
+ #
+ # # RedisHost is used to connect to redis for peer cluster membership management.
+ # # Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes
+ # # precedence and this value is ignored.
+ # # Not eligible for live reload.
+ # # RedisHost will default to the name used for the release or name overrides depending on what is used,
+ # # but can be overridden to a specific value.
+ # RedisHost: 0.0.0.0:22122
+ #
+ # # RedisUsername is the username used to connect to redis for peer cluster membership management.
+ # # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes
+ # # precedence and this value is ignored.
+ # # Not eligible for live reload.
+ # RedisUsername: ""
+ #
+ # # RedisPassword is the password used to connect to redis for peer cluster membership management.
+ # # If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes
+ # # precedence and this value is ignored.
+ # # Not eligible for live reload.
+ # RedisPassword: ""
+ #
+ # # RedisPrefix is a string used as a prefix for the keys in redis while storing
+ # # the peer membership. It might be useful to set this in any situation where
+ # # multiple trace-proxy clusters or multiple applications want to share a single
+ # # Redis instance. It may not be blank.
+ # RedisPrefix: "tracing-proxy"
+ #
+ # # RedisDatabase is an integer from 0-15 indicating the database number to use
+ # # for the Redis instance storing the peer membership. It might be useful to set
+ # # this in any situation where multiple trace-proxy clusters or multiple
+ # # applications want to share a single Redis instance.
+ # RedisDatabase: 0
+
+ # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2.
+ # Not eligible for live reload.
+ UseTLS: false
+
+ # UseTLSInsecure disables certificate checks
+ # Not eligible for live reload.
+ UseTLSInsecure: false
+
+ # IdentifierInterfaceName is optional.
+ # Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name.
+ # When configured the pod's IP will be used in the peer list
+ # IdentifierInterfaceName: eth0
+
+ # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first
+ # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use
+ # the first IPV6 unicast address found.
+ UseIPV6Identifier: false
+ ###########################################################
+
+# LogrusLogger is a section of the config only used if you are using the
+# LogrusLogger to send all logs to STDOUT using the logrus package.
+LogrusLogger:
+ # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"]
+ LogFormatter: 'logfmt'
+ # LogOutput specifies where the logs are supposed to be written. Accepts one of ["stdout", "stderr", "file"]
+ LogOutput: 'file'
+
+ # specifies configs for logs when LogOutput is set to "file"
+ File:
+ # FileName specifies the location where the logs are supposed be stored
+ FileName: "/var/log/opsramp/tracing-proxy.log"
+ # MaxSize is the maximum size in megabytes of the log file before it gets rotated.
+ MaxSize: 1
+ # MaxBackups is the maximum number of old log files to retain.
+ MaxBackups: 3
+ # Compress determines if the rotated log files should be compressed
+ # using gzip.
+ Compress: true
+
+MetricsConfig:
+ # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp
+ Enable: true
+
+ # ListenAddr determines the interface and port on which Prometheus will
+ # listen for requests for /metrics. Must be different from the main Trace Proxy
+ # listener.
+ ListenAddr: '0.0.0.0:2112'
+
+ # OpsRampAPI is the URL for the upstream OpsRamp API.
+ OpsRampAPI: ""
+
+ # ReportingInterval is the frequency specified in seconds at which
+ # the metrics are collected and sent to OpsRamp
+ ReportingInterval: 10
+
+ # MetricsList is a list of regular expressions which match the metric
+ # names. Keep the list as small as possible since too many regular expressions can lead to bad performance.
+ # Internally, all the items in the list are concatenated using '|' to make the computation faster.
+ MetricsList: [ ".*" ]
+
+GRPCServerParameters:
+# MaxConnectionIdle is a duration for the amount of time after which an
+# idle connection would be closed by sending a GoAway. Idleness duration is
+# defined since the most recent time the number of outstanding RPCs became
+# zero or the connection establishment.
+# 0s sets duration to infinity which is the default:
+# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219
+# MaxConnectionIdle: "1m"
+
+# MaxConnectionAge is a duration for the maximum amount of time a
+# connection may exist before it will be closed by sending a GoAway. A
+# random jitter of +/-10% will be added to MaxConnectionAge to spread out
+# connection storms.
+# 0s sets duration to infinity which is the default:
+# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222
+# MaxConnectionAge: "0s"
+
+# MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
+# which the connection will be forcibly closed.
+# 0s sets duration to infinity which is the default:
+# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227
+# MaxConnectionAgeGrace: "0s"
+
+# After a duration of this time if the server doesn't see any activity it
+# pings the client to see if the transport is still alive.
+# If set below 1s, a minimum value of 1s will be used instead.
+# 0s sets duration to 2 hours which is the default:
+# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230
+# Time: "10s"
+
+# After having pinged for keepalive check, the server waits for a duration
+# of Timeout and if no activity is seen even after that the connection is
+# closed.
+# 0s sets duration to 20 seconds which is the default:
+# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233
+# Timeout: "2s"
+
+################################
+## Sample Cache Configuration ##
+################################
+
+# Sample Cache Configuration controls the sample cache used to retain information about trace
+# status after the sampling decision has been made.
+SampleCacheConfig:
+# Type controls the type of sample cache used.
+# "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is
+# 5x the size of the trace cache. This is tracing proxy's original sample cache strategy.
+# "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember
+# a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces.
+# It is also more configurable. The cuckoo filter is recommended for most installations.
+# Default is "legacy".
+# Type: "cuckoo"
+
+# KeptSize controls the number of traces preserved in the cuckoo kept traces cache.
+# tracing proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some
+# statistical information. This is most useful in cases where the trace was sent before sending
+# the root span, so that the root span can be decorated with accurate metadata.
+# Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes).
+# It Does not apply to the "legacy" type of cache.
+# KeptSize: 10_000
+
+# DroppedSize controls the size of the cuckoo dropped traces cache.
+# This cache consumes 4-6 bytes per trace at a scale of millions of traces.
+# Changing its size with live reload sets a future limit, but does not have an immediate effect.
+# Default is 1_000_000 traces.
+# It Does not apply to the "legacy" type of cache.
+# DroppedSize: 1_000_000
+
+# SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates
+# the remaining capacity of its dropped traces cache and possibly cycles it.
+# This cache is quite resilient so it doesn't need to happen very often, but the
+# operation is also inexpensive.
+# Default is 10 seconds.
+# It Does not apply to the "legacy" type of cache.
+# SizeCheckInterval: "10s"
diff --git a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/rules_complete.yaml b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/rules_complete.yaml
new file mode 100644
index 0000000000..b6065605fc
--- /dev/null
+++ b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/rules_complete.yaml
@@ -0,0 +1,211 @@
+############################
+## Sampling Rules Config ##
+############################
+
+# DryRun - If enabled, marks traces that would be dropped given current sampling rules,
+# and sends all traces regardless
+DryRun: false
+
+# DryRunFieldName - the key to add to event data when using DryRun mode above, defaults to trace_proxy_kept
+DryRunFieldName: trace_proxy_kept
+
+# DeterministicSampler is a section of the config for manipulating the
+# Deterministic Sampler implementation. This is the simplest sampling algorithm
+# - it is a static sample rate, choosing traces randomly to either keep or send
+# (at the appropriate rate). It is not influenced by the contents of the trace.
+Sampler: DeterministicSampler
+
+# SampleRate is the rate at which to sample. It indicates a ratio, where one
+# sample trace is kept for every n traces seen. For example, a SampleRate of 30
+# will keep 1 out of every 30 traces. The choice on whether to keep any specific
+# trace is random, so the rate is approximate.
+# Eligible for live reload.
+SampleRate: 1
+
+#dataset1:
+#
+# # Note: If your dataset name contains a space, you will have to escape the dataset name
+# # using single quotes, such as ['dataset 1']
+#
+# # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler
+# # implementation. This sampler collects the values of a number of fields from a
+# # trace and uses them to form a key. This key is handed to the standard dynamic
+# # sampler algorithm which generates a sample rate based on the frequency with
+# # which that key has appeared in the previous ClearFrequencySec seconds. This
+# # sampler uses the AvgSampleRate algorithm from
+# # the dynsampler-go package.
+# Sampler: DynamicSampler
+#
+# # SampleRate is the goal rate at which to sample. It indicates a ratio, where
+# # one sample trace is kept for every n traces seen. For example, a SampleRate of
+# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic
+# # sampler, who assigns a sample rate for each trace based on the fields selected
+# # from that trace.
+# SampleRate: 2
+#
+# # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler.
+# # The combination of values from all of these fields should reflect how interesting the trace is compared to
+# # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for
+# # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent
+# # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of
+# # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is
+# # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a
+# # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can
+# # become interesting when indicating an error) as a good set of fields since it will allow proper sampling
+# # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint.
+# # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of
+# # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces.
+# # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore
+# # interesting traces, like traces that experienced a `500`, might not be sampled.
+# # Field names may come from any span in the trace.
+# FieldList:
+# - ""
+#
+# # UseTraceLength will add the number of spans in the trace in to the dynamic
+# # sampler as part of the key. The number of spans is exact, so if there are
+# # normally small variations in trace length you may want to leave this off. If
+# # traces are consistent lengths and changes in trace length is a useful
+# # indicator of traces you'd like to see in OpsRamp, set this to true.
+# UseTraceLength: true
+#
+# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field
+# # to the root span of the trace containing the key used by the sampler to decide
+# # the sample rate. This can be helpful in understanding why the sampler is
+# # making certain decisions about sample rate and help you understand how to
+# # better choose the sample rate key (aka the FieldList setting above) to use.
+# AddSampleRateKeyToTrace: true
+#
+# # AddSampleRateKeyToTraceField is the name of the field the sampler will use
+# # when adding the sample rate key to the trace. This setting is only used when
+# # AddSampleRateKeyToTrace is true.
+# AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key
+#
+# # ClearFrequencySec is the duration (in seconds) over which the sampler
+# # will calculate the sample rate. This setting defaults
+# # to 30.
+# ClearFrequencySec: 60
+#dataset2:
+#
+# # EMADynamicSampler is a section of the config for manipulating the Exponential
+# # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler,
+# # it attempts to average a given sample rate, weighting rare traffic and frequent
+# # traffic differently so as to end up with the correct average.
+# #
+# # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended
+# # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs
+# # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential
+# # Moving Average of counts seen per key, and adjusts this average at regular intervals.
+# # The weight applied to more recent intervals is defined by `weight`, a number between
+# # (0, 1) - larger values weight the average more toward recent observations. In other words,
+# # a larger weight will cause sample rates to adapt more quickly to traffic patterns,
+# # while a smaller weight will result in sample rates that are less sensitive to bursts or drops
+# # in traffic and thus more consistent over time.
+# #
+# # Keys that are not found in the EMA will always have a sample
+# # rate of 1. Keys that occur more frequently will be sampled on a logarithmic
+# # curve. In other words, every key will be represented at least once in any
+# # given window and more frequent keys will have their sample rate
+# # increased proportionally to wind up with the goal sample rate.
+# Sampler: EMADynamicSampler
+#
+# # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where
+# # one sample trace is kept for every n traces seen. For example, a SampleRate of
+# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic
+# # sampler, who assigns a sample rate for each trace based on the fields selected
+# # from that trace.
+# GoalSampleRate: 2
+#
+# # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler.
+# # The combination of values from all of these fields should reflect how interesting the trace is compared to
+# # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for
+# # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent
+# # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of
+# # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is
+# # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a
+# # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can
+# # become interesting when indicating an error) as a good set of fields since it will allow proper sampling
+# # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint.
+# # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of
+# # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces.
+# # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore
+# # interesting traces, like traces that experienced a `500`, might not be sampled.
+# # Field names may come from any span in the trace.
+# FieldList: []
+#
+# # UseTraceLength will add the number of spans in the trace in to the dynamic
+# # sampler as part of the key. The number of spans is exact, so if there are
+# # normally small variations in trace length you may want to leave this off. If
+# # traces are consistent lengths and changes in trace length are a useful
+# # indicator of traces you'd like to see in Opsramp, set this to true.
+# UseTraceLength: true
+#
+# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field
+# # to the root span of the trace containing the key used by the sampler to decide
+# # the sample rate. This can be helpful in understanding why the sampler is
+# # making certain decisions about sample rate and help you understand how to
+# # better choose the sample rate key (aka the FieldList setting above) to use.
+# AddSampleRateKeyToTrace: true
+#
+# # AddSampleRateKeyToTraceField is the name of the field the sampler will use
+# # when adding the sample rate key to the trace. This setting is only used when
+# # AddSampleRateKeyToTrace is true.
+# AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key
+#
+# # AdjustmentInterval defines how often (in seconds) we adjust the moving average from
+# # recent observations. Default 15s
+# AdjustmentInterval: 15
+#
+# # Weight is a value between (0, 1) indicating the weighting factor used to adjust
+# # the EMA. With larger values, newer data will influence the average more, and older
+# # values will be factored out more quickly. In mathematical literature concerning EMA,
+# # this is referred to as the `alpha` constant.
+# # Default is 0.5
+# Weight: 0.5
+#
+# # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA.
+# # Once MaxKeys is reached, new keys will not be included in the sample rate map, but
+# # existing keys will continue to be counted. You can use this to keep the sample rate
+# # map size under control.
+# MaxKeys: 0
+#
+# # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key
+# # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to
+# # decide what constitutes "zero". Keys with averages below this threshold will be removed
+# # from the EMA. Default is the same as Weight, as this prevents a key with the smallest
+# # integer value (1) from being aged out immediately. This value should generally be <= Weight,
+# # unless you have very specific reasons to set it higher.
+# AgeOutValue: 0.5
+#
+# # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define
+# # the burst detection threshold. If total counts observed for a given interval exceed the threshold
+# # EMA is updated immediately, rather than waiting on the AdjustmentInterval.
+# # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles,
+# # burst detection will kick in.
+# BurstMultiple: 2
+#
+# # BurstDetectionDelay indicates the number of intervals to run after Start is called before
+# # burst detection kicks in.
+# # Defaults to 3
+# BurstDetectionDelay: 3
+#dataset3:
+# Sampler: DeterministicSampler
+# SampleRate: 10
+#dataset4:
+# Sampler: RulesBasedSampler
+# CheckNestedFields: false
+# rule:
+# # Rule name
+# - name: ""
+# # Drop Condition (examples: true, false)
+# drop:
+# condition:
+# # Field Name (example: status_code)
+# - field: ""
+# # Operator Value (example: =)
+# operator: ""
+# # Field Value (example: 500)
+# value: ""
+#dataset5:
+# Sampler: TotalThroughputSampler
+# GoalThroughputPerSec: 100
+# FieldList: ''
diff --git a/build/vm/tracing-deb/script.sh b/build/vm/tracing-deb/script.sh
new file mode 100755
index 0000000000..feb67d304c
--- /dev/null
+++ b/build/vm/tracing-deb/script.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+# $1 is a version of the package
+Version=$1
+if [[ -z "$Version" ]]; then
+ Version=$VERSION_TAG
+fi
+
+BUILD_DIR="."
+
+if [ "$IS_GITHUB_ACTION" = "true" ]; then
+ BUILD_DIR="build/vm/tracing-deb"
+fi
+
+sed -i "/^Version/s/:.*$/: ${Version}/g" $BUILD_DIR/tracing/DEBIAN/control
+
+architecture=$(uname -m)
+if [ "$architecture" = "x86_64" ]; then
+ architecture='amd64'
+fi
+
+sed -i "/^Architecture/s/:.*$/: ${architecture}/g" $BUILD_DIR/tracing/DEBIAN/control
+
+# remove old data
+rm -rf $BUILD_DIR/output
+
+# Updating the files
+mkdir -p $BUILD_DIR/tracing/opt/opsramp/tracing-proxy/bin
+mkdir -p $BUILD_DIR/tracing/opt/opsramp/tracing-proxy/conf
+mkdir -p $BUILD_DIR/tracing/etc/systemd/system
+
+cp -r $BUILD_DIR/../package_directories/* $BUILD_DIR/tracing/
+
+# Building static binaries
+CGO_ENABLED=0 \
+ GOOS=linux \
+ GOARCH=amd64 \
+ go build -ldflags "-X main.BuildID=${Version}" \
+ -o $BUILD_DIR/tracing-proxy \
+ $BUILD_DIR/../../../cmd/tracing-proxy/main.go
+
+CGO_ENABLED=0 \
+ GOOS=linux \
+ GOARCH=amd64 \
+ go build -ldflags "-X main.BuildID=${Version}" \
+ -o $BUILD_DIR/configure \
+ $BUILD_DIR/../configure.go
+
+cp $BUILD_DIR/tracing-proxy $BUILD_DIR/tracing/opt/opsramp/tracing-proxy/bin/tracing-proxy
+cp $BUILD_DIR/configure $BUILD_DIR/tracing/opt/opsramp/tracing-proxy/bin/configure
+
+dpkg -b $BUILD_DIR/tracing
+
+# Rename the package with version and architecture
+packageName="tracing-proxy_"${architecture}"-"${Version}".deb"
+mkdir -p $BUILD_DIR/output
+mv $BUILD_DIR/tracing.deb $BUILD_DIR/output/"${packageName}"
+
+# Cleanup
+rm -rf $BUILD_DIR/tracing/opt
+rm -rf $BUILD_DIR/tracing/etc
+rm -rf $BUILD_DIR/configure $BUILD_DIR/tracing-proxy
diff --git a/build/vm/tracing-deb/tracing/DEBIAN/conffiles b/build/vm/tracing-deb/tracing/DEBIAN/conffiles
new file mode 100644
index 0000000000..2efffe9130
--- /dev/null
+++ b/build/vm/tracing-deb/tracing/DEBIAN/conffiles
@@ -0,0 +1,2 @@
+/opt/opsramp/tracing-proxy/conf/config_complete.yaml
+/opt/opsramp/tracing-proxy/conf/rules_complete.yaml
diff --git a/build/vm/tracing-deb/tracing/DEBIAN/control b/build/vm/tracing-deb/tracing/DEBIAN/control
new file mode 100644
index 0000000000..5c33427362
--- /dev/null
+++ b/build/vm/tracing-deb/tracing/DEBIAN/control
@@ -0,0 +1,7 @@
+Package: tracing-proxy
+Version: 1.1.0
+Architecture: amd64
+Essential: no
+Priority: optional
+Maintainer: sai kalyan
+Description: This is tracing proxy debian package
diff --git a/build/vm/tracing-deb/tracing/DEBIAN/postinst b/build/vm/tracing-deb/tracing/DEBIAN/postinst
new file mode 100755
index 0000000000..8a3134ca69
--- /dev/null
+++ b/build/vm/tracing-deb/tracing/DEBIAN/postinst
@@ -0,0 +1,9 @@
+mkdir -p /var/log/opsramp
+touch /var/log/opsramp/tracing-proxy.log
+chmod 644 /etc/systemd/system/tracing-proxy.service
+chmod 600 /opt/opsramp/tracing-proxy/conf
+chmod 600 /opt/opsramp/tracing-proxy/conf/config_complete.yaml
+chmod 600 /opt/opsramp/tracing-proxy/conf/rules_complete.yaml
+chmod 744 /opt/opsramp/tracing-proxy/bin
+chmod 744 /opt/opsramp/tracing-proxy/bin/configure
+chmod 744 /opt/opsramp/tracing-proxy/bin/tracing-proxy
\ No newline at end of file
diff --git a/build/vm/tracing-deb/tracing/DEBIAN/prerm b/build/vm/tracing-deb/tracing/DEBIAN/prerm
new file mode 100755
index 0000000000..7bca94d9ef
--- /dev/null
+++ b/build/vm/tracing-deb/tracing/DEBIAN/prerm
@@ -0,0 +1,10 @@
+echo "Uninstalling Tracing Proxy"
+systemctl stop tracing-proxy
+systemctl disable tracing-proxy
+if [ -f /etc/systemd/system/tracing-proxy.service ]; then
+ rm -rf /etc/systemd/system/tracing-proxy.service > /dev/null 2>&1
+fi
+rm -rf /opt/opsramp/tracing-proxy
+systemctl daemon-reload
+systemctl reset-failed tracing-proxy.service > /dev/null 2>&1
+echo "Uninstalled Tracing Proxy Successfully"
diff --git a/build/vm/tracing-rpm/script.sh b/build/vm/tracing-rpm/script.sh
new file mode 100755
index 0000000000..36d46b1d19
--- /dev/null
+++ b/build/vm/tracing-rpm/script.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+yum -y install rpmdevtools
+rpmdev-setuptree
+
+BUILD_DIR="."
+
+if [ "$IS_GITHUB_ACTION" = "true" ]; then
+ BUILD_DIR="build/vm/tracing-rpm"
+fi
+
+Release=$(uname -m)
+sed -i "/^\%define release/s/^.*$/\%define release ${Release}/g" $BUILD_DIR/tracing-proxy.spec
+# $1 is a version of the package
+Version=$1
+if [[ -z "$Version" ]]; then
+ Version=$VERSION_TAG
+fi
+sed -i "/^\%define version/s/^.*$/\%define version ${Version}/g" $BUILD_DIR/tracing-proxy.spec
+
+# Building static binaries
+CGO_ENABLED=0 \
+ GOOS=linux \
+ GOARCH=amd64 \
+ go build -ldflags "-X main.BuildID=${Version}" \
+ -o $BUILD_DIR/tracing-proxy \
+ $BUILD_DIR/../../../cmd/tracing-proxy/main.go
+
+CGO_ENABLED=0 \
+ GOOS=linux \
+ GOARCH=amd64 \
+ go build -ldflags "-X main.BuildID=${Version}" \
+ -o $BUILD_DIR/configure \
+ $BUILD_DIR/../configure.go
+
+package_name="tracing-proxy-${Version}"
+mkdir -p ${package_name}/opt/opsramp/tracing-proxy/bin/
+cp -r $BUILD_DIR/../package_directories/* ${package_name}
+mv $BUILD_DIR/configure ${package_name}/opt/opsramp/tracing-proxy/bin/configure
+mv $BUILD_DIR/tracing-proxy ${package_name}/opt/opsramp/tracing-proxy/bin/tracing-proxy
+
+tar -czvf ${package_name}.tar.gz ${package_name}
+
+mv ${package_name}.tar.gz /root/rpmbuild/SOURCES/
+cp $BUILD_DIR/tracing-proxy.spec /root/rpmbuild/SPECS/tracing-proxy.spec
+
+rpmbuild -ba --clean /root/rpmbuild/SPECS/tracing-proxy.spec
+
+echo "***** rpm package can be found in /root/rpmbuild/RPMS/x86_64/ ****"
+
+# CleanUp
+rm -rf ${package_name}
+rm -rf $BUILD_DIR/configure $BUILD_DIR/tracing-proxy
diff --git a/build/vm/tracing-rpm/tracing-proxy.spec b/build/vm/tracing-rpm/tracing-proxy.spec
new file mode 100644
index 0000000000..fb0042b59f
--- /dev/null
+++ b/build/vm/tracing-rpm/tracing-proxy.spec
@@ -0,0 +1,60 @@
+# SPEC file for creating tracing-proxy RPM
+
+%define name tracing-proxy
+%define release
+%define version 1.1.0
+
+Summary: Tracing Proxy
+License: OpsRamp
+Name: %{name}
+Version: %{version}
+Source0: %{name}-%{version}.tar.gz
+Release: %{release}
+Provides: tracing-proxy
+BuildRequires: bash
+
+%description
+Tracing Proxy
+
+%prep
+%setup -q -n %{name}-%{version}
+
+%install
+%__rm -rf %{buildroot}
+install -p -d -m 0755 %{buildroot}/opt/opsramp/tracing-proxy/bin
+install -p -d -m 0755 %{buildroot}/opt/opsramp/tracing-proxy/conf
+install -p -d -m 0755 %{buildroot}/etc/systemd/system
+install -m 0744 opt/opsramp/tracing-proxy/bin/tracing-proxy %{buildroot}/opt/opsramp/tracing-proxy/bin/
+install -m 0744 opt/opsramp/tracing-proxy/bin/configure %{buildroot}/opt/opsramp/tracing-proxy/bin
+install -m 0600 opt/opsramp/tracing-proxy/conf/config_complete.yaml %{buildroot}/opt/opsramp/tracing-proxy/conf/
+install -m 0600 opt/opsramp/tracing-proxy/conf/rules_complete.yaml %{buildroot}/opt/opsramp/tracing-proxy/conf/
+install -m 0644 etc/systemd/system/tracing-proxy.service %{buildroot}/etc/systemd/system
+
+%clean
+%__rm -rf %{buildroot}
+
+%files
+/opt/opsramp/tracing-proxy/bin/
+/opt/opsramp/tracing-proxy/conf/
+/etc/systemd/system/tracing-proxy.service
+
+
+%post -p /bin/bash
+mkdir -p /var/log/opsramp
+touch /var/log/opsramp/tracing-proxy.log
+systemctl start tracing-proxy
+
+
+%preun -p /bin/bash
+echo "Uninstalling Tracing Proxy"
+systemctl stop tracing-proxy
+systemctl disable tracing-proxy
+
+%postun -p /bin/bash
+%__rm -rf /opt/opsramp/tracing-proxy
+if [ -f /etc/systemd/system/tracing-proxy.service ]; then
+ %__rm -rf /etc/systemd/system/tracing-proxy.service > /dev/null 2>&1
+fi
+systemctl daemon-reload
+systemctl reset-failed tracing-proxy.service > /dev/null 2>&1
+echo "Uninstalled Tracing Proxy Successfully"
diff --git a/cmd/refinery/main.go b/cmd/refinery/main.go
deleted file mode 100644
index 6f21ad836d..0000000000
--- a/cmd/refinery/main.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package main
-
-import (
- "fmt"
- "net"
- "net/http"
- "os"
- "os/signal"
- "syscall"
- "time"
-
- libhoney "github.com/honeycombio/libhoney-go"
- "github.com/honeycombio/libhoney-go/transmission"
- statsd "gopkg.in/alexcesaro/statsd.v2"
-
- "github.com/facebookgo/inject"
- "github.com/facebookgo/startstop"
- flag "github.com/jessevdk/go-flags"
- "github.com/sirupsen/logrus"
-
- "github.com/honeycombio/refinery/app"
- "github.com/honeycombio/refinery/collect"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/internal/peer"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/sample"
- "github.com/honeycombio/refinery/service/debug"
- "github.com/honeycombio/refinery/sharder"
- "github.com/honeycombio/refinery/transmit"
-)
-
-// set by travis.
-var BuildID string
-var version string
-
-type Options struct {
- ConfigFile string `short:"c" long:"config" description:"Path to config file" default:"/etc/refinery/refinery.toml"`
- RulesFile string `short:"r" long:"rules_config" description:"Path to rules config file" default:"/etc/refinery/rules.toml"`
- Version bool `short:"v" long:"version" description:"Print version number and exit"`
- Debug bool `short:"d" long:"debug" description:"If enabled, runs debug service (runs on the first open port between localhost:6060 and :6069 by default)"`
-}
-
-func main() {
- var opts Options
- flagParser := flag.NewParser(&opts, flag.Default)
- if extraArgs, err := flagParser.Parse(); err != nil || len(extraArgs) != 0 {
- fmt.Println("command line parsing error - call with --help for usage")
- os.Exit(1)
- }
-
- if BuildID == "" {
- version = "dev"
- } else {
- version = BuildID
- }
-
- if opts.Version {
- fmt.Println("Version: " + version)
- os.Exit(0)
- }
-
- a := app.App{
- Version: version,
- }
-
- c, err := config.NewConfig(opts.ConfigFile, opts.RulesFile, func(err error) {
- if a.Logger != nil {
- a.Logger.Error().WithField("error", err).Logf("error reloading config")
- }
- })
- if err != nil {
- fmt.Printf("unable to load config: %+v\n", err)
- os.Exit(1)
- }
-
- peers, err := peer.NewPeers(c)
-
- if err != nil {
- fmt.Printf("unable to load peers: %+v\n", err)
- os.Exit(1)
- }
-
- // get desired implementation for each dependency to inject
- lgr := logger.GetLoggerImplementation(c)
- collector := collect.GetCollectorImplementation(c)
- metricsr := metrics.GetMetricsImplementation(c)
- shrdr := sharder.GetSharderImplementation(c)
- samplerFactory := &sample.SamplerFactory{}
-
- // set log level
- logLevel, err := c.GetLoggingLevel()
- if err != nil {
- fmt.Printf("unable to get logging level from config: %v\n", err)
- os.Exit(1)
- }
- if err := lgr.SetLevel(logLevel); err != nil {
- fmt.Printf("unable to set logging level: %v\n", err)
- os.Exit(1)
- }
-
- // upstreamTransport is the http transport used to send things on to Honeycomb
- upstreamTransport := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: 10 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 15 * time.Second,
- }
-
- // peerTransport is the http transport used to send things to a local peer
- peerTransport := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: 3 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 1200 * time.Millisecond,
- }
-
- sdUpstream, _ := statsd.New(statsd.Prefix("refinery.upstream"))
- sdPeer, _ := statsd.New(statsd.Prefix("refinery.peer"))
-
- userAgentAddition := "refinery/" + version
- upstreamClient, err := libhoney.NewClient(libhoney.ClientConfig{
- Transmission: &transmission.Honeycomb{
- MaxBatchSize: 500,
- BatchTimeout: libhoney.DefaultBatchTimeout,
- MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches,
- PendingWorkCapacity: uint(c.GetUpstreamBufferSize()),
- UserAgentAddition: userAgentAddition,
- Transport: upstreamTransport,
- BlockOnSend: true,
- EnableMsgpackEncoding: true,
- Metrics: sdUpstream,
- },
- })
- if err != nil {
- fmt.Printf("unable to initialize upstream libhoney client")
- os.Exit(1)
- }
-
- peerClient, err := libhoney.NewClient(libhoney.ClientConfig{
- Transmission: &transmission.Honeycomb{
- MaxBatchSize: 500,
- BatchTimeout: libhoney.DefaultBatchTimeout,
- MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches,
- PendingWorkCapacity: uint(c.GetPeerBufferSize()),
- UserAgentAddition: userAgentAddition,
- Transport: peerTransport,
- // gzip compression is expensive, and peers are most likely close to each other
- // so we can turn off gzip when forwarding to peers
- DisableGzipCompression: true,
- EnableMsgpackEncoding: true,
- Metrics: sdPeer,
- },
- })
- if err != nil {
- fmt.Printf("unable to initialize upstream libhoney client")
- os.Exit(1)
- }
-
- var g inject.Graph
- err = g.Provide(
- &inject.Object{Value: c},
- &inject.Object{Value: peers},
- &inject.Object{Value: lgr},
- &inject.Object{Value: upstreamTransport, Name: "upstreamTransport"},
- &inject.Object{Value: peerTransport, Name: "peerTransport"},
- &inject.Object{Value: &transmit.DefaultTransmission{LibhClient: upstreamClient, Name: "upstream_"}, Name: "upstreamTransmission"},
- &inject.Object{Value: &transmit.DefaultTransmission{LibhClient: peerClient, Name: "peer_"}, Name: "peerTransmission"},
- &inject.Object{Value: shrdr},
- &inject.Object{Value: collector},
- &inject.Object{Value: metricsr},
- &inject.Object{Value: version, Name: "version"},
- &inject.Object{Value: samplerFactory},
- &inject.Object{Value: &a},
- )
- if err != nil {
- fmt.Printf("failed to provide injection graph. error: %+v\n", err)
- os.Exit(1)
- }
-
- if opts.Debug {
- err = g.Provide(&inject.Object{Value: &debug.DebugService{Config: c}})
- if err != nil {
- fmt.Printf("failed to provide injection graph. error: %+v\n", err)
- os.Exit(1)
- }
- }
-
- if err := g.Populate(); err != nil {
- fmt.Printf("failed to populate injection graph. error: %+v\n", err)
- os.Exit(1)
- }
-
- // the logger provided to startstop must be valid before any service is
- // started, meaning it can't rely on injected configs. make a custom logger
- // just for this step
- ststLogger := logrus.New()
- level, _ := logrus.ParseLevel(logLevel)
- ststLogger.SetLevel(level)
-
- defer startstop.Stop(g.Objects(), ststLogger)
- if err := startstop.Start(g.Objects(), ststLogger); err != nil {
- fmt.Printf("failed to start injected dependencies. error: %+v\n", err)
- os.Exit(1)
- }
-
- // set up signal channel to exit
- sigsToExit := make(chan os.Signal, 1)
- signal.Notify(sigsToExit, syscall.SIGINT, syscall.SIGTERM)
-
- // block on our signal handler to exit
- sig := <-sigsToExit
- a.Logger.Error().Logf("Caught signal \"%s\"", sig)
-}
diff --git a/cmd/test_redimem/main.go b/cmd/test_redimem/main.go
deleted file mode 100644
index 6448d0d196..0000000000
--- a/cmd/test_redimem/main.go
+++ /dev/null
@@ -1,290 +0,0 @@
-package main
-
-// this test is an exercise against an actual redis instance to see the redimem
-// package work as expected.
-
-import (
- "context"
- "math/rand"
- "sync"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/garyburd/redigo/redis"
- "github.com/sirupsen/logrus"
-
- "github.com/honeycombio/refinery/internal/redimem"
-)
-
-func main() {
- rand.Seed(time.Now().UnixNano())
-
- // tick := time.NewTicker(time.Second)
- // for t := range tick.C {
- // logrus.Info("Current time: ", t)
- // }
-
- logrus.SetLevel(logrus.WarnLevel)
-
- pool := &redis.Pool{
- MaxIdle: 3,
- MaxActive: 30,
- IdleTimeout: 5 * time.Minute,
- Wait: true,
- Dial: func() (redis.Conn, error) {
- return redis.Dial(
- "tcp", "localhost:6379",
- redis.DialReadTimeout(1*time.Second),
- redis.DialConnectTimeout(1*time.Second),
- redis.DialDatabase(0), // TODO enable multiple databases for multiple samproxies
- )
- },
- }
-
- rm := &redimem.RedisMembership{
- Prefix: "test_redimem",
- Pool: pool,
- }
-
- wg := sync.WaitGroup{}
- for i := 0; i < 500; i++ {
- wg.Add(1)
- go func() {
- singleTestRandomLength(10, 5, rm)
- wg.Done()
- }()
- time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
- }
- wg.Wait()
-}
-
-// singleTestRandomLength will register then re-register an entry some number of
-// times up to limit. It will check the entire time to verify that the entry is
-// still there then that it goes away when it's supposed to at the end. The
-// intent is to call this function in multiple goroutines to watch a whole slew
-// of entries start and stop.
-func singleTestRandomLength(limit, registerDurLimitSec int, rm *redimem.RedisMembership) {
- // numTimes will be the number of times to re-register this entry before
- // letting it expire
- numTimes := rand.Intn(limit) + 1
- // registerDur will be the duration in milliseconds to register this entry
- registerDur := rand.Intn(registerDurLimitSec*1000) + 1000
- // reregisterFreq is how frequently we should re-register the entry
- reregisterFreq := registerDur / 2
- // done will let this function know when the entry is done being reregistered
- done := make(chan struct{})
- // name is a random string used to register this entry
- name := GenID(12)
-
- ctx := context.Background()
-
- // register the entry once to make sure it's there before the first check runs
- logrus.WithFields(logrus.Fields{
- "registerDur": registerDur,
- "name": name,
- "numTimes": numTimes,
- }).Info("registering entry")
- rm.Register(ctx, name, time.Duration(registerDur)*time.Millisecond)
-
- // register the entry and then re-register it numTimes
- go func() {
- ticker := time.NewTicker(time.Duration(reregisterFreq) * time.Millisecond)
- var i int
- for range ticker.C {
- i = i + 1
- logrus.WithField("name", name).Debug("re-registering entry")
- rm.Register(ctx, name, time.Duration(registerDur)*time.Millisecond)
- if i >= numTimes {
- break
- }
- }
- done <- struct{}{}
- }()
-
- // watch for the entry to appear, then check that it's still there until it's
- // time for it to go away, then verify it went away.
- func() {
- var i int
- SHOULDEXIST:
- for {
- i = i + 1
- // exit out of this for loop when we get a message from the done channel
- select {
- case <-done:
- break SHOULDEXIST
- default:
- }
- // check that name is registered
- var found bool
- list, err := rm.GetMembers(ctx)
- if err != nil {
- logrus.WithError(err).WithFields(logrus.Fields{
- "name": name,
- "numEntries": len(list),
- "iteration": i,
- }).Warn("caught error from get members")
- }
- for _, entry := range list {
- if entry == name {
- found = true
- // logrus.WithField("name", name).Info("shouldexist: found entry")
- break
- }
- }
- if !found {
- logrus.WithFields(logrus.Fields{
- "name": name,
- "numEntries": len(list),
- "iteration": i,
- }).Warn("shouldexist: Failed to find entry")
- }
- // pause between each check
- time.Sleep(100 * time.Millisecond)
- }
- // ok, we hit the last registration. We should expect to find the name for
- // another registerDur and then it should go away
- timer := time.NewTimer(time.Duration(registerDur) * time.Millisecond)
- startLastIter := time.Now()
-
- i = 0
- LASTITER:
- for {
- i = i + 1
- select {
- case <-timer.C:
- // ok, now we should expect it to go away
- break LASTITER
- default:
- }
- // check that we find the entry
- var found bool
- list, err := rm.GetMembers(ctx)
- if err != nil {
- logrus.WithError(err).WithFields(logrus.Fields{
- "name": name,
- "numEntries": len(list),
- "iteration": i,
- }).Warn("in lastiter caught error from get members")
- }
- for _, entry := range list {
- if entry == name {
- found = true
- // logrus.WithField("name", name).Info("lastiter: found entry")
- break
- }
- }
- if !found {
- dur := time.Since(startLastIter)
- logrus.WithFields(logrus.Fields{
- "name": name,
- "numEntries": len(list),
- "timeInLastIterMs": float64(dur / time.Millisecond),
- "expectedDurMs": registerDur,
- "deltaExpire": float64(registerDur) - float64(dur/time.Millisecond),
- }).Info("lastiter: Entry vanished")
- if float64(registerDur)-float64(dur/time.Millisecond) > 1600 {
- logrus.WithFields(logrus.Fields{
- "iteration": i,
- "name": name,
- "numEntries": len(list),
- "timeInLastIterMs": float64(dur / time.Millisecond),
- "expectedDurMs": registerDur,
- "deltaExpire": float64(registerDur) - float64(dur/time.Millisecond),
- }).Warn("delta exceeded 1.6 seconds - out of bounds of expected expiration")
- }
- break
- }
- time.Sleep(50 * time.Millisecond)
- }
-
- // ok, we're beyond the duration of the last registration interval; now or
- // very soon we should see the entry disappear.
- i = 0
- for {
- // check that we find the entry
- var found bool
- list, err := rm.GetMembers(ctx)
- if err != nil {
- logrus.WithError(err).WithFields(logrus.Fields{
- "name": name,
- "numEntries": len(list),
- "iteration": i,
- }).Warn("in endgame caught error from get members")
- }
- for _, entry := range list {
- if entry == name {
- found = true
- break
- }
- }
- if !found {
- // we're done, the register is gone
- logrus.WithField("count", i).WithField("name", name).Infof("entry now gone")
- break
- }
- if i > 100 {
- logrus.WithField("name", name).Warn("entry still exists after 100 checks")
- }
- time.Sleep(10 * time.Millisecond)
- }
- logrus.WithField("name", name).Infof("all done checking entry")
- }()
-
-}
-
-// adds two entries with various sleeps and verifies they're there at the
-// expected times
-func linearTest(rm *redimem.RedisMembership) {
- ctx := context.Background()
- logrus.Infoln("about to register one for 3sec")
- rm.Register(ctx, "one", 3*time.Second)
-
- logrus.Infoln("about to sleep for 2sec")
- time.Sleep(2 * time.Second)
-
- logrus.Infoln("checking for one")
- list, _ := rm.GetMembers(ctx)
- spew.Dump(list)
-
- logrus.Infoln("about to register two for 3sec")
- rm.Register(ctx, "two", 3*time.Second)
-
- logrus.Infoln("checking for one and two")
- list, _ = rm.GetMembers(ctx)
- spew.Dump(list)
-
- logrus.Infoln("about to sleep for 1.5sec")
- time.Sleep(1500 * time.Millisecond)
-
- logrus.Infoln("checking list; one should be missing, two should be there")
- list, _ = rm.GetMembers(ctx)
- spew.Dump(list)
-
- logrus.Infoln("about to re-register two for 3sec")
- rm.Register(ctx, "two", 3*time.Second)
-
- logrus.Infoln("about to sleep for 2sec")
- time.Sleep(2 * time.Second)
-
- logrus.Infoln("checking list; one should be missing, two should be there")
- list, _ = rm.GetMembers(ctx)
- spew.Dump(list)
-
- logrus.Infoln("about to sleep for 1.5sec")
- time.Sleep(1500 * time.Millisecond)
-
- logrus.Infoln("checking list; both should be missing")
- list, _ = rm.GetMembers(ctx)
- spew.Dump(list)
-}
-
-const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
-
-// GenID returns a random string of length numChars
-func GenID(numChars int) string {
- id := make([]byte, numChars)
- for i := 0; i < numChars; i++ {
- id[i] = charset[rand.Intn(len(charset))]
- }
- return string(id)
-}
diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go
new file mode 100644
index 0000000000..526bbe703a
--- /dev/null
+++ b/cmd/tracing-proxy/main.go
@@ -0,0 +1,265 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "github.com/facebookgo/inject"
+ "github.com/facebookgo/startstop"
+ flag "github.com/jessevdk/go-flags"
+ "github.com/opsramp/libtrace-go"
+ "github.com/opsramp/libtrace-go/transmission"
+ "github.com/opsramp/tracing-proxy/app"
+ "github.com/opsramp/tracing-proxy/collect"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/internal/peer"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/sample"
+ "github.com/opsramp/tracing-proxy/service/debug"
+ "github.com/opsramp/tracing-proxy/sharder"
+ "github.com/opsramp/tracing-proxy/transmit"
+ "net"
+ "net/http"
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+)
+
+// set by travis.
+var BuildID string
+var CollectorVersion string
+
+type Options struct {
+ ConfigFile string `short:"c" long:"config" description:"Path to config file" default:"/etc/tracing-proxy/config.toml"`
+ RulesFile string `short:"r" long:"rules_config" description:"Path to rules config file" default:"/etc/tracing-proxy/rules.toml"`
+ Version bool `short:"v" long:"version" description:"Print version number and exit"`
+ Debug bool `short:"d" long:"debug" description:"If enabled, runs debug service (runs on the first open port between localhost:6060 and :6069 by default)"`
+ InterfaceNames bool `long:"interface-names" description:"If set, print system's network interface names and exit."`
+}
+
+func main() {
+ var opts Options
+ flagParser := flag.NewParser(&opts, flag.Default)
+ if extraArgs, err := flagParser.Parse(); err != nil || len(extraArgs) != 0 {
+ fmt.Println("command line parsing error - call with --help for usage")
+ os.Exit(1)
+ }
+
+ if BuildID == "" {
+ CollectorVersion = "dev"
+ } else {
+ CollectorVersion = BuildID
+ }
+
+ if opts.Version {
+ fmt.Println("Version: " + CollectorVersion)
+ os.Exit(0)
+ }
+
+ if opts.InterfaceNames {
+ ifaces, err := net.Interfaces()
+ if err != nil {
+ fmt.Printf("Error: %s\n", err)
+ os.Exit(1)
+ }
+ for _, i := range ifaces {
+ fmt.Println(i.Name)
+ }
+ os.Exit(0)
+ }
+
+ a := app.App{
+ Version: CollectorVersion,
+ }
+
+ c, err := config.NewConfig(opts.ConfigFile, opts.RulesFile, func(err error) {
+ if a.Logger != nil {
+ a.Logger.Error().WithField("error", err).Logf("error reloading config")
+ }
+ })
+ if err != nil {
+ fmt.Printf("unable to load config: %+v\n", err)
+ os.Exit(1)
+ }
+
+ // get desired implementation for each dependency to inject
+ lgr := logger.GetLoggerImplementation()
+ collector := collect.GetCollectorImplementation(c)
+ metricsConfig := metrics.GetMetricsImplementation("")
+ shrdr := sharder.GetSharderImplementation(c)
+ samplerFactory := &sample.SamplerFactory{}
+
+ // set log level
+ logLevel, err := c.GetLoggingLevel()
+ if err != nil {
+ fmt.Printf("unable to get logging level from config: %v\n", err)
+ os.Exit(1)
+ }
+ logrusLogger := lgr.Init()
+ if err := lgr.SetLevel(logLevel); err != nil {
+ fmt.Printf("unable to set logging level: %v\n", err)
+ os.Exit(1)
+ }
+
+ // upstreamTransport is the http transport used to send things on to OpsRamp
+ upstreamTransport := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 10 * time.Second,
+ }).DialContext,
+ TLSHandshakeTimeout: 15 * time.Second,
+ }
+
+ // peerTransport is the http transport used to send things to a local peer
+ peerTransport := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 3 * time.Second,
+ }).DialContext,
+ TLSHandshakeTimeout: 1200 * time.Millisecond,
+ }
+
+ upstreamMetricsConfig := metrics.GetMetricsImplementation("upstream")
+ peerMetricsConfig := metrics.GetMetricsImplementation("peer")
+
+ authConfig := c.GetAuthConfig()
+ opsrampAPI, err := c.GetOpsrampAPI()
+ if err != nil {
+ logrusLogger.Fatal(err)
+ }
+ dataset, err := c.GetDataset()
+ if err != nil {
+ logrusLogger.Fatal(err)
+ }
+ retryConfig := c.GetRetryConfig()
+
+ userAgentAddition := "tracing-proxy/" + CollectorVersion
+ upstreamClient, err := libtrace.NewClient(libtrace.ClientConfig{
+ Transmission: &transmission.TraceProxy{
+ MaxBatchSize: c.GetMaxBatchSize(),
+ BatchTimeout: c.GetBatchTimeout(),
+ MaxConcurrentBatches: libtrace.DefaultMaxConcurrentBatches,
+ PendingWorkCapacity: uint(c.GetUpstreamBufferSize()),
+ UserAgentAddition: userAgentAddition,
+ Transport: upstreamTransport,
+ BlockOnSend: true,
+ EnableMsgpackEncoding: false,
+ Metrics: upstreamMetricsConfig,
+ IsPeer: false,
+ UseTls: c.GetGlobalUseTLS(),
+ UseTlsInsecure: c.GetGlobalUseTLSInsecureSkip(),
+ AuthTokenEndpoint: authConfig.Endpoint,
+ AuthTokenKey: authConfig.Key,
+ AuthTokenSecret: authConfig.Secret,
+ ApiHost: opsrampAPI,
+ TenantId: authConfig.TenantId,
+ Dataset: dataset,
+ RetrySettings: &transmission.RetrySettings{
+ InitialInterval: retryConfig.InitialInterval,
+ RandomizationFactor: retryConfig.RandomizationFactor,
+ Multiplier: retryConfig.Multiplier,
+ MaxInterval: retryConfig.MaxInterval,
+ MaxElapsedTime: retryConfig.MaxElapsedTime,
+ },
+ },
+ })
+ if err != nil {
+ fmt.Printf("unable to initialize upstream libtrace client: %v", err)
+ os.Exit(1)
+ }
+
+ peerClient, err := libtrace.NewClient(libtrace.ClientConfig{
+ Transmission: &transmission.TraceProxy{
+ MaxBatchSize: c.GetMaxBatchSize(),
+ BatchTimeout: c.GetBatchTimeout(),
+ MaxConcurrentBatches: libtrace.DefaultMaxConcurrentBatches,
+ PendingWorkCapacity: uint(c.GetPeerBufferSize()),
+ UserAgentAddition: userAgentAddition,
+ Transport: peerTransport,
+ DisableCompression: !c.GetCompressPeerCommunication(),
+ EnableMsgpackEncoding: false,
+ Metrics: peerMetricsConfig,
+ IsPeer: true,
+ AuthTokenEndpoint: authConfig.Endpoint,
+ AuthTokenKey: authConfig.Key,
+ AuthTokenSecret: authConfig.Secret,
+ ApiHost: opsrampAPI,
+ TenantId: authConfig.TenantId,
+ Dataset: dataset,
+ RetrySettings: &transmission.RetrySettings{
+ InitialInterval: retryConfig.InitialInterval,
+ RandomizationFactor: retryConfig.RandomizationFactor,
+ Multiplier: retryConfig.Multiplier,
+ MaxInterval: retryConfig.MaxInterval,
+ MaxElapsedTime: retryConfig.MaxElapsedTime,
+ },
+ },
+ })
+ if err != nil {
+		fmt.Printf("unable to initialize peer libtrace client: %v\n", err)
+ os.Exit(1)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), c.GetPeerTimeout())
+ defer cancel()
+ done := make(chan struct{})
+ peers, err := peer.NewPeers(ctx, c, done)
+ if err != nil {
+ fmt.Printf("unable to load peers: %+v\n", err)
+ os.Exit(1)
+ }
+
+ var g inject.Graph
+ err = g.Provide(
+ &inject.Object{Value: c},
+ &inject.Object{Value: peers},
+ &inject.Object{Value: lgr},
+ &inject.Object{Value: upstreamTransport, Name: "upstreamTransport"},
+ &inject.Object{Value: peerTransport, Name: "peerTransport"},
+ &inject.Object{Value: &transmit.DefaultTransmission{LibhClient: upstreamClient, Name: "upstream_"}, Name: "upstreamTransmission"},
+ &inject.Object{Value: &transmit.DefaultTransmission{LibhClient: peerClient, Name: "peer_"}, Name: "peerTransmission"},
+ &inject.Object{Value: shrdr},
+ &inject.Object{Value: collector},
+ &inject.Object{Value: metricsConfig, Name: "metrics"},
+ &inject.Object{Value: upstreamMetricsConfig, Name: "upstreamMetrics"},
+ &inject.Object{Value: peerMetricsConfig, Name: "peerMetrics"},
+ &inject.Object{Value: CollectorVersion, Name: "version"},
+ &inject.Object{Value: samplerFactory},
+ &inject.Object{Value: &a},
+ )
+ if err != nil {
+ fmt.Printf("failed to provide injection graph. error: %+v\n", err)
+ os.Exit(1)
+ }
+
+ if opts.Debug {
+ err = g.Provide(&inject.Object{Value: &debug.DebugService{Config: c}})
+ if err != nil {
+ fmt.Printf("failed to provide injection graph. error: %+v\n", err)
+ os.Exit(1)
+ }
+ }
+
+ if err := g.Populate(); err != nil {
+ fmt.Printf("failed to populate injection graph. error: %+v\n", err)
+ os.Exit(1)
+ }
+
+ defer startstop.Stop(g.Objects(), logrusLogger)
+ if err := startstop.Start(g.Objects(), logrusLogger); err != nil {
+ fmt.Printf("failed to start injected dependencies. error: %+v\n", err)
+ os.Exit(1)
+ }
+
+ // set up signal channel to exit
+ sigsToExit := make(chan os.Signal, 1)
+ signal.Notify(sigsToExit, syscall.SIGINT, syscall.SIGTERM)
+
+ // block on our signal handler to exit
+ sig := <-sigsToExit
+ // unregister ourselves before we go
+ close(done)
+ time.Sleep(100 * time.Millisecond)
+ a.Logger.Error().Logf("Caught signal \"%s\"", sig)
+}
diff --git a/collect/cache/cache.go b/collect/cache/cache.go
index 91a457e0ae..35ba69115b 100644
--- a/collect/cache/cache.go
+++ b/collect/cache/cache.go
@@ -3,9 +3,9 @@ package cache
import (
"time"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/types"
)
// Cache is a non-threadsafe cache. It must not be used for concurrent access.
@@ -50,9 +50,9 @@ func NewInMemCache(
// buffer_overrun increments when the trace overwritten in the circular
// buffer has not yet been sent
- metrics.Register("collect_cache_buffer_overrun", "counter")
- metrics.Register("collect_cache_capacity", "gauge")
- metrics.Register("collect_cache_entries", "histogram")
+ metrics.Register("collector_cache_buffer_overrun", "counter")
+ metrics.Register("collector_cache_capacity", "gauge")
+ metrics.Register("collector_cache_entries", "histogram")
if capacity == 0 {
capacity = DefaultInMemCacheCapacity
@@ -102,7 +102,7 @@ func (d *DefaultInMemCache) Set(trace *types.Trace) *types.Trace {
if !oldTrace.Sent {
// if it hasn't already been sent,
// record that we're overrunning the buffer
- d.Metrics.IncrementCounter("collect_cache_buffer_overrun")
+ d.Metrics.Increment("collector_cache_buffer_overrun")
// and return the trace so it can be sent.
retTrace = oldTrace
}
@@ -128,9 +128,11 @@ func (d *DefaultInMemCache) GetAll() []*types.Trace {
return tmp
}
+// TakeExpiredTraces should be called to decide which traces are past their expiration time;
+// It removes and returns them.
func (d *DefaultInMemCache) TakeExpiredTraces(now time.Time) []*types.Trace {
- d.Metrics.Gauge("collect_cache_capacity", float64(len(d.insertionOrder)))
- d.Metrics.Histogram("collect_cache_entries", float64(len(d.cache)))
+ d.Metrics.Gauge("collector_cache_capacity", float64(len(d.insertionOrder)))
+ d.Metrics.Histogram("collector_cache_entries", float64(len(d.cache)))
var res []*types.Trace
for i, t := range d.insertionOrder {
@@ -142,3 +144,19 @@ func (d *DefaultInMemCache) TakeExpiredTraces(now time.Time) []*types.Trace {
}
return res
}
+
+// RemoveTraces accepts a set of trace IDs and removes any matching ones from
+// the insertion list. This is used in the case of a cache overrun.
+// The gauge/histogram are recorded before removal, so they report the
+// pre-removal sizes; the scan is linear in the ring-buffer length, and IDs in
+// toDelete that are not cached are silently ignored.
+func (d *DefaultInMemCache) RemoveTraces(toDelete map[string]struct{}) {
+	d.Metrics.Gauge("collector_cache_capacity", float64(len(d.insertionOrder)))
+	d.Metrics.Histogram("collector_cache_entries", float64(len(d.cache)))
+
+	for i, t := range d.insertionOrder {
+		if t != nil {
+			if _, ok := toDelete[t.TraceID]; ok {
+				// clear the ring-buffer slot and drop the map entry together
+				d.insertionOrder[i] = nil
+				delete(d.cache, t.TraceID)
+			}
+		}
+	}
+}
diff --git a/collect/cache/cache_test.go b/collect/cache/cache_test.go
index bc14b14d6f..bff24e8662 100644
--- a/collect/cache/cache_test.go
+++ b/collect/cache/cache_test.go
@@ -1,5 +1,3 @@
-// +build all race
-
package cache
import (
@@ -8,9 +6,9 @@ import (
"github.com/stretchr/testify/assert"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/types"
)
// TestCacheSetGet sets a value then fetches it back
@@ -35,29 +33,29 @@ func TestBufferOverrun(t *testing.T) {
c := NewInMemCache(2, s, &logger.NullLogger{})
traces := []*types.Trace{
- &types.Trace{TraceID: "abc123"},
- &types.Trace{TraceID: "def456"},
- &types.Trace{TraceID: "ghi789"},
+ {TraceID: "abc123"},
+ {TraceID: "def456"},
+ {TraceID: "ghi789"},
}
c.Set(traces[0])
c.Set(traces[1])
- assert.Equal(t, 0, s.CounterIncrements["collect_cache_buffer_overrun"], "buffer should not yet have overrun")
+ assert.Equal(t, 0, s.CounterIncrements["collector_cache_buffer_overrun"], "buffer should not yet have overrun")
c.Set(traces[2])
- assert.Equal(t, 1, s.CounterIncrements["collect_cache_buffer_overrun"], "buffer should have overrun")
+ assert.Equal(t, 1, s.CounterIncrements["collector_cache_buffer_overrun"], "buffer should have overrun")
}
func TestTakeExpiredTraces(t *testing.T) {
s := &metrics.MockMetrics{}
s.Start()
- c := NewInMemCache(10, s, &logger.NullLogger{})
+ c := NewInMemCache(10, s, logger.GetLoggerImplementation())
now := time.Now()
traces := []*types.Trace{
- &types.Trace{TraceID: "1", SendBy: now.Add(-time.Minute), Sent: true},
- &types.Trace{TraceID: "2", SendBy: now.Add(-time.Minute)},
- &types.Trace{TraceID: "3", SendBy: now.Add(time.Minute)},
- &types.Trace{TraceID: "4"},
+ {TraceID: "1", SendBy: now.Add(-time.Minute), Sent: true},
+ {TraceID: "2", SendBy: now.Add(-time.Minute)},
+ {TraceID: "3", SendBy: now.Add(time.Minute)},
+ {TraceID: "4"},
}
for _, t := range traces {
c.Set(t)
@@ -77,3 +75,33 @@ func TestTakeExpiredTraces(t *testing.T) {
assert.Equal(t, traces[2], all[i])
}
}
+
+// TestRemoveSentTraces verifies that RemoveTraces deletes exactly the cached
+// traces whose IDs appear in the delete set (unknown IDs are ignored) and
+// leaves every other trace in place.
+func TestRemoveSentTraces(t *testing.T) {
+	s := &metrics.MockMetrics{}
+	s.Start()
+	c := NewInMemCache(10, s, logger.GetLoggerImplementation())
+
+	now := time.Now()
+	traces := []*types.Trace{
+		{TraceID: "1", SendBy: now.Add(-time.Minute), Sent: true},
+		{TraceID: "2", SendBy: now.Add(-time.Minute)},
+		{TraceID: "3", SendBy: now.Add(time.Minute)},
+		{TraceID: "4"},
+	}
+	// note: t here shadows the *testing.T for the duration of the loop
+	for _, t := range traces {
+		c.Set(t)
+	}
+
+	deletes := map[string]struct{}{
+		"1": {},
+		"3": {},
+		"4": {},
+		"5": {}, // not present
+	}
+
+	c.RemoveTraces(deletes)
+
+	// only trace "2" should survive
+	all := c.GetAll()
+	assert.Equal(t, 1, len(all))
+	assert.Equal(t, traces[1], all[0])
+}
diff --git a/collect/cache/cuckoo.go b/collect/cache/cuckoo.go
new file mode 100644
index 0000000000..5d5aff0eb7
--- /dev/null
+++ b/collect/cache/cuckoo.go
@@ -0,0 +1,97 @@
+package cache
+
+import (
+ "sync"
+
+ "github.com/opsramp/tracing-proxy/metrics"
+ cuckoo "github.com/panmari/cuckoofilter"
+)
+
+// These are the names of metrics tracked for the cuckoo filter
+const (
+ CurrentLoadFactor = "cuckoo_current_load_factor"
+ FutureLoadFactor = "cuckoo_future_load_factor"
+ CurrentCapacity = "cuckoo_current_capacity"
+)
+
+// This wraps a cuckoo filter implementation in a way that lets us keep it running forever
+// without filling up.
+// A cuckoo filter can't be emptied (you can delete individual items if you know what they are,
+// but you can't get their names from the filter). Consequently, what we do is keep *two* filters,
+// current and future. The current one is the one we use to check against, and when we add, we
+// add to both. But the future one is started *after* the current one, so that when the current
+// gets too full, we can discard it, replace it with future, and then start a new, empty future.
+// This is why the future filter is nil until the current filter reaches .5.
+// You must call Maintain() periodically, most likely from a goroutine. The call is cheap,
+// and the timing isn't very critical. The effect of going above "capacity" is an increased
+// false positive rate, but the filter continues to function.
+type CuckooTraceChecker struct {
+	current  *cuckoo.Filter  // filter consulted by Check; always non-nil
+	future   *cuckoo.Filter  // nil until current passes 0.5 load; promoted to current on cycle
+	mut      sync.RWMutex    // guards current, future, and capacity
+	capacity uint            // capacity used for filters created from now on
+	met      metrics.Metrics // destination for the load/capacity gauges
+}
+
+// NewCuckooTraceChecker creates a checker with only the current filter
+// allocated; the future filter stays nil until Maintain observes enough load.
+func NewCuckooTraceChecker(capacity uint, m metrics.Metrics) *CuckooTraceChecker {
+	c := &CuckooTraceChecker{capacity: capacity, met: m}
+	c.current = cuckoo.NewFilter(capacity)
+	return c
+}
+
+// Add puts a traceID into the filter. It always inserts into the current
+// filter, and also into the future filter once Maintain has created it.
+// NOTE(review): Insert's boolean result is ignored here; a full cuckoo filter
+// can reject an insert, making a later Check miss — confirm this is acceptable.
+func (c *CuckooTraceChecker) Add(traceID string) {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+	c.current.Insert([]byte(traceID))
+	// don't add anything to future if it doesn't exist yet
+	if c.future != nil {
+		c.future.Insert([]byte(traceID))
+	}
+}
+
+// Check tests if a traceID is (very probably) in the filter.
+// Only the current filter is consulted; as with any cuckoo filter, a rare
+// false positive is possible.
+func (c *CuckooTraceChecker) Check(traceID string) bool {
+	b := []byte(traceID)
+	c.mut.RLock()
+	defer c.mut.RUnlock()
+	return c.current.Lookup(b)
+}
+
+// Maintain should be called periodically; if the current filter is full, it replaces
+// it with the future filter and creates a new future filter.
+func (c *CuckooTraceChecker) Maintain() {
+	// read the load factors and publish gauges under the read lock
+	c.mut.RLock()
+	currentLoadFactor := c.current.LoadFactor()
+	c.met.Gauge(CurrentLoadFactor, currentLoadFactor)
+	if c.future != nil {
+		c.met.Gauge(FutureLoadFactor, c.future.LoadFactor())
+	}
+	c.met.Gauge(CurrentCapacity, c.capacity)
+	c.mut.RUnlock()
+
+	// once the current one is half loaded, we can start using the future one too.
+	// NOTE(review): the load factor snapshot may be stale by the time the write
+	// lock is taken below; that is tolerable since these thresholds are soft.
+	if c.future == nil && currentLoadFactor > 0.5 {
+		c.mut.Lock()
+		c.future = cuckoo.NewFilter(c.capacity)
+		c.mut.Unlock()
+	}
+
+	// if the current one is full, cycle the filters. future is non-nil here
+	// because the 0.5 branch above fires first whenever 0.99 is exceeded.
+	if currentLoadFactor > 0.99 {
+		c.mut.Lock()
+		defer c.mut.Unlock()
+		c.current = c.future
+		c.future = cuckoo.NewFilter(c.capacity)
+	}
+}
+
+// SetNextCapacity adjusts the capacity that will be set for the future filter on the next replacement.
+// Filters that already exist keep their original capacity; only filters created
+// after this call (by Maintain) pick up the new value.
+func (c *CuckooTraceChecker) SetNextCapacity(capacity uint) {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+	c.capacity = capacity
+}
diff --git a/collect/cache/cuckooSentCache.go b/collect/cache/cuckooSentCache.go
new file mode 100644
index 0000000000..d77277f6e5
--- /dev/null
+++ b/collect/cache/cuckooSentCache.go
@@ -0,0 +1,189 @@
+package cache
+
+import (
+ "sync"
+ "time"
+
+ lru "github.com/hashicorp/golang-lru"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/types"
+)
+
+// cuckooSentCache extends Refinery's legacy cache. It keeps the same records
+// for kept traces but adds a pair of cuckoo filters to record dropped traces.
+// This allows many more traces to be kept in the cache; now only kept records
+// are retained in the cache of sentRecords.
+// The size of the sent cache is still set based on the size of the live trace cache,
+// and the size of the dropped cache is an independent value.
+
+// cuckooKeptRecord is an internal record we leave behind when keeping a trace to remember
+// our decision for the future. We only store them if the record was kept.
+type cuckooKeptRecord struct {
+	rate      uint // sample rate used when sending the trace
+	spanCount uint // number of spans in the trace (we decorate the root span with this)
+}
+
+// Kept always reports true; this record type is only stored for kept traces.
+func (t *cuckooKeptRecord) Kept() bool {
+	return true
+}
+
+// Rate returns the sample rate that was used when the trace was sent.
+func (t *cuckooKeptRecord) Rate() uint {
+	return t.rate
+}
+
+// DescendantCount returns the number of spans counted for this trace so far.
+func (t *cuckooKeptRecord) DescendantCount() uint {
+	return uint(t.spanCount)
+}
+
+// Count records one more span against the trace's running total.
+func (t *cuckooKeptRecord) Count(*types.Span) {
+	t.spanCount++
+}
+
+// Make sure it implements TraceSentRecord
+var _ TraceSentRecord = (*cuckooKeptRecord)(nil)
+
+// cuckooDroppedRecord is what we return when the trace was dropped; the cuckoo
+// filter stores no per-trace data, so one zero-value record serves every
+// dropped trace.
+type cuckooDroppedRecord struct{}
+
+// Kept always reports false.
+func (t *cuckooDroppedRecord) Kept() bool {
+	return false
+}
+
+// Rate is zero for dropped traces.
+func (t *cuckooDroppedRecord) Rate() uint {
+	return 0
+}
+
+// DescendantCount is zero; no span counts are tracked for dropped traces.
+func (t *cuckooDroppedRecord) DescendantCount() uint {
+	return 0
+}
+
+// Count is a no-op for dropped traces.
+func (t *cuckooDroppedRecord) Count(*types.Span) {
+}
+
+// Make sure it implements TraceSentRecord
+var _ TraceSentRecord = (*cuckooDroppedRecord)(nil)
+
+type cuckooSentCache struct {
+	kept    *lru.Cache          // LRU of traceID -> *cuckooKeptRecord (kept traces only)
+	dropped *CuckooTraceChecker // cuckoo filters recording IDs of dropped traces
+	cfg     config.SampleCacheConfig
+
+	// The done channel is used to decide when to terminate the monitor
+	// goroutine. When resizing the cache, we write to the channel, but
+	// when terminating the system, call Stop() to close the channel.
+	// Either one causes the goroutine to shut down, and in resizing
+	// we then start a new monitor.
+	done chan struct{}
+
+	// This mutex is for managing kept traces
+	keptMut sync.Mutex
+}
+
+// Make sure it implements TraceSentCache
+var _ TraceSentCache = (*cuckooSentCache)(nil)
+
+// NewCuckooSentCache builds a sent-trace cache that keeps kept-trace records in
+// an LRU of cfg.KeptSize entries and records dropped traces in a cuckoo filter
+// sized by cfg.DroppedSize. It starts a background monitor goroutine; call
+// Stop on the returned cache to terminate it.
+func NewCuckooSentCache(cfg config.SampleCacheConfig, met metrics.Metrics) (TraceSentCache, error) {
+	stc, err := lru.New(int(cfg.KeptSize))
+	if err != nil {
+		return nil, err
+	}
+	dropped := NewCuckooTraceChecker(cfg.DroppedSize, met)
+
+	cache := &cuckooSentCache{
+		kept:    stc,
+		dropped: dropped,
+		cfg:     cfg,
+		done:    make(chan struct{}),
+	}
+	go cache.monitor()
+	return cache, nil
+}
+
+// monitor runs in its own goroutine and calls Maintain on the dropped-trace
+// filter every cfg.SizeCheckInterval, until a value arrives on (or close of)
+// the done channel. The ticker is stopped on exit so a monitor replaced by
+// Resize does not leak its ticker.
+func (c *cuckooSentCache) monitor() {
+	ticker := time.NewTicker(c.cfg.SizeCheckInterval)
+	defer ticker.Stop() // release ticker resources when Stop/Resize ends this goroutine
+	for {
+		select {
+		case <-ticker.C:
+			c.dropped.Maintain()
+		case <-c.done:
+			return
+		}
+	}
+}
+
+// Stop halts the monitor goroutine by closing the done channel.
+// NOTE(review): Resize sends on this same channel; calling Resize after Stop
+// would panic on the closed channel — confirm callers never do that. Stop must
+// also be called at most once (double close panics).
+func (c *cuckooSentCache) Stop() {
+	close(c.done)
+}
+
+// Record preserves the send decision for a trace: kept traces get a full
+// record (rate plus current span count) in the LRU, while dropped traces only
+// set a bit in the cuckoo filter.
+func (c *cuckooSentCache) Record(trace *types.Trace, keep bool) {
+	if keep {
+		// record this decision in the sent record LRU for future spans
+		sentRecord := cuckooKeptRecord{
+			rate:      trace.SampleRate,
+			spanCount: trace.DescendantCount(),
+		}
+		c.keptMut.Lock()
+		defer c.keptMut.Unlock()
+		c.kept.Add(trace.TraceID, &sentRecord)
+		return
+	}
+	// if we're not keeping it, save it in the dropped trace filter
+	c.dropped.Add(trace.TraceID)
+}
+
+// Check looks the span's trace up in the dropped filter first, then in the
+// kept LRU. A dropped hit returns a shared dropped record; a kept hit also
+// counts this span toward the trace's descendant total. Returns (nil, false)
+// when no decision is remembered. NOTE(review): the dropped filter is
+// probabilistic, so a rare false positive can report an unknown trace as dropped.
+func (c *cuckooSentCache) Check(span *types.Span) (TraceSentRecord, bool) {
+	// was it dropped?
+	if c.dropped.Check(span.TraceID) {
+		// we recognize it as dropped, so just say so; there's nothing else to do
+		return &cuckooDroppedRecord{}, false
+	}
+	// was it kept?
+	c.keptMut.Lock()
+	defer c.keptMut.Unlock()
+	if sentRecord, found := c.kept.Get(span.TraceID); found {
+		if sr, ok := sentRecord.(*cuckooKeptRecord); ok {
+			// if we kept it, then this span being checked needs counting too
+			sr.Count(span)
+			return sr, true
+		}
+	}
+	// we have no memory of this place
+	return nil, false
+}
+
+// Resize replaces the kept LRU with one sized to cfg.KeptSize (discarding the
+// oldest entries when shrinking), schedules the dropped filter's capacity
+// change for its next replacement, and restarts the monitor goroutine so it
+// runs with the new configuration.
+func (c *cuckooSentCache) Resize(cfg config.SampleCacheConfig) error {
+	stc, err := lru.New(int(cfg.KeptSize))
+	if err != nil {
+		return err
+	}
+
+	// grab all the items in the current cache; if it's larger than
+	// what will fit in the new one, discard the oldest ones
+	// (we don't have to do anything with the ones we discard, this is
+	// the trace decisions cache).
+	c.keptMut.Lock()
+	defer c.keptMut.Unlock()
+	keys := c.kept.Keys()
+	if len(keys) > int(cfg.KeptSize) {
+		keys = keys[len(keys)-int(cfg.KeptSize):]
+	}
+	// copy all the keys to the new cache in order
+	for _, k := range keys {
+		if v, found := c.kept.Get(k); found {
+			stc.Add(k, v)
+		}
+	}
+	c.kept = stc
+
+	// also set up the drop cache size to change eventually
+	c.dropped.SetNextCapacity(cfg.DroppedSize)
+
+	// shut down the old monitor and create a new one.
+	// NOTE(review): this send panics if Stop has already closed the channel.
+	c.done <- struct{}{}
+	go c.monitor()
+	return nil
+}
diff --git a/collect/cache/legacySentCache.go b/collect/cache/legacySentCache.go
new file mode 100644
index 0000000000..971eb69124
--- /dev/null
+++ b/collect/cache/legacySentCache.go
@@ -0,0 +1,84 @@
+package cache
+
+import (
+ lru "github.com/hashicorp/golang-lru"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/types"
+)
+
+// legacySentCache is Refinery's original traceSent cache. It keeps the same records
+// for both kept and dropped traces and the size of the sent cache is set based on the size
+// of the live trace cache.
+
+// legacySentRecord is an internal record we leave behind when sending a trace to remember
+// our decision for the future, so any delinquent spans that show up later can
+// be dropped or passed along.
+type legacySentRecord struct {
+	keep      bool // true if the trace was kept, false if it was dropped
+	rate      uint // sample rate used when sending the trace
+	spanCount uint // number of spans in the trace (we decorate the root span with this)
+}
+
+// Kept reports whether the trace was kept when the decision was recorded.
+func (t *legacySentRecord) Kept() bool {
+	return t.keep
+}
+
+// Rate returns the sample rate that was used when the trace was sent.
+func (t *legacySentRecord) Rate() uint {
+	return t.rate
+}
+
+// DescendantCount returns the number of spans counted for this trace so far.
+func (t *legacySentRecord) DescendantCount() uint {
+	return uint(t.spanCount)
+}
+
+// Count records one more span against the trace's running total.
+func (t *legacySentRecord) Count(*types.Span) {
+	t.spanCount++
+}
+
+// Make sure it implements TraceSentRecord
+var _ TraceSentRecord = (*legacySentRecord)(nil)
+
+type legacySentCache struct {
+	sentTraceCache *lru.Cache // LRU of traceID -> *legacySentRecord (kept and dropped alike)
+}
+
+// Make sure it implements TraceSentCache
+var _ TraceSentCache = (*legacySentCache)(nil)
+
+// NewLegacySentCache returns a TraceSentCache backed by a single LRU of the
+// given capacity; it errs only if the LRU cannot be constructed.
+func NewLegacySentCache(capacity int) (TraceSentCache, error) {
+	stc, err := lru.New(capacity)
+	if err != nil {
+		return nil, err
+	}
+	return &legacySentCache{sentTraceCache: stc}, nil
+}
+
+// Record stores the keep/drop decision (plus sample rate and span count) for
+// this trace so that late-arriving spans can be handled consistently.
+func (c *legacySentCache) Record(trace *types.Trace, keep bool) {
+	rec := &legacySentRecord{
+		keep:      keep,
+		rate:      trace.SampleRate,
+		spanCount: trace.DescendantCount(),
+	}
+	c.sentTraceCache.Add(trace.TraceID, rec)
+}
+
+// Check reports whether a send decision is remembered for this span's trace;
+// on a hit, the span is counted against the record before it is returned.
+func (c *legacySentCache) Check(span *types.Span) (TraceSentRecord, bool) {
+	if sentRecord, found := c.sentTraceCache.Get(span.TraceID); found {
+		if sr, ok := sentRecord.(*legacySentRecord); ok {
+			sr.Count(span)
+			return sr, true
+		}
+	}
+	return nil, false
+}
+
+// Stop is a no-op: the legacy cache runs no background goroutine.
+func (c *legacySentCache) Stop() {
+}
+
+// Resize is a no-op: the legacy cache's size is fixed at construction time.
+func (c *legacySentCache) Resize(cfg config.SampleCacheConfig) error {
+	return nil
+}
diff --git a/collect/cache/traceSentCache.go b/collect/cache/traceSentCache.go
new file mode 100644
index 0000000000..af6add6113
--- /dev/null
+++ b/collect/cache/traceSentCache.go
@@ -0,0 +1,29 @@
+package cache
+
+import (
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/types"
+)
+
+type TraceSentRecord interface {
+	// Kept returns whether the trace was kept (sampled and sent upstream) or dropped.
+	Kept() bool
+	// Rate returns the sample rate for the trace
+	Rate() uint
+	// DescendantCount returns the count of items associated with the trace, including all types of children like span links and span events.
+	DescendantCount() uint
+	// Count records additional spans in the totals
+	Count(*types.Span)
+}
+
+type TraceSentCache interface {
+	// Record preserves the record of a trace being sent or not.
+	Record(trace *types.Trace, keep bool)
+	// Check tests if a trace corresponding to the span is in the cache; if found, it returns the appropriate TraceSentRecord and true,
+	// else nil and false.
+	Check(span *types.Span) (TraceSentRecord, bool)
+	// Stop halts the cache in preparation for shutdown
+	Stop()
+	// Resize adjusts the size of the cache according to the Config passed in
+	Resize(cfg config.SampleCacheConfig) error
+}
diff --git a/collect/collect.go b/collect/collect.go
index c505bbe711..d4d53b03f0 100644
--- a/collect/collect.go
+++ b/collect/collect.go
@@ -3,20 +3,25 @@ package collect
import (
"errors"
"fmt"
+ "github.com/opsramp/tracing-proxy/collect/cache"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/sample"
+ "github.com/opsramp/tracing-proxy/transmit"
+ "github.com/opsramp/tracing-proxy/types"
+ "github.com/sirupsen/logrus"
"os"
"runtime"
"sort"
"sync"
"time"
+)
- lru "github.com/hashicorp/golang-lru"
- "github.com/honeycombio/refinery/collect/cache"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/sample"
- "github.com/honeycombio/refinery/transmit"
- "github.com/honeycombio/refinery/types"
+const (
+ resourceAttributesKey = "resourceAttributes"
+ spanAttributesKey = "spanAttributes"
+ eventAttributesKey = "eventAttributes"
)
var ErrWouldBlock = errors.New("not adding span, channel buffer is full")
@@ -46,12 +51,20 @@ func GetCollectorImplementation(c config.Config) Collector {
return collector
}
-// InMemCollector is a single threaded collector
+// These are the names of the metrics we use to track our send decisions.
+const (
+ TraceSendGotRoot = "trace_send_got_root"
+ TraceSendExpired = "trace_send_expired"
+ TraceSendEjectedFull = "trace_send_ejected_full"
+ TraceSendEjectedMemsize = "trace_send_ejected_memsize"
+)
+
+// InMemCollector is a single threaded collector.
type InMemCollector struct {
Config config.Config `inject:""`
Logger logger.Logger `inject:""`
Transmission transmit.Transmission `inject:"upstreamTransmission"`
- Metrics metrics.Metrics `inject:""`
+ Metrics metrics.Metrics `inject:"metrics"`
SamplerFactory *sample.SamplerFactory `inject:""`
// For test use only
@@ -64,19 +77,13 @@ type InMemCollector struct {
cache cache.Cache
datasetSamplers map[string]sample.Sampler
- sentTraceCache *lru.Cache
+ sampleTraceCache cache.TraceSentCache
incoming chan *types.Span
fromPeer chan *types.Span
reload chan struct{}
-}
-// traceSentRecord is the bit we leave behind when sending a trace to remember
-// our decision for the future, so any delinquent spans that show up later can
-// be dropped or passed along.
-type traceSentRecord struct {
- keep bool // true if the trace was kept, false if it was dropped
- rate uint // sample rate used when sending the trace
+ hostname string
}
func (i *InMemCollector) Start() error {
@@ -92,27 +99,108 @@ func (i *InMemCollector) Start() error {
i.Config.RegisterReloadCallback(i.sendReloadSignal)
i.Metrics.Register("trace_duration_ms", "histogram")
- i.Metrics.Register("trace_span_count", "histogram")
+ i.Metrics.Register("trace_spans_count_total", "histogram")
i.Metrics.Register("collector_tosend_queue", "histogram")
i.Metrics.Register("collector_incoming_queue", "histogram")
i.Metrics.Register("collector_peer_queue", "histogram")
+ i.Metrics.Register("collector_cache_size", "gauge")
+ i.Metrics.Register("memory_heap_allocation", "gauge")
i.Metrics.Register("trace_sent_cache_hit", "counter")
i.Metrics.Register("trace_accepted", "counter")
i.Metrics.Register("trace_send_kept", "counter")
i.Metrics.Register("trace_send_dropped", "counter")
i.Metrics.Register("trace_send_has_root", "counter")
i.Metrics.Register("trace_send_no_root", "counter")
-
- stc, err := lru.New(imcConfig.CacheCapacity * 5) // keep 5x ring buffer size
- if err != nil {
- return err
+ i.Metrics.Register(TraceSendGotRoot, "counter")
+ i.Metrics.Register(TraceSendExpired, "counter")
+ i.Metrics.Register(TraceSendEjectedFull, "counter")
+ i.Metrics.Register(TraceSendEjectedMemsize, "counter")
+
+ i.Metrics.RegisterWithDescriptionLabels(
+ "trace_operations_latency_ms",
+ "gauge",
+ "Trace latency wrt each trace operation",
+ []string{"service_name", "operation", "app"},
+ )
+ i.Metrics.RegisterWithDescriptionLabels(
+ "trace_operations_failed",
+ "counter",
+ "Number of Error events in spans wrt each trace operation",
+ []string{"service_name", "operation", "app"},
+ )
+ i.Metrics.RegisterWithDescriptionLabels(
+ "trace_operations_succeeded",
+ "counter",
+ "Number of Succeeded events in spans wrt each trace operation",
+ []string{"service_name", "operation", "app"},
+ )
+ i.Metrics.RegisterWithDescriptionLabels(
+ "trace_operations_total",
+ "counter",
+ "Total Number of events in spans wrt each trace operation",
+ []string{"service_name", "operation", "app"},
+ )
+ i.Metrics.RegisterWithDescriptionLabels(
+ "trace_root_span",
+ "counter",
+ "Number of root spans in an operation",
+ []string{"service_name", "operation", "app"},
+ )
+ i.Metrics.RegisterWithDescriptionLabels(
+ "trace_spans_count",
+ "counter",
+ "Number of spans in an operation",
+ []string{"service_name", "operation", "app"},
+ )
+ i.Metrics.RegisterWithDescriptionLabels(
+ "trace_root_operation_latency_ms",
+ "gauge",
+ "Trace latency wrt each root trace operation",
+ []string{"service_name", "operation", "app"},
+ )
+ i.Metrics.RegisterWithDescriptionLabels(
+ "trace_root_operations_failed",
+ "counter",
+ "Number of Error events in root spans wrt each trace operation",
+ []string{"service_name", "operation", "app"},
+ )
+ i.Metrics.RegisterWithDescriptionLabels(
+ "trace_operations_error",
+ "gauge",
+ "Trace errors wrt each trace operation / trace_span_count",
+ []string{"service_name", "operation", "app"},
+ )
+
+ sampleCacheConfig := i.Config.GetSampleCacheConfig()
+ switch sampleCacheConfig.Type {
+ case "legacy", "":
+ i.sampleTraceCache, err = cache.NewLegacySentCache(imcConfig.CacheCapacity * 5) // (keep 5x ring buffer size)
+ if err != nil {
+ return err
+ }
+ case "cuckoo":
+ i.Metrics.Register(cache.CurrentCapacity, "gauge")
+ i.Metrics.Register(cache.FutureLoadFactor, "gauge")
+ i.Metrics.Register(cache.CurrentLoadFactor, "gauge")
+ i.sampleTraceCache, err = cache.NewCuckooSentCache(sampleCacheConfig, i.Metrics)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("validation failure - sampleTraceCache had invalid config type '%s'", sampleCacheConfig.Type)
}
- i.sentTraceCache = stc
i.incoming = make(chan *types.Span, imcConfig.CacheCapacity*3)
i.fromPeer = make(chan *types.Span, imcConfig.CacheCapacity*3)
i.reload = make(chan struct{}, 1)
i.datasetSamplers = make(map[string]sample.Sampler)
+
+ if i.Config.GetAddHostMetadataToTrace() {
+ if hostname, err := os.Hostname(); err == nil && hostname != "" {
+ i.hostname = hostname
+ }
+ }
+
// spin up one collector because this is a single threaded collector
go i.collect()
@@ -144,15 +232,17 @@ func (i *InMemCollector) reloadConfigs() {
// pull the old cache contents into the new cache
for j, trace := range existingCache.GetAll() {
if j >= imcConfig.CacheCapacity {
- i.send(trace)
+ i.send(trace, TraceSendEjectedFull)
continue
}
c.Set(trace)
}
i.cache = c
} else {
- i.Logger.Debug().Logf("skipping reloading the cache on config reload because it hasn't changed capacity")
+ i.Logger.Debug().Logf("skipping reloading the in-memory cache on config reload because it hasn't changed capacity")
}
+
+ i.sampleTraceCache.Resize(i.Config.GetSampleCacheConfig())
} else {
i.Logger.Error().WithField("cache", i.cache.(*cache.DefaultInMemCache)).Logf("skipping reloading the cache on config reload because it's not an in-memory cache")
}
@@ -163,17 +253,21 @@ func (i *InMemCollector) reloadConfigs() {
// TODO add resizing the LRU sent trace cache on config reload
}
-func (i *InMemCollector) checkAlloc() {
+func (i *InMemCollector) oldCheckAlloc() {
inMemConfig, err := i.Config.GetInMemCollectorCacheCapacity()
var mem runtime.MemStats
runtime.ReadMemStats(&mem)
+ i.Metrics.Gauge("memory_heap_allocation", int64(mem.Alloc))
if err != nil || inMemConfig.MaxAlloc == 0 || mem.Alloc < inMemConfig.MaxAlloc {
return
}
existingCache, ok := i.cache.(*cache.DefaultInMemCache)
- if !ok || existingCache.GetCacheSize() < 100 {
+ existingSize := existingCache.GetCacheSize()
+ i.Metrics.Gauge("collector_cache_size", existingSize)
+
+ if !ok || existingSize < 100 {
i.Logger.Error().WithField("alloc", mem.Alloc).Logf(
"total allocation exceeds limit, but unable to shrink cache",
)
@@ -183,14 +277,13 @@ func (i *InMemCollector) checkAlloc() {
// Reduce cache size by a fixed 10%, successive overages will continue to shrink.
// Base this on the total number of actual traces, which may be fewer than
// the cache capacity.
- oldCap := existingCache.GetCacheSize()
oldTraces := existingCache.GetAll()
newCap := int(float64(len(oldTraces)) * 0.9)
// Treat any MaxAlloc overage as an error. The configured cache capacity
// should be reduced to avoid this condition.
i.Logger.Error().
- WithField("cache_size.previous", oldCap).
+ WithField("cache_size.previous", existingSize).
WithField("cache_size.new", newCap).
WithField("alloc", mem.Alloc).
Logf("reducing cache size due to memory overage")
@@ -204,7 +297,7 @@ func (i *InMemCollector) checkAlloc() {
// Send the traces we can't keep, put the rest into the new cache.
for _, trace := range oldTraces[:len(oldTraces)-newCap] {
- i.send(trace)
+ i.send(trace, TraceSendEjectedMemsize)
}
for _, trace := range oldTraces[len(oldTraces)-newCap:] {
c.Set(trace)
@@ -217,6 +310,76 @@ func (i *InMemCollector) checkAlloc() {
runtime.GC()
}
+func (i *InMemCollector) newCheckAlloc() {
+ inMemConfig, err := i.Config.GetInMemCollectorCacheCapacity()
+
+ var mem runtime.MemStats
+ runtime.ReadMemStats(&mem)
+ i.Metrics.Gauge("memory_heap_allocation", int64(mem.Alloc))
+ if err != nil || inMemConfig.MaxAlloc == 0 || mem.Alloc < inMemConfig.MaxAlloc {
+ return
+ }
+
+ // Figure out what fraction of the total cache we should remove. We'd like it to be
+ // enough to get us below the max capacity, but not TOO much below.
+ // Because our impact numbers are only the data size, reducing by enough to reach
+ // max alloc will actually do more than that.
+ totalToRemove := mem.Alloc - inMemConfig.MaxAlloc
+
+ // The size of the cache exceeds the user's intended allocation, so we're going to
+ // remove the traces from the cache that have had the most impact on allocation.
+ // To do this, we sort the traces by their CacheImpact value and then remove traces
+ // until the total size is less than the amount to which we want to shrink.
+ existingCache, ok := i.cache.(*cache.DefaultInMemCache)
+ if !ok {
+ i.Logger.Error().WithField("alloc", mem.Alloc).Logf(
+ "total allocation exceeds limit, but unable to control cache",
+ )
+ return
+ }
+ allTraces := existingCache.GetAll()
+ timeout, err := i.Config.GetTraceTimeout()
+ if err != nil {
+ timeout = 60 * time.Second
+ } // Sort traces by CacheImpact, heaviest first
+ sort.Slice(allTraces, func(i, j int) bool {
+ return allTraces[i].CacheImpact(timeout) > allTraces[j].CacheImpact(timeout)
+ })
+
+ // Now start removing the biggest traces, by summing up DataSize for
+ // successive traces until we've crossed the totalToRemove threshold
+ // or just run out of traces to delete.
+
+ cap := existingCache.GetCacheSize()
+ i.Metrics.Gauge("collector_cache_size", cap)
+
+ totalDataSizeSent := 0
+ tracesSent := make(map[string]struct{})
+ // Send the traces we can't keep.
+ for _, trace := range allTraces {
+ tracesSent[trace.TraceID] = struct{}{}
+ totalDataSizeSent += trace.DataSize
+ i.send(trace, TraceSendEjectedMemsize)
+ if totalDataSizeSent > int(totalToRemove) {
+ break
+ }
+ }
+ existingCache.RemoveTraces(tracesSent)
+
+ // Treat any MaxAlloc overage as an error so we know it's happening
+ i.Logger.Error().
+ WithField("cache_size", cap).
+ WithField("alloc", mem.Alloc).
+ WithField("num_traces_sent", len(tracesSent)).
+ WithField("datasize_sent", totalDataSizeSent).
+ WithField("new_trace_count", existingCache.GetCacheSize()).
+ Logf("evicting large traces early due to memory overage")
+
+ // Manually GC here - without this we can easily end up evicting more than we
+ // need to, since total alloc won't be updated until after a GC pass.
+ runtime.GC()
+}
+
// AddSpan accepts the incoming span to a queue and returns immediately
func (i *InMemCollector) AddSpan(sp *types.Span) error {
return i.add(sp, i.incoming)
@@ -261,7 +424,7 @@ func (i *InMemCollector) collect() {
i.Metrics.Histogram("collector_incoming_queue", float64(len(i.incoming)))
i.Metrics.Histogram("collector_peer_queue", float64(len(i.fromPeer)))
- // Always drain peer channel before doing anyhting else. By processing peer
+ // Always drain peer channel before doing anything else. By processing peer
// traffic preferentially we avoid the situation where the cluster essentially
// deadlocks because peers are waiting to get their events handed off to each
// other.
@@ -276,7 +439,14 @@ func (i *InMemCollector) collect() {
select {
case <-ticker.C:
i.sendTracesInCache(time.Now())
- i.checkAlloc()
+ switch i.Config.GetCacheOverrunStrategy() {
+ case "impact":
+ i.newCheckAlloc()
+ case "resize":
+ i.oldCheckAlloc()
+ default:
+ i.oldCheckAlloc()
+ }
// Briefly unlock the cache, to allow test access.
i.mutex.Unlock()
@@ -306,7 +476,11 @@ func (i *InMemCollector) collect() {
func (i *InMemCollector) sendTracesInCache(now time.Time) {
traces := i.cache.TakeExpiredTraces(now)
for _, t := range traces {
- i.send(t)
+ if t.RootSpan != nil {
+ i.send(t, TraceSendGotRoot)
+ } else {
+ i.send(t, TraceSendExpired)
+ }
}
}
@@ -316,40 +490,43 @@ func (i *InMemCollector) processSpan(sp *types.Span) {
trace := i.cache.Get(sp.TraceID)
if trace == nil {
// if the trace has already been sent, just pass along the span
- if sentRecord, found := i.sentTraceCache.Get(sp.TraceID); found {
- if sr, ok := sentRecord.(*traceSentRecord); ok {
- i.Metrics.IncrementCounter("trace_sent_cache_hit")
- i.dealWithSentTrace(sr.keep, sr.rate, sp)
- return
- }
+ if sr, found := i.sampleTraceCache.Check(sp); found {
+ i.Metrics.Increment("trace_sent_cache_hit")
+ // bump the count of records on this trace -- if the root span isn't
+ // the last late span, then it won't be perfect, but it will be better than
+ // having none at all
+ i.dealWithSentTrace(sr.Kept(), sr.Rate(), sr.DescendantCount(), sp)
+ return
}
// trace hasn't already been sent (or this span is really old); let's
// create a new trace to hold it
- i.Metrics.IncrementCounter("trace_accepted")
+ i.Metrics.Increment("trace_accepted")
timeout, err := i.Config.GetTraceTimeout()
if err != nil {
timeout = 60 * time.Second
}
+ now := time.Now()
trace = &types.Trace{
- APIHost: sp.APIHost,
- APIKey: sp.APIKey,
- Dataset: sp.Dataset,
- TraceID: sp.TraceID,
- StartTime: time.Now(),
- SendBy: time.Now().Add(timeout),
+ APIHost: sp.APIHost,
+ APIKey: sp.APIKey,
+ Dataset: sp.Dataset,
+ TraceID: sp.TraceID,
+ ArrivalTime: now,
+ SendBy: now.Add(timeout),
+ SampleRate: sp.SampleRate, // if it had a sample rate, we want to keep it
}
// push this into the cache and if we eject an unsent trace, send it ASAP
ejectedTrace := i.cache.Set(trace)
if ejectedTrace != nil {
- i.send(ejectedTrace)
+ i.send(ejectedTrace, TraceSendEjectedFull)
}
}
// if the trace we got back from the cache has already been sent, deal with the
// span.
- if trace.Sent == true {
- i.dealWithSentTrace(trace.KeepSample, trace.SampleRate, sp)
+ if trace.Sent {
+ i.dealWithSentTrace(trace.KeepSample, trace.SampleRate, trace.DescendantCount(), sp)
}
// great! trace is live. add the span.
@@ -364,14 +541,14 @@ func (i *InMemCollector) processSpan(sp *types.Span) {
}
trace.SendBy = time.Now().Add(timeout)
- trace.HasRootSpan = true
+ trace.RootSpan = sp
}
}
// dealWithSentTrace handles a span that has arrived after the sampling decision
// on the trace has already been made, and it obeys that decision by either
// sending the span immediately or dropping it.
-func (i *InMemCollector) dealWithSentTrace(keep bool, sampleRate uint, sp *types.Span) {
+func (i *InMemCollector) dealWithSentTrace(keep bool, sampleRate uint, spanCount uint, sp *types.Span) {
if i.Config.GetIsDryRun() {
field := i.Config.GetDryRunFieldName()
// if dry run mode is enabled, we keep all traces and mark the spans with the sampling decision
@@ -384,15 +561,41 @@ func (i *InMemCollector) dealWithSentTrace(keep bool, sampleRate uint, sp *types
}
if keep {
i.Logger.Debug().WithField("trace_id", sp.TraceID).Logf("Sending span because of previous decision to send trace")
- sp.SampleRate *= sampleRate
+ mergeTraceAndSpanSampleRates(sp, sampleRate)
+ // if this span is a late root span, possibly update it with our current span count
+ if i.Config.GetAddSpanCountToRoot() && isRootSpan(sp) {
+ sp.Data["meta.span_count"] = int64(spanCount)
+ }
i.Transmission.EnqueueSpan(sp)
return
}
i.Logger.Debug().WithField("trace_id", sp.TraceID).Logf("Dropping span because of previous decision to drop trace")
}
+func mergeTraceAndSpanSampleRates(sp *types.Span, traceSampleRate uint) {
+ if traceSampleRate != 1 {
+ // When the sample rate from the trace is not 1 that means we are
+ // going to mangle the span sample rate. Write down the original sample
+ // rate so that the information is more easily recovered
+ sp.Data["meta.refinery.original_sample_rate"] = sp.SampleRate
+ }
+
+ if sp.SampleRate < 1 {
+ // See https://docs.honeycomb.io/manage-data-volume/sampling/
+ // SampleRate is the denominator of the ratio of sampled spans
+ // Honeycomb treats a missing or 0 SampleRate the same as 1, but
+ // behaves better/more consistently if the SampleRate is explicitly
+ // set instead of inferred
+ sp.SampleRate = 1
+ }
+
+ // if spans are already sampled, take that into account when computing
+ // the final rate
+ sp.SampleRate *= traceSampleRate
+}
+
func isRootSpan(sp *types.Span) bool {
- parentID := sp.Data["trace.parent_id"]
+ parentID := sp.Data["traceParentID"]
if parentID == nil {
parentID = sp.Data["parentId"]
if parentID == nil {
@@ -403,8 +606,8 @@ func isRootSpan(sp *types.Span) bool {
return false
}
-func (i *InMemCollector) send(trace *types.Trace) {
- if trace.Sent == true {
+func (i *InMemCollector) send(trace *types.Trace, reason string) {
+ if trace.Sent {
// someone else already sent this so we shouldn't also send it. This happens
// when two timers race and two signals for the same trace are sent down the
// toSend channel
@@ -416,60 +619,120 @@ func (i *InMemCollector) send(trace *types.Trace) {
}
trace.Sent = true
- traceDur := time.Now().Sub(trace.StartTime)
+ traceDur := time.Since(trace.ArrivalTime)
i.Metrics.Histogram("trace_duration_ms", float64(traceDur.Milliseconds()))
- i.Metrics.Histogram("trace_span_count", float64(len(trace.GetSpans())))
- if trace.HasRootSpan {
- i.Metrics.IncrementCounter("trace_send_has_root")
+ i.Metrics.Histogram("trace_spans_count_total", float64(trace.DescendantCount()))
+ if trace.RootSpan != nil {
+ i.Metrics.Increment("trace_send_has_root")
} else {
- i.Metrics.IncrementCounter("trace_send_no_root")
+ i.Metrics.Increment("trace_send_no_root")
+ }
+
+ // Add metrics for latency/duration per operation
+ for _, span := range trace.GetSpans() {
+ if span.Data == nil {
+ continue
+ }
+
+ labelToKeyMap := map[string][]string{
+ "service_name": {"service_name", "service.name"},
+ "operation": {"spanName"},
+ "app": {"app"},
+ }
+
+ labels := metrics.ExtractLabelsFromSpan(span, labelToKeyMap)
+
+ durationMsString, ok := span.Data["durationMs"]
+ if ok && durationMsString != nil {
+ i.Metrics.GaugeWithLabels("trace_operations_latency_ms", labels, metrics.ConvertNumeric(durationMsString))
+ }
+ if isRootSpan(span) {
+ i.Metrics.GaugeWithLabels("trace_root_operation_latency_ms", labels, metrics.ConvertNumeric(durationMsString))
+ i.Metrics.IncrementWithLabels("trace_root_span", labels)
+ }
+ i.Metrics.IncrementWithLabels("trace_spans_count", labels)
+
+ errorStatus, ok := span.Data["error"]
+ if ok && errorStatus != nil && errorStatus.(bool) {
+ i.Metrics.IncrementWithLabels("trace_operations_failed", labels)
+ i.Metrics.IncrementWithLabels("trace_operations_total", labels)
+ if isRootSpan(span) {
+ i.Metrics.IncrementWithLabels("trace_root_operations_failed", labels)
+ }
+ } else {
+ i.Metrics.IncrementWithLabels("trace_operations_succeeded", labels)
+ i.Metrics.IncrementWithLabels("trace_operations_total", labels)
+ }
}
+ i.Metrics.Increment(reason)
+
var sampler sample.Sampler
var found bool
- if sampler, found = i.datasetSamplers[trace.Dataset]; !found {
- sampler = i.SamplerFactory.GetSamplerImplementationForDataset(trace.Dataset)
- // save sampler for later
- i.datasetSamplers[trace.Dataset] = sampler
+ // get sampler key (dataset for legacy keys, environment for new keys)
+ samplerKey, isLegacyKey := trace.GetSamplerKey()
+ logFields := logrus.Fields{
+ "trace_id": trace.TraceID,
+ }
+ if isLegacyKey {
+ logFields["dataset"] = samplerKey
+ } else {
+ logFields["environment"] = samplerKey
+ }
+
+ // If we have a root span, update it with the count before determining the SampleRate.
+ if i.Config.GetAddSpanCountToRoot() && trace.RootSpan != nil {
+ trace.RootSpan.Data["meta.span_count"] = int64(trace.DescendantCount())
+ }
+
+ // use sampler key to find sampler; create and cache if not found
+ if sampler, found = i.datasetSamplers[samplerKey]; !found {
+ sampler = i.SamplerFactory.GetSamplerImplementationForKey(samplerKey, isLegacyKey)
+ i.datasetSamplers[samplerKey] = sampler
}
// make sampling decision and update the trace
- rate, shouldSend := sampler.GetSampleRate(trace)
+ rate, shouldSend, reason := sampler.GetSampleRate(trace)
trace.SampleRate = rate
trace.KeepSample = shouldSend
+ logFields["reason"] = reason
- // record this decision in the sent record LRU for future spans
- sentRecord := traceSentRecord{
- keep: shouldSend,
- rate: rate,
- }
- i.sentTraceCache.Add(trace.TraceID, &sentRecord)
+ i.sampleTraceCache.Record(trace, shouldSend)
// if we're supposed to drop this trace, and dry run mode is not enabled, then we're done.
if !shouldSend && !i.Config.GetIsDryRun() {
- i.Metrics.IncrementCounter("trace_send_dropped")
- i.Logger.Info().WithString("trace_id", trace.TraceID).WithString("dataset", trace.Dataset).Logf("Dropping trace because of sampling, trace to dataset")
+ i.Metrics.Increment("trace_send_dropped")
+ i.Logger.Info().WithFields(logFields).Logf("Dropping trace because of sampling")
return
}
- i.Metrics.IncrementCounter("trace_send_kept")
+ i.Metrics.Increment("trace_send_kept")
// ok, we're not dropping this trace; send all the spans
if i.Config.GetIsDryRun() && !shouldSend {
- i.Logger.Info().WithString("trace_id", trace.TraceID).WithString("dataset", trace.Dataset).Logf("Trace would have been dropped, but dry run mode is enabled")
+ i.Logger.Info().WithFields(logFields).Logf("Trace would have been dropped, but dry run mode is enabled")
}
- i.Logger.Info().WithString("trace_id", trace.TraceID).WithString("dataset", trace.Dataset).Logf("Sending trace to dataset")
+ i.Logger.Debug().WithFields(logFields).Logf("Sending trace")
for _, sp := range trace.GetSpans() {
- if sp.SampleRate < 1 {
- sp.SampleRate = 1
+ if i.Config.GetAddRuleReasonToTrace() {
+ sp.Data["meta.reason"] = reason
+ }
+
+ // update the root span (if we have one, which we might not if the trace timed out)
+ // with the final total as of our send time
+ if i.Config.GetAddSpanCountToRoot() && isRootSpan(sp) {
+ sp.Data["meta.span_count"] = int64(trace.DescendantCount())
}
+
if i.Config.GetIsDryRun() {
field := i.Config.GetDryRunFieldName()
sp.Data[field] = shouldSend
}
- // if spans are already sampled, take that in to account when computing
- // the final rate
- sp.SampleRate *= trace.SampleRate
+
+ if i.hostname != "" {
+ sp.Data["meta.local_hostname"] = i.hostname
+ }
+ mergeTraceAndSpanSampleRates(sp, trace.SampleRate)
i.Transmission.EnqueueSpan(sp)
}
}
@@ -486,13 +749,16 @@ func (i *InMemCollector) Stop() error {
traces := i.cache.GetAll()
for _, trace := range traces {
if trace != nil {
- i.send(trace)
+ i.send(trace, TraceSendEjectedFull)
}
}
}
if i.Transmission != nil {
i.Transmission.Flush()
}
+
+ i.sampleTraceCache.Stop()
+
return nil
}
diff --git a/collect/collect_benchmark_test.go b/collect/collect_benchmark_test.go
index 5fbc753fec..4fc449f859 100644
--- a/collect/collect_benchmark_test.go
+++ b/collect/collect_benchmark_test.go
@@ -1,5 +1,3 @@
-// +build all race
-
package collect
import (
@@ -8,16 +6,15 @@ import (
"testing"
"time"
- lru "github.com/hashicorp/golang-lru"
"github.com/stretchr/testify/assert"
- "github.com/honeycombio/refinery/collect/cache"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/sample"
- "github.com/honeycombio/refinery/transmit"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/collect/cache"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/sample"
+ "github.com/opsramp/tracing-proxy/transmit"
+ "github.com/opsramp/tracing-proxy/types"
)
func BenchmarkCollect(b *testing.B) {
@@ -37,7 +34,7 @@ func BenchmarkCollect(b *testing.B) {
metric := &metrics.MockMetrics{}
metric.Start()
- stc, err := lru.New(15)
+ stc, err := cache.NewLegacySentCache(15)
assert.NoError(b, err, "lru cache should start")
coll := &InMemCollector{
@@ -49,12 +46,12 @@ func BenchmarkCollect(b *testing.B) {
Config: conf,
Logger: log,
},
- BlockOnAddSpan: true,
- cache: cache.NewInMemCache(3, metric, log),
- incoming: make(chan *types.Span, 500),
- fromPeer: make(chan *types.Span, 500),
- datasetSamplers: make(map[string]sample.Sampler),
- sentTraceCache: stc,
+ BlockOnAddSpan: true,
+ cache: cache.NewInMemCache(3, metric, log),
+ incoming: make(chan *types.Span, 500),
+ fromPeer: make(chan *types.Span, 500),
+ datasetSamplers: make(map[string]sample.Sampler),
+ sampleTraceCache: stc,
}
go coll.collect()
diff --git a/collect/collect_test.go b/collect/collect_test.go
index bda6a84313..73c7d085e9 100644
--- a/collect/collect_test.go
+++ b/collect/collect_test.go
@@ -1,25 +1,28 @@
-// +build all race
-
package collect
import (
+ "fmt"
+ "math/rand"
"runtime"
"strconv"
+ "strings"
"testing"
"time"
- lru "github.com/hashicorp/golang-lru"
+ "github.com/facebookgo/inject"
"github.com/stretchr/testify/assert"
- "github.com/honeycombio/refinery/collect/cache"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/sample"
- "github.com/honeycombio/refinery/transmit"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/collect/cache"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/sample"
+ "github.com/opsramp/tracing-proxy/transmit"
+ "github.com/opsramp/tracing-proxy/types"
)
+const legacyAPIKey = "***REMOVED***"
+
// TestAddRootSpan tests that adding a root span winds up with a trace object in
// the cache and that that trace gets sent
func TestAddRootSpan(t *testing.T) {
@@ -44,9 +47,9 @@ func TestAddRootSpan(t *testing.T) {
c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{})
coll.cache = c
- stc, err := lru.New(15)
+ stc, err := cache.NewLegacySentCache(15)
assert.NoError(t, err, "lru cache should start")
- coll.sentTraceCache = stc
+ coll.sampleTraceCache = stc
coll.incoming = make(chan *types.Span, 5)
coll.fromPeer = make(chan *types.Span, 5)
@@ -61,6 +64,7 @@ func TestAddRootSpan(t *testing.T) {
TraceID: traceID1,
Event: types.Event{
Dataset: "aoeu",
+ APIKey: legacyAPIKey,
},
}
coll.AddSpan(span)
@@ -80,6 +84,7 @@ func TestAddRootSpan(t *testing.T) {
TraceID: traceID2,
Event: types.Event{
Dataset: "aoeu",
+ APIKey: legacyAPIKey,
},
}
coll.AddSpanFromPeer(span)
@@ -95,6 +100,127 @@ func TestAddRootSpan(t *testing.T) {
transmission.Mux.RUnlock()
}
+// #490, SampleRate getting stomped could cause confusion if sampling was
+// happening upstream of refinery. Writing down what got sent to refinery
+// will help people figure out what is going on.
+func TestOriginalSampleRateIsNotedInMetaField(t *testing.T) {
+ transmission := &transmit.MockTransmission{}
+ transmission.Start()
+ conf := &config.MockConfig{
+ GetSendDelayVal: 0,
+ GetTraceTimeoutVal: 60 * time.Second,
+ GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 2},
+ SendTickerVal: 2 * time.Millisecond,
+ }
+ coll := &InMemCollector{
+ Config: conf,
+ Logger: &logger.NullLogger{},
+ Transmission: transmission,
+ Metrics: &metrics.NullMetrics{},
+ SamplerFactory: &sample.SamplerFactory{
+ Config: conf,
+ Logger: &logger.NullLogger{},
+ },
+ }
+
+ c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{})
+ coll.cache = c
+ stc, err := cache.NewLegacySentCache(15)
+ assert.NoError(t, err, "lru cache should start")
+ coll.sampleTraceCache = stc
+
+ coll.incoming = make(chan *types.Span, 5)
+ coll.fromPeer = make(chan *types.Span, 5)
+ coll.datasetSamplers = make(map[string]sample.Sampler)
+ go coll.collect()
+ defer coll.Stop()
+
+ // Spin until a sample gets triggered
+ sendAttemptCount := 0
+ for getEventsLength(transmission) < 1 || sendAttemptCount > 10 {
+ sendAttemptCount++
+ span := &types.Span{
+ TraceID: fmt.Sprintf("trace-%v", sendAttemptCount),
+ Event: types.Event{
+ Dataset: "aoeu",
+ APIKey: legacyAPIKey,
+ SampleRate: 50,
+ Data: make(map[string]interface{}),
+ },
+ }
+ coll.AddSpan(span)
+ time.Sleep(conf.SendTickerVal * 2)
+ }
+
+ transmission.Mux.RLock()
+ assert.Greater(t, len(transmission.Events), 0, "should be some events transmitted")
+ assert.Equal(t, uint(50), transmission.Events[0].Data["meta.refinery.original_sample_rate"], "metadata should be populated with original sample rate")
+ transmission.Mux.RUnlock()
+}
+
+// Honeycomb treats a missing or 0 SampleRate the same as 1, but
+// behaves better/more consistently if the SampleRate is explicitly
+// set instead of inferred
+func TestTransmittedSpansShouldHaveASampleRateOfAtLeastOne(t *testing.T) {
+ transmission := &transmit.MockTransmission{}
+ transmission.Start()
+ conf := &config.MockConfig{
+ GetSendDelayVal: 0,
+ GetTraceTimeoutVal: 60 * time.Second,
+ GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1},
+ SendTickerVal: 2 * time.Millisecond,
+ }
+ coll := &InMemCollector{
+ Config: conf,
+ Logger: &logger.NullLogger{},
+ Transmission: transmission,
+ Metrics: &metrics.NullMetrics{},
+ SamplerFactory: &sample.SamplerFactory{
+ Config: conf,
+ Logger: &logger.NullLogger{},
+ },
+ }
+
+ c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{})
+ coll.cache = c
+ stc, err := cache.NewLegacySentCache(15)
+ assert.NoError(t, err, "lru cache should start")
+ coll.sampleTraceCache = stc
+
+ coll.incoming = make(chan *types.Span, 5)
+ coll.fromPeer = make(chan *types.Span, 5)
+ coll.datasetSamplers = make(map[string]sample.Sampler)
+ go coll.collect()
+ defer coll.Stop()
+
+ span := &types.Span{
+ TraceID: fmt.Sprintf("trace-%v", 1),
+ Event: types.Event{
+ Dataset: "aoeu",
+ APIKey: legacyAPIKey,
+ SampleRate: 0, // This should get lifted to 1
+ Data: make(map[string]interface{}),
+ },
+ }
+
+ coll.AddSpan(span)
+
+ time.Sleep(conf.SendTickerVal * 2)
+
+ transmission.Mux.RLock()
+ assert.Equal(t, 1, len(transmission.Events), "should be some events transmitted")
+ assert.Equal(t, uint(1), transmission.Events[0].SampleRate,
+ "SampleRate should be reset to one after starting at zero")
+ transmission.Mux.RUnlock()
+}
+
+func getEventsLength(transmission *transmit.MockTransmission) int {
+ transmission.Mux.RLock()
+ defer transmission.Mux.RUnlock()
+
+ return len(transmission.Events)
+}
+
// TestAddSpan tests that adding a span winds up with a trace object in the
// cache
func TestAddSpan(t *testing.T) {
@@ -118,9 +244,9 @@ func TestAddSpan(t *testing.T) {
}
c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{})
coll.cache = c
- stc, err := lru.New(15)
+ stc, err := cache.NewLegacySentCache(15)
assert.NoError(t, err, "lru cache should start")
- coll.sentTraceCache = stc
+ coll.sampleTraceCache = stc
coll.incoming = make(chan *types.Span, 5)
coll.fromPeer = make(chan *types.Span, 5)
@@ -137,6 +263,7 @@ func TestAddSpan(t *testing.T) {
Data: map[string]interface{}{
"trace.parent_id": "unused",
},
+ APIKey: legacyAPIKey,
},
}
coll.AddSpanFromPeer(span)
@@ -149,6 +276,7 @@ func TestAddSpan(t *testing.T) {
Event: types.Event{
Dataset: "aoeu",
Data: map[string]interface{}{},
+ APIKey: legacyAPIKey,
},
}
coll.AddSpan(rootSpan)
@@ -179,7 +307,7 @@ func TestDryRunMode(t *testing.T) {
Config: conf,
Logger: &logger.NullLogger{},
}
- sampler := samplerFactory.GetSamplerImplementationForDataset("test")
+ sampler := samplerFactory.GetSamplerImplementationForKey("test", true)
coll := &InMemCollector{
Config: conf,
Logger: &logger.NullLogger{},
@@ -189,9 +317,9 @@ func TestDryRunMode(t *testing.T) {
}
c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{})
coll.cache = c
- stc, err := lru.New(15)
+ stc, err := cache.NewLegacySentCache(15)
assert.NoError(t, err, "lru cache should start")
- coll.sentTraceCache = stc
+ coll.sampleTraceCache = stc
coll.incoming = make(chan *types.Span, 5)
coll.fromPeer = make(chan *types.Span, 5)
@@ -203,19 +331,20 @@ func TestDryRunMode(t *testing.T) {
var traceID2 = "def456"
var traceID3 = "ghi789"
// sampling decisions based on trace ID
- _, keepTraceID1 := sampler.GetSampleRate(&types.Trace{TraceID: traceID1})
+ _, keepTraceID1, _ := sampler.GetSampleRate(&types.Trace{TraceID: traceID1})
// would be dropped if dry run mode was not enabled
assert.False(t, keepTraceID1)
- _, keepTraceID2 := sampler.GetSampleRate(&types.Trace{TraceID: traceID2})
+ _, keepTraceID2, _ := sampler.GetSampleRate(&types.Trace{TraceID: traceID2})
assert.True(t, keepTraceID2)
- _, keepTraceID3 := sampler.GetSampleRate(&types.Trace{TraceID: traceID3})
+ _, keepTraceID3, _ := sampler.GetSampleRate(&types.Trace{TraceID: traceID3})
// would be dropped if dry run mode was not enabled
assert.False(t, keepTraceID3)
span := &types.Span{
TraceID: traceID1,
Event: types.Event{
- Data: map[string]interface{}{},
+ Data: map[string]interface{}{},
+ APIKey: legacyAPIKey,
},
}
coll.AddSpan(span)
@@ -238,6 +367,7 @@ func TestDryRunMode(t *testing.T) {
Data: map[string]interface{}{
"trace.parent_id": "unused",
},
+ APIKey: legacyAPIKey,
},
}
coll.AddSpanFromPeer(span)
@@ -247,7 +377,8 @@ func TestDryRunMode(t *testing.T) {
span = &types.Span{
TraceID: traceID2,
Event: types.Event{
- Data: map[string]interface{}{},
+ Data: map[string]interface{}{},
+ APIKey: legacyAPIKey,
},
}
coll.AddSpanFromPeer(span)
@@ -263,7 +394,8 @@ func TestDryRunMode(t *testing.T) {
span = &types.Span{
TraceID: traceID3,
Event: types.Event{
- Data: map[string]interface{}{},
+ Data: map[string]interface{}{},
+ APIKey: legacyAPIKey,
},
}
coll.AddSpan(span)
@@ -313,20 +445,21 @@ func TestCacheSizeReload(t *testing.T) {
Data: map[string]interface{}{
"trace.parent_id": "1",
},
+ APIKey: legacyAPIKey,
}
coll.AddSpan(&types.Span{TraceID: "1", Event: event})
coll.AddSpan(&types.Span{TraceID: "2", Event: event})
expectedEvents := 1
- wait := 2 * time.Millisecond
+ wait := 1 * time.Second
check := func() bool {
transmission.Mux.RLock()
defer transmission.Mux.RUnlock()
return len(transmission.Events) == expectedEvents
}
- assert.Eventually(t, check, 10*wait, wait, "expected one trace evicted and sent")
+ assert.Eventually(t, check, 60*wait, wait, "expected one trace evicted and sent")
conf.Mux.Lock()
conf.GetInMemoryCollectorCacheCapacityVal.CacheCapacity = 2
@@ -338,7 +471,7 @@ func TestCacheSizeReload(t *testing.T) {
defer coll.mutex.RUnlock()
return coll.cache.(*cache.DefaultInMemCache).GetCacheSize() == 2
- }, 10*wait, wait, "cache size to change")
+ }, 60*wait, wait, "cache size to change")
coll.AddSpan(&types.Span{TraceID: "3", Event: event})
time.Sleep(5 * conf.SendTickerVal)
@@ -350,7 +483,7 @@ func TestCacheSizeReload(t *testing.T) {
conf.ReloadConfig()
expectedEvents = 2
- assert.Eventually(t, check, 10*wait, wait, "expected another trace evicted and sent")
+ assert.Eventually(t, check, 60*wait, wait, "expected another trace evicted and sent")
}
func TestSampleConfigReload(t *testing.T) {
@@ -360,7 +493,7 @@ func TestSampleConfigReload(t *testing.T) {
conf := &config.MockConfig{
GetSendDelayVal: 0,
- GetTraceTimeoutVal: 10 * time.Millisecond,
+ GetTraceTimeoutVal: 60 * time.Second,
GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1},
SendTickerVal: 2 * time.Millisecond,
GetInMemoryCollectorCacheCapacityVal: config.InMemoryCollectorCacheCapacity{CacheCapacity: 10},
@@ -387,6 +520,7 @@ func TestSampleConfigReload(t *testing.T) {
TraceID: "1",
Event: types.Event{
Dataset: dataset,
+ APIKey: legacyAPIKey,
},
}
@@ -414,6 +548,7 @@ func TestSampleConfigReload(t *testing.T) {
TraceID: "2",
Event: types.Event{
Dataset: dataset,
+ APIKey: legacyAPIKey,
},
}
@@ -428,7 +563,7 @@ func TestSampleConfigReload(t *testing.T) {
}, conf.GetTraceTimeoutVal*2, conf.SendTickerVal)
}
-func TestMaxAlloc(t *testing.T) {
+func TestOldMaxAlloc(t *testing.T) {
transmission := &transmit.MockTransmission{}
transmission.Start()
conf := &config.MockConfig{
@@ -449,9 +584,9 @@ func TestMaxAlloc(t *testing.T) {
}
c := cache.NewInMemCache(1000, &metrics.NullMetrics{}, &logger.NullLogger{})
coll.cache = c
- stc, err := lru.New(15)
+ stc, err := cache.NewLegacySentCache(15)
assert.NoError(t, err, "lru cache should start")
- coll.sentTraceCache = stc
+ coll.sampleTraceCache = stc
coll.incoming = make(chan *types.Span, 1000)
coll.fromPeer = make(chan *types.Span, 5)
@@ -468,6 +603,7 @@ func TestMaxAlloc(t *testing.T) {
"trace.parent_id": "unused",
"id": i,
},
+ APIKey: legacyAPIKey,
},
}
coll.AddSpan(span)
@@ -503,7 +639,7 @@ func TestMaxAlloc(t *testing.T) {
time.Sleep(conf.SendTickerVal)
}
- assert.Equal(t, 450, len(traces), "should have shrunk cache to 90% of previous size")
+ assert.Equal(t, 450, len(traces), "should have shrunk cache to 90%% of previous size")
for i, trace := range traces {
assert.False(t, trace.Sent)
assert.Equal(t, strconv.Itoa(i+50), trace.TraceID)
@@ -512,7 +648,7 @@ func TestMaxAlloc(t *testing.T) {
// We discarded the first 50 spans, and sent them.
transmission.Mux.Lock()
- assert.Equal(t, 50, len(transmission.Events), "should have sent 10% of traces")
+ assert.Equal(t, 50, len(transmission.Events), "should have sent 10%% of traces")
for i, ev := range transmission.Events {
assert.Equal(t, i, ev.Data["id"])
}
@@ -520,6 +656,105 @@ func TestMaxAlloc(t *testing.T) {
transmission.Mux.Unlock()
}
+func TestStableMaxAlloc(t *testing.T) {
+ transmission := &transmit.MockTransmission{}
+ transmission.Start()
+ conf := &config.MockConfig{
+ GetSendDelayVal: 0,
+ GetTraceTimeoutVal: 10 * time.Minute,
+ GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1},
+ SendTickerVal: 2 * time.Millisecond,
+ CacheOverrunStrategy: "impact",
+ }
+ coll := &InMemCollector{
+ Config: conf,
+ Logger: &logger.NullLogger{},
+ Transmission: transmission,
+ Metrics: &metrics.NullMetrics{},
+ SamplerFactory: &sample.SamplerFactory{
+ Config: conf,
+ Logger: &logger.NullLogger{},
+ },
+ }
+ spandata := make([]map[string]interface{}, 500)
+ for i := 0; i < 500; i++ {
+ spandata[i] = map[string]interface{}{
+ "trace.parent_id": "unused",
+ "id": i,
+ "str1": strings.Repeat("abc", rand.Intn(100)+1),
+ "str2": strings.Repeat("def", rand.Intn(100)+1),
+ }
+ }
+
+ c := cache.NewInMemCache(1000, &metrics.NullMetrics{}, &logger.NullLogger{})
+ coll.cache = c
+ stc, err := cache.NewLegacySentCache(15)
+ assert.NoError(t, err, "lru cache should start")
+ coll.sampleTraceCache = stc
+
+ coll.incoming = make(chan *types.Span, 1000)
+ coll.fromPeer = make(chan *types.Span, 5)
+ coll.datasetSamplers = make(map[string]sample.Sampler)
+ go coll.collect()
+ defer coll.Stop()
+
+ for i := 0; i < 500; i++ {
+ span := &types.Span{
+ TraceID: strconv.Itoa(i),
+ Event: types.Event{
+ Dataset: "aoeu",
+ Data: spandata[i],
+ APIKey: legacyAPIKey,
+ },
+ }
+ coll.AddSpan(span)
+ }
+
+ for len(coll.incoming) > 0 {
+ time.Sleep(conf.SendTickerVal)
+ }
+
+ // Now there should be 500 traces in the cache.
+ coll.mutex.Lock()
+ assert.Equal(t, 500, len(coll.cache.GetAll()))
+
+ // We want to induce an eviction event, so set MaxAlloc a bit below
+ // our current post-GC alloc.
+ runtime.GC()
+ var mem runtime.MemStats
+ runtime.ReadMemStats(&mem)
+ // Set MaxAlloc, which should cause cache evictions.
+ conf.GetInMemoryCollectorCacheCapacityVal.MaxAlloc = mem.Alloc * 99 / 100
+
+ coll.mutex.Unlock()
+
+ // wait for the cache to take some action
+ var traces []*types.Trace
+ for {
+ coll.mutex.Lock()
+ traces = coll.cache.GetAll()
+ if len(traces) < 500 {
+ break
+ }
+ coll.mutex.Unlock()
+
+ time.Sleep(conf.SendTickerVal)
+ }
+
+ assert.Equal(t, 1000, coll.cache.(*cache.DefaultInMemCache).GetCacheSize(), "cache size shouldn't change")
+
+ tracesLeft := len(traces)
+ assert.Less(t, tracesLeft, 480, "should have sent some traces")
+ assert.Greater(t, tracesLeft, 100, "should have NOT sent some traces")
+ coll.mutex.Unlock()
+
+ // We discarded the most costly spans, and sent them.
+ transmission.Mux.Lock()
+ assert.Equal(t, 500-len(traces), len(transmission.Events), "should have sent traces that weren't kept")
+
+ transmission.Mux.Unlock()
+}
+
func TestAddSpanNoBlock(t *testing.T) {
transmission := &transmit.MockTransmission{}
transmission.Start()
@@ -541,9 +776,9 @@ func TestAddSpanNoBlock(t *testing.T) {
}
c := cache.NewInMemCache(10, &metrics.NullMetrics{}, &logger.NullLogger{})
coll.cache = c
- stc, err := lru.New(15)
+ stc, err := cache.NewLegacySentCache(15)
assert.NoError(t, err, "lru cache should start")
- coll.sentTraceCache = stc
+ coll.sampleTraceCache = stc
coll.incoming = make(chan *types.Span, 3)
coll.fromPeer = make(chan *types.Span, 3)
@@ -554,6 +789,7 @@ func TestAddSpanNoBlock(t *testing.T) {
TraceID: "1",
Event: types.Event{
Dataset: "aoeu",
+ APIKey: legacyAPIKey,
},
}
@@ -569,3 +805,162 @@ func TestAddSpanNoBlock(t *testing.T) {
err = coll.AddSpanFromPeer(span)
assert.Error(t, err)
}
+
+func TestDependencyInjection(t *testing.T) {
+ var g inject.Graph
+ err := g.Provide(
+ &inject.Object{Value: &InMemCollector{}},
+ &inject.Object{Value: &config.MockConfig{}},
+ &inject.Object{Value: &logger.NullLogger{}},
+ &inject.Object{Value: &transmit.MockTransmission{}, Name: "upstreamTransmission"},
+ &inject.Object{Value: &metrics.NullMetrics{}, Name: "metrics"},
+ &inject.Object{Value: &sample.SamplerFactory{}},
+ )
+ if err != nil {
+ t.Error(err)
+ }
+ if err := g.Populate(); err != nil {
+ t.Error(err)
+ }
+}
+
+// TestAddSpanCount tests that adding a root span winds up with a trace object in
+// the cache and that that trace gets span count added to it
+func TestAddSpanCount(t *testing.T) {
+ transmission := &transmit.MockTransmission{}
+ transmission.Start()
+ conf := &config.MockConfig{
+ GetSendDelayVal: 0,
+ GetTraceTimeoutVal: 60 * time.Second,
+ GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1},
+ SendTickerVal: 2 * time.Millisecond,
+ AddSpanCountToRoot: true,
+ }
+ coll := &InMemCollector{
+ Config: conf,
+ Logger: &logger.NullLogger{},
+ Transmission: transmission,
+ Metrics: &metrics.NullMetrics{},
+ SamplerFactory: &sample.SamplerFactory{
+ Config: conf,
+ Logger: &logger.NullLogger{},
+ },
+ }
+ c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{})
+ coll.cache = c
+ stc, err := cache.NewLegacySentCache(15)
+ assert.NoError(t, err, "lru cache should start")
+ coll.sampleTraceCache = stc
+
+ coll.incoming = make(chan *types.Span, 5)
+ coll.fromPeer = make(chan *types.Span, 5)
+ coll.datasetSamplers = make(map[string]sample.Sampler)
+ go coll.collect()
+ defer coll.Stop()
+
+ var traceID = "mytrace"
+
+ span := &types.Span{
+ TraceID: traceID,
+ Event: types.Event{
+ Dataset: "aoeu",
+ Data: map[string]interface{}{
+ "trace.parent_id": "unused",
+ },
+ APIKey: legacyAPIKey,
+ },
+ }
+ coll.AddSpanFromPeer(span)
+ time.Sleep(conf.SendTickerVal * 2)
+ assert.Equal(t, traceID, coll.getFromCache(traceID).TraceID, "after adding the span, we should have a trace in the cache with the right trace ID")
+ assert.Equal(t, 0, len(transmission.Events), "adding a non-root span should not yet send the span")
+ // ok now let's add the root span and verify that both got sent
+ rootSpan := &types.Span{
+ TraceID: traceID,
+ Event: types.Event{
+ Dataset: "aoeu",
+ Data: map[string]interface{}{},
+ APIKey: legacyAPIKey,
+ },
+ }
+ coll.AddSpan(rootSpan)
+ time.Sleep(conf.SendTickerVal * 2)
+ assert.Nil(t, coll.getFromCache(traceID), "after adding a leaf and root span, it should be removed from the cache")
+ transmission.Mux.RLock()
+ assert.Equal(t, 2, len(transmission.Events), "adding a root span should send all spans in the trace")
+ assert.Equal(t, nil, transmission.Events[0].Data["meta.span_count"], "child span metadata should NOT be populated with span count")
+ assert.Equal(t, int64(2), transmission.Events[1].Data["meta.span_count"], "root span metadata should be populated with span count")
+ transmission.Mux.RUnlock()
+}
+
+// TestLateRootGetsSpanCount tests that the root span gets decorated with the right span count
+// even if the trace had already been sent
+func TestLateRootGetsSpanCount(t *testing.T) {
+ transmission := &transmit.MockTransmission{}
+ transmission.Start()
+ conf := &config.MockConfig{
+ GetSendDelayVal: 0,
+ GetTraceTimeoutVal: 5 * time.Millisecond,
+ GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1},
+ SendTickerVal: 2 * time.Millisecond,
+ AddSpanCountToRoot: true,
+ }
+ coll := &InMemCollector{
+ Config: conf,
+ Logger: &logger.NullLogger{},
+ Transmission: transmission,
+ Metrics: &metrics.NullMetrics{},
+ SamplerFactory: &sample.SamplerFactory{
+ Config: conf,
+ Logger: &logger.NullLogger{},
+ },
+ }
+ c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{})
+ coll.cache = c
+ stc, err := cache.NewLegacySentCache(15)
+ assert.NoError(t, err, "lru cache should start")
+ coll.sampleTraceCache = stc
+
+ coll.incoming = make(chan *types.Span, 5)
+ coll.fromPeer = make(chan *types.Span, 5)
+ coll.datasetSamplers = make(map[string]sample.Sampler)
+ go coll.collect()
+ defer coll.Stop()
+
+ var traceID = "mytrace"
+
+ span := &types.Span{
+ TraceID: traceID,
+ Event: types.Event{
+ Dataset: "aoeu",
+ Data: map[string]interface{}{
+ "trace.parent_id": "unused",
+ },
+ APIKey: legacyAPIKey,
+ },
+ }
+ coll.AddSpanFromPeer(span)
+ time.Sleep(conf.SendTickerVal * 10)
+
+ trace := coll.getFromCache(traceID)
+ assert.Nil(t, trace, "trace should have been sent although the root span hasn't arrived")
+ assert.Equal(t, 1, len(transmission.Events), "adding a non-root span and waiting should send the span")
+ // now we add the root span and verify that both got sent and that the root span had the span count
+ rootSpan := &types.Span{
+ TraceID: traceID,
+ Event: types.Event{
+ Dataset: "aoeu",
+ Data: map[string]interface{}{},
+ APIKey: legacyAPIKey,
+ },
+ }
+ coll.AddSpan(rootSpan)
+ time.Sleep(conf.SendTickerVal * 2)
+ assert.Nil(t, coll.getFromCache(traceID), "after adding a leaf and root span, it should be removed from the cache")
+ transmission.Mux.RLock()
+ assert.Equal(t, 2, len(transmission.Events), "adding a root span should send all spans in the trace")
+ assert.Equal(t, nil, transmission.Events[0].Data["meta.span_count"], "child span metadata should NOT be populated with span count")
+ assert.Equal(t, int64(2), transmission.Events[1].Data["meta.span_count"], "root span metadata should be populated with span count")
+ transmission.Mux.RUnlock()
+
+}
diff --git a/config.toml b/config.toml
deleted file mode 100644
index 9dc2826ff0..0000000000
--- a/config.toml
+++ /dev/null
@@ -1,39 +0,0 @@
-#######################
-## Quickstart Config ##
-#######################
-
-# InMemCollector brings together all the settings that are relevant to
-# collecting spans together to make traces.
-[InMemCollector]
-
-# The collection cache is used to collect all spans into a trace as well as
-# remember the sampling decision for any spans that might come in after the
-# trace has been marked "complete" (either by timing out or seeing the root
-# span). The number of traces in the cache should be many multiples (100x to
-# 1000x) of the total number of concurrently active traces (trace throughput *
-# trace duration).
-# Eligible for live reload. Growing the cache capacity with a live config reload
-# is fine. Avoid shrinking it with a live reload (you can, but it may cause
-# temporary odd sampling decisions).
-CacheCapacity = 1000
-
-[HoneycombMetrics]
-
-# MetricsHoneycombAPI is the URL for the upstream Honeycomb API.
-# Eligible for live reload.
-MetricsHoneycombAPI = "https://api.honeycomb.io"
-
-# MetricsAPIKey is the API key to use to send log events to the Honeycomb logging
-# dataset. This is separate from the APIKeys used to authenticate regular
-# traffic.
-# Eligible for live reload.
-MetricsAPIKey = "abcd1234"
-
-# MetricsDataset is the name of the dataset to which to send Refinery metrics
-# Eligible for live reload.
-MetricsDataset = "Refinery Metrics"
-
-# MetricsReportingInterval is the frequency (in seconds) to send metric events
-# to Honeycomb. Between 1 and 60 is recommended.
-# Not eligible for live reload.
-MetricsReportingInterval = 3
diff --git a/config/config.go b/config/config.go
index ff7f114454..293b99087d 100644
--- a/config/config.go
+++ b/config/config.go
@@ -25,21 +25,56 @@ type Config interface {
// peer traffic
GetPeerListenAddr() (string, error)
- // GetAPIKeys returns a list of Honeycomb API keys
- GetAPIKeys() ([]string, error)
+ // GetGRPCPeerListenAddr returns the address and port on which to listen for
+ // GRPC peer traffic
+ GetGRPCPeerListenAddr() (string, error)
+
+ // GetCompressPeerCommunication will be true if tracing-proxy should compress
+ // data before forwarding it to a peer.
+ GetCompressPeerCommunication() bool
+
+ // GetGRPCListenAddr returns the address and port on which to listen for
+ // incoming events over gRPC
+ GetGRPCListenAddr() (string, error)
// GetPeers returns a list of other servers participating in this proxy cluster
GetPeers() ([]string, error)
GetPeerManagementType() (string, error)
+ // GetPeerManagementStrategy returns the strategy specified for
+ // Peer management.
+ GetPeerManagementStrategy() (string, error)
+
// GetRedisHost returns the address of a Redis instance to use for peer
// management.
GetRedisHost() (string, error)
- // GetHoneycombAPI returns the base URL (protocol, hostname, and port) of
+ // GetRedisUsername returns the username of a Redis instance to use for peer
+ // management.
+ GetRedisUsername() (string, error)
+
+ // GetRedisPassword returns the password of a Redis instance to use for peer
+ // management.
+ GetRedisPassword() (string, error)
+
+ // GetRedisPrefix returns the prefix string used in the keys for peer
+ // management.
+ GetRedisPrefix() string
+
+ // GetRedisDatabase returns the ID of the Redis database to use for peer management.
+ GetRedisDatabase() int
+
+ // GetUseTLS returns true when TLS must be enabled to dial the Redis instance to
+ // use for peer management.
+ GetUseTLS() (bool, error)
+
+ // GetUseTLSInsecure returns true when certificate checks are disabled
+ GetUseTLSInsecure() (bool, error)
+
+ // GetOpsrampAPI returns the base URL (protocol, hostname, and port) of
// the upstream Honeycomb API server
- GetHoneycombAPI() (string, error)
+ GetOpsrampAPI() (string, error)
// GetLoggingLevel returns the verbosity with which we should log
GetLoggingLevel() (string, error)
@@ -48,24 +83,23 @@ type Config interface {
// complete before sending it, to allow stragglers to arrive
GetSendDelay() (time.Duration, error)
+ // GetBatchTimeout returns how often to send off batches in seconds
+ GetBatchTimeout() time.Duration
+
// GetTraceTimeout is how long to wait before sending a trace even if it's
// not complete. This should be longer than the longest expected trace
// duration.
GetTraceTimeout() (time.Duration, error)
+ // GetMaxBatchSize is the number of events to be included in the batch for sending
+ GetMaxBatchSize() uint
+
// GetOtherConfig attempts to fill the passed in struct with the contents of
// a subsection of the config. This is used by optional configurations to
// allow different implementations of necessary interfaces configure
// themselves
GetOtherConfig(name string, configStruct interface{}) error
- // GetLoggerType returns the type of the logger to use. Valid types are in
- // the logger package
- GetLoggerType() (string, error)
-
- // GetHoneycombLoggerConfig returns the config specific to the HoneycombLogger
- GetHoneycombLoggerConfig() (HoneycombLoggerConfig, error)
-
// GetCollectorType returns the type of the collector to use. Valid types
// are in the collect package
GetCollectorType() (string, error)
@@ -73,24 +107,23 @@ type Config interface {
// GetInMemCollectorCacheCapacity returns the config specific to the InMemCollector
GetInMemCollectorCacheCapacity() (InMemoryCollectorCacheCapacity, error)
- // GetSamplerConfigForDataset returns the sampler type to use for the given dataset
- GetSamplerConfigForDataset(string) (interface{}, error)
+ // GetSamplerConfigForDataset returns the sampler type and name to use for the given dataset
+ GetSamplerConfigForDataset(string) (interface{}, string, error)
- // GetMetricsType returns the type of metrics to use. Valid types are in the
- // metrics package
- GetMetricsType() (string, error)
+ // GetAllSamplerRules returns all dataset rules in a map, including the default
+ GetAllSamplerRules() (map[string]interface{}, error)
- // GetHoneycombMetricsConfig returns the config specific to HoneycombMetrics
- GetHoneycombMetricsConfig() (HoneycombMetricsConfig, error)
+ // GetLogrusConfig returns the config specific to Logrus
+ GetLogrusConfig() (*LogrusLoggerConfig, error)
- // GetPrometheusMetricsConfig returns the config specific to PrometheusMetrics
- GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error)
+ // GetMetricsConfig returns the metrics reporting configuration
+ GetMetricsConfig() MetricsConfig
- // GetUpstreamBufferSize returns the size of the libhoney buffer to use for the upstream
- // libhoney client
+ // GetUpstreamBufferSize returns the size of the libtrace buffer to use for the upstream
+ // libtrace client
GetUpstreamBufferSize() int
- // GetPeerBufferSize returns the size of the libhoney buffer to use for the peer forwarding
- // libhoney client
+ // GetPeerBufferSize returns the size of the libtrace buffer to use for the peer forwarding
+ // libtrace client
GetPeerBufferSize() int
GetIdentifierInterfaceName() (string, error)
@@ -109,4 +142,66 @@ type Config interface {
GetIsDryRun() bool
GetDryRunFieldName() string
+
+ GetAddHostMetadataToTrace() bool
+
+ GetAddAdditionalMetadata() map[string]string
+
+ GetSendMetricsToOpsRamp() bool
+
+ // GetGlobalUseTLS returns true when TLS must be enabled to dial
+ GetGlobalUseTLS() bool
+
+ // GetGlobalUseTLSInsecureSkip returns true when certificate checks are disabled
+ GetGlobalUseTLSInsecureSkip() bool
+
+ // GetProxyConfig returns proxy configuration
+ GetProxyConfig() ProxyConfiguration
+
+ // GetAuthConfig return the authentication configuration
+ GetAuthConfig() AuthConfiguration
+
+ GetRetryConfig() *RetryConfiguration
+
+ GetTenantId() (string, error)
+
+ GetDataset() (string, error)
+
+ GetAddRuleReasonToTrace() bool
+
+ GetEnvironmentCacheTTL() time.Duration
+
+ GetDatasetPrefix() string
+
+ // GetQueryAuthToken returns the token that must be used to access the /query endpoints
+ GetQueryAuthToken() string
+
+ GetGRPCMaxConnectionIdle() time.Duration
+
+ GetGRPCMaxConnectionAge() time.Duration
+
+ GetGRPCMaxConnectionAgeGrace() time.Duration
+
+ GetGRPCTime() time.Duration
+
+ GetGRPCTimeout() time.Duration
+
+ GetPeerTimeout() time.Duration
+
+ GetAdditionalErrorFields() []string
+
+ GetAddSpanCountToRoot() bool
+
+ GetCacheOverrunStrategy() string
+
+ GetConfigMetadata() []ConfigMetadata
+
+ GetSampleCacheConfig() SampleCacheConfig
+}
+
+type ConfigMetadata struct {
+ Type string `json:"type"`
+ ID string `json:"id"`
+ Hash string `json:"hash"`
+ LoadedAt string `json:"loaded_at"`
}
diff --git a/config/config_test.go b/config/config_test.go
index 8ed47f0193..65ecacfccb 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -1,9 +1,6 @@
-// +build all race
-
package config
import (
- "io/ioutil"
"os"
"sync"
"testing"
@@ -12,10 +9,28 @@ import (
"github.com/stretchr/testify/assert"
)
-func TestRedisEnvVar(t *testing.T) {
- host := "redis.magic:1337"
- os.Setenv("REFINERY_REDIS_HOST", host)
- defer os.Unsetenv("REFINERY_REDIS_HOST")
+func TestGRPCListenAddrEnvVar(t *testing.T) {
+ const address = "127.0.0.1:4317"
+ const envVarName = "REFINERY_GRPC_LISTEN_ADDRESS"
+ os.Setenv(envVarName, address)
+ defer os.Unsetenv(envVarName)
+
+ c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {})
+
+ if err != nil {
+ t.Error(err)
+ }
+
+ if a, _ := c.GetGRPCListenAddr(); a != address {
+ t.Error("received", a, "expected", address)
+ }
+}
+
+func TestRedisHostEnvVar(t *testing.T) {
+ const host = "redis.magic:1337"
+ const envVarName = "TRACE_PROXY_REDIS_HOST"
+ os.Setenv(envVarName, host)
+ defer os.Unsetenv(envVarName)
c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {})
@@ -28,35 +43,159 @@ func TestRedisEnvVar(t *testing.T) {
}
}
-func TestReload(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", "")
+func TestRedisUsernameEnvVar(t *testing.T) {
+ const username = "admin"
+ const envVarName = "TRACE_PROXY_REDIS_USERNAME"
+ os.Setenv(envVarName, username)
+ defer os.Unsetenv(envVarName)
+
+ c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {})
+
+ if err != nil {
+ t.Error(err)
+ }
+
+ if d, _ := c.GetRedisUsername(); d != username {
+ t.Error("received", d, "expected", username)
+ }
+}
+
+func TestRedisPasswordEnvVar(t *testing.T) {
+ const password = "testRedisPassword"
+ const envVarName = "TRACE_PROXY_REDIS_PASSWORD"
+ os.Setenv(envVarName, password)
+ defer os.Unsetenv(envVarName)
+
+ c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {})
+
+ if err != nil {
+ t.Error(err)
+ }
+
+ if d, _ := c.GetRedisPassword(); d != password {
+ t.Error("received", d, "expected", password)
+ }
+}
+
+func TestMetricsAPIKeyEnvVar(t *testing.T) {
+ testCases := []struct {
+ name string
+ envVar string
+ key string
+ }{
+ {
+ name: "Specific env var",
+ envVar: "REFINERY_HONEYCOMB_METRICS_API_KEY",
+ key: "abc123",
+ },
+ {
+ name: "Fallback env var",
+ envVar: "REFINERY_HONEYCOMB_API_KEY",
+ key: "321cba",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ os.Setenv(tc.envVar, tc.key)
+ defer os.Unsetenv(tc.envVar)
+
+ c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {})
+
+ if err != nil {
+ t.Error(err)
+ }
+
+ if d := c.GetAuthConfig(); d.Key != tc.key {
+ t.Error("received", d, "expected", tc.key)
+ }
+ })
+ }
+}
+
+func TestMetricsAPIKeyMultipleEnvVar(t *testing.T) {
+ const specificKey = "abc123"
+ const specificEnvVarName = "REFINERY_HONEYCOMB_METRICS_API_KEY"
+ const fallbackKey = "this should not be set in the config"
+ const fallbackEnvVarName = "REFINERY_HONEYCOMB_API_KEY"
+
+ os.Setenv(specificEnvVarName, specificKey)
+ defer os.Unsetenv(specificEnvVarName)
+ os.Setenv(fallbackEnvVarName, fallbackKey)
+ defer os.Unsetenv(fallbackEnvVarName)
+
+ c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {})
+
+ if err != nil {
+ t.Error(err)
+ }
+
+ if d := c.GetAuthConfig(); d.Key != specificKey {
+ t.Error("received", d, "expected", specificKey)
+ }
+}
+
+func TestMetricsAPIKeyFallbackEnvVar(t *testing.T) {
+ const key = "abc1234"
+ const envVarName = "REFINERY_HONEYCOMB_API_KEY"
+ os.Setenv(envVarName, key)
+ defer os.Unsetenv(envVarName)
+
+ c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {})
+
+ if err != nil {
+ t.Error(err)
+ }
+
+ if d := c.GetAuthConfig(); d.Key != key {
+ t.Error("received", d, "expected", key)
+ }
+}
+
+// creates two temporary toml files from the strings passed in and returns their filenames
+func createTempConfigs(t *testing.T, configBody string, rulesBody string) (string, string) {
+ tmpDir, err := os.MkdirTemp("", "")
assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
- rulesFile, err := ioutil.TempFile(tmpDir, "*.toml")
+ configFile, err := os.CreateTemp(tmpDir, "*.toml")
assert.NoError(t, err)
- configFile, err := ioutil.TempFile(tmpDir, "*.toml")
+ if configBody != "" {
+ _, err = configFile.WriteString(configBody)
+ assert.NoError(t, err)
+ }
+ configFile.Close()
+
+ rulesFile, err := os.CreateTemp(tmpDir, "*.toml")
assert.NoError(t, err)
- dummy := []byte(`
+ if rulesBody != "" {
+ _, err = rulesFile.WriteString(rulesBody)
+ assert.NoError(t, err)
+ }
+ rulesFile.Close()
+
+ return configFile.Name(), rulesFile.Name()
+}
+
+func TestReload(t *testing.T) {
+ config, rules := createTempConfigs(t, `
ListenAddr="0.0.0.0:8080"
[InMemCollector]
CacheCapacity=1000
[HoneycombMetrics]
- MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsOpsrampAPI="http://jirs5"
MetricsAPIKey="1234"
MetricsDataset="testDatasetName"
MetricsReportingInterval=3
- `)
+ `, "")
+ defer os.Remove(rules)
+ defer os.Remove(config)
- _, err = configFile.Write(dummy)
+ c, err := NewConfig(config, rules, func(err error) {})
assert.NoError(t, err)
- configFile.Close()
-
- c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {})
if err != nil {
t.Error(err)
@@ -100,7 +239,7 @@ func TestReload(t *testing.T) {
}
}()
- if file, err := os.OpenFile(configFile.Name(), os.O_RDWR, 0644); err == nil {
+ if file, err := os.OpenFile(config, os.O_RDWR, 0644); err == nil {
file.WriteString(`ListenAddr = "0.0.0.0:9000"`)
file.Close()
}
@@ -148,13 +287,22 @@ func TestReadDefaults(t *testing.T) {
t.Error("received", d, "expected", false)
}
- if d := c.GetDryRunFieldName(); d != "refinery_kept" {
- t.Error("received", d, "expected", "refinery_kept")
+ if d := c.GetDryRunFieldName(); d != "tracing-proxy_kept" {
+ t.Error("received", d, "expected", "tracing-proxy_kept")
+ }
+
+ if d := c.GetAddHostMetadataToTrace(); d != false {
+ t.Error("received", d, "expected", false)
+ }
+
+ if d := c.GetEnvironmentCacheTTL(); d != time.Hour {
+ t.Error("received", d, "expected", time.Hour)
}
- d, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist")
+ d, name, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist")
assert.NoError(t, err)
assert.IsType(t, &DeterministicSamplerConfig{}, d)
+ assert.Equal(t, "DeterministicSampler", name)
type imcConfig struct {
CacheCapacity int
@@ -174,64 +322,68 @@ func TestReadRulesConfig(t *testing.T) {
t.Error(err)
}
- d, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist")
+ d, name, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist")
assert.NoError(t, err)
assert.IsType(t, &DeterministicSamplerConfig{}, d)
+ assert.Equal(t, "DeterministicSampler", name)
- d, err = c.GetSamplerConfigForDataset("dataset1")
+ d, name, err = c.GetSamplerConfigForDataset("dataset1")
assert.NoError(t, err)
assert.IsType(t, &DynamicSamplerConfig{}, d)
+ assert.Equal(t, "DynamicSampler", name)
- d, err = c.GetSamplerConfigForDataset("dataset4")
+ d, name, err = c.GetSamplerConfigForDataset("dataset4")
assert.NoError(t, err)
switch r := d.(type) {
case *RulesBasedSamplerConfig:
- assert.Len(t, r.Rule, 3)
+ assert.Len(t, r.Rule, 6)
var rule *RulesBasedSamplerRule
rule = r.Rule[0]
- assert.Equal(t, 1, rule.SampleRate)
- assert.Equal(t, "500 errors", rule.Name)
- assert.Len(t, rule.Condition, 2)
-
- rule = r.Rule[1]
assert.True(t, rule.Drop)
assert.Equal(t, 0, rule.SampleRate)
assert.Len(t, rule.Condition, 1)
+ rule = r.Rule[1]
+ assert.Equal(t, 1, rule.SampleRate)
+ assert.Equal(t, "keep slow 500 errors", rule.Name)
+ assert.Len(t, rule.Condition, 2)
+
+ rule = r.Rule[4]
+ assert.Equal(t, 5, rule.SampleRate)
+ assert.Equal(t, "span", rule.Scope)
+
+ rule = r.Rule[5]
+ assert.Equal(t, 10, rule.SampleRate)
+ assert.Equal(t, "", rule.Scope)
+
+ assert.Equal(t, "RulesBasedSampler", name)
+
default:
assert.Fail(t, "dataset4 should have a rules based sampler", d)
}
}
func TestPeerManagementType(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", "")
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
-
- configFile, err := ioutil.TempFile(tmpDir, "*.toml")
- assert.NoError(t, err)
-
- rulesFile, err := ioutil.TempFile(tmpDir, "*.toml")
- assert.NoError(t, err)
-
- _, err = configFile.Write([]byte(`
+ config, rules := createTempConfigs(t, `
[InMemCollector]
CacheCapacity=1000
[HoneycombMetrics]
- MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsOpsrampAPI="http://jirs5"
MetricsAPIKey="1234"
MetricsDataset="testDatasetName"
MetricsReportingInterval=3
[PeerManagement]
Type = "redis"
- Peers = ["http://refinery-1231:8080"]
- `))
+ Peers = ["http://trace-proxy-1231:8080"]
+ `, "")
+ defer os.Remove(rules)
+ defer os.Remove(config)
- c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {})
+ c, err := NewConfig(config, rules, func(err error) {})
assert.NoError(t, err)
if d, _ := c.GetPeerManagementType(); d != "redis" {
@@ -239,31 +391,50 @@ func TestPeerManagementType(t *testing.T) {
}
}
-func TestDebugServiceAddr(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", "")
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+func TestAbsentTraceKeyField(t *testing.T) {
+ config, rules := createTempConfigs(t, `
+ [InMemCollector]
+ CacheCapacity=1000
- configFile, err := ioutil.TempFile(tmpDir, "*.toml")
- assert.NoError(t, err)
+ [HoneycombMetrics]
+ MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsAPIKey="1234"
+ MetricsDataset="testDatasetName"
+ MetricsReportingInterval=3
+ `, `
+ [dataset1]
+ Sampler = "EMADynamicSampler"
+ GoalSampleRate = 10
+ UseTraceLength = true
+ AddSampleRateKeyToTrace = true
+ FieldList = "[request.method]"
+ Weight = 0.4
+ `)
+ defer os.Remove(rules)
+ defer os.Remove(config)
- rulesFile, err := ioutil.TempFile(tmpDir, "*.toml")
- assert.NoError(t, err)
+ _, err := NewConfig(config, rules, func(err error) {})
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "Error:Field validation for 'AddSampleRateKeyToTraceField'")
+}
- _, err = configFile.Write([]byte(`
+func TestDebugServiceAddr(t *testing.T) {
+ config, rules := createTempConfigs(t, `
DebugServiceAddr = "localhost:8085"
[InMemCollector]
CacheCapacity=1000
[HoneycombMetrics]
- MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsOpsrampAPI="http://jirs5"
MetricsAPIKey="1234"
MetricsDataset="testDatasetName"
MetricsReportingInterval=3
- `))
+ `, "")
+ defer os.Remove(rules)
+ defer os.Remove(config)
- c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {})
+ c, err := NewConfig(config, rules, func(err error) {})
assert.NoError(t, err)
if d, _ := c.GetDebugServiceAddr(); d != "localhost:8085" {
@@ -272,32 +443,22 @@ func TestDebugServiceAddr(t *testing.T) {
}
func TestDryRun(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", "")
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
-
- configFile, err := ioutil.TempFile(tmpDir, "*.toml")
- assert.NoError(t, err)
-
- _, err = configFile.Write([]byte(`
+ config, rules := createTempConfigs(t, `
[InMemCollector]
CacheCapacity=1000
[HoneycombMetrics]
- MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsOpsrampAPI="http://jirs5"
MetricsAPIKey="1234"
MetricsDataset="testDatasetName"
MetricsReportingInterval=3
- `))
-
- rulesFile, err := ioutil.TempFile(tmpDir, "*.toml")
- assert.NoError(t, err)
-
- _, err = rulesFile.Write([]byte(`
+ `, `
DryRun=true
- `))
+ `)
+ defer os.Remove(rules)
+ defer os.Remove(config)
- c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {})
+ c, err := NewConfig(config, rules, func(err error) {})
assert.NoError(t, err)
if d := c.GetIsDryRun(); d != true {
@@ -306,29 +467,21 @@ func TestDryRun(t *testing.T) {
}
func TestMaxAlloc(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", "")
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
-
- configFile, err := ioutil.TempFile(tmpDir, "*.toml")
- assert.NoError(t, err)
-
- rulesFile, err := ioutil.TempFile(tmpDir, "*.toml")
- assert.NoError(t, err)
-
- _, err = configFile.Write([]byte(`
+ config, rules := createTempConfigs(t, `
[InMemCollector]
CacheCapacity=1000
MaxAlloc=17179869184
[HoneycombMetrics]
- MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsOpsrampAPI="http://jirs5"
MetricsAPIKey="1234"
MetricsDataset="testDatasetName"
MetricsReportingInterval=3
- `))
+ `, "")
+ defer os.Remove(rules)
+ defer os.Remove(config)
- c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {})
+ c, err := NewConfig(config, rules, func(err error) {})
assert.NoError(t, err)
expected := uint64(16 * 1024 * 1024 * 1024)
@@ -338,28 +491,16 @@ func TestMaxAlloc(t *testing.T) {
}
func TestGetSamplerTypes(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", "")
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
-
- configFile, err := ioutil.TempFile(tmpDir, "*.toml")
- assert.NoError(t, err)
-
- _, err = configFile.Write([]byte(`
+ config, rules := createTempConfigs(t, `
[InMemCollector]
CacheCapacity=1000
[HoneycombMetrics]
- MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsOpsrampAPI="http://jirs5"
MetricsAPIKey="1234"
MetricsDataset="testDatasetName"
MetricsReportingInterval=3
- `))
-
- rulesFile, err := ioutil.TempFile(tmpDir, "*.toml")
- assert.NoError(t, err)
-
- dummyConfig := []byte(`
+ `, `
Sampler = "DeterministicSampler"
SampleRate = 2
@@ -369,7 +510,7 @@ func TestGetSamplerTypes(t *testing.T) {
FieldList = ["request.method","response.status_code"]
UseTraceLength = true
AddSampleRateKeyToTrace = true
- AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key"
+ AddSampleRateKeyToTraceField = "meta.tracing-proxy.dynsampler_key"
ClearFrequencySec = 60
[dataset2]
@@ -383,6 +524,7 @@ func TestGetSamplerTypes(t *testing.T) {
GoalSampleRate = 10
UseTraceLength = true
AddSampleRateKeyToTrace = true
+ AddSampleRateKeyToTraceField = "meta.tracing-proxy.dynsampler_key"
FieldList = "[request.method]"
Weight = 0.3
@@ -391,51 +533,69 @@ func TestGetSamplerTypes(t *testing.T) {
Sampler = "TotalThroughputSampler"
GoalThroughputPerSec = 100
FieldList = "[request.method]"
-`)
+ `)
+ defer os.Remove(rules)
+ defer os.Remove(config)
- _, err = rulesFile.Write(dummyConfig)
+ c, err := NewConfig(config, rules, func(err error) {})
assert.NoError(t, err)
- rulesFile.Close()
-
- c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {})
- if err != nil {
- t.Error(err)
- }
-
- if d, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist"); assert.Equal(t, nil, err) {
+ if d, name, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist"); assert.Equal(t, nil, err) {
assert.IsType(t, &DeterministicSamplerConfig{}, d)
+ assert.Equal(t, "DeterministicSampler", name)
}
- if d, err := c.GetSamplerConfigForDataset("dataset 1"); assert.Equal(t, nil, err) {
+ if d, name, err := c.GetSamplerConfigForDataset("dataset 1"); assert.Equal(t, nil, err) {
assert.IsType(t, &DynamicSamplerConfig{}, d)
+ assert.Equal(t, "DynamicSampler", name)
}
- if d, err := c.GetSamplerConfigForDataset("dataset2"); assert.Equal(t, nil, err) {
+ if d, name, err := c.GetSamplerConfigForDataset("dataset2"); assert.Equal(t, nil, err) {
assert.IsType(t, &DeterministicSamplerConfig{}, d)
+ assert.Equal(t, "DeterministicSampler", name)
}
- if d, err := c.GetSamplerConfigForDataset("dataset3"); assert.Equal(t, nil, err) {
+ if d, name, err := c.GetSamplerConfigForDataset("dataset3"); assert.Equal(t, nil, err) {
assert.IsType(t, &EMADynamicSamplerConfig{}, d)
+ assert.Equal(t, "EMADynamicSampler", name)
}
- if d, err := c.GetSamplerConfigForDataset("dataset4"); assert.Equal(t, nil, err) {
+ if d, name, err := c.GetSamplerConfigForDataset("dataset4"); assert.Equal(t, nil, err) {
assert.IsType(t, &TotalThroughputSamplerConfig{}, d)
+ assert.Equal(t, "TotalThroughputSampler", name)
}
}
func TestDefaultSampler(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", "")
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ config, rules := createTempConfigs(t, `
+ [InMemCollector]
+ CacheCapacity=1000
+
+ [HoneycombMetrics]
+ MetricsOpsrampAPI="http://jirs5"
+ MetricsAPIKey="1234"
+ MetricsDataset="testDatasetName"
+ MetricsReportingInterval=3
+ `, "")
+ defer os.Remove(rules)
+ defer os.Remove(config)
+
+ c, err := NewConfig(config, rules, func(err error) {})
- rulesFile, err := ioutil.TempFile(tmpDir, "*.toml")
assert.NoError(t, err)
- configFile, err := ioutil.TempFile(tmpDir, "*.toml")
+ s, name, err := c.GetSamplerConfigForDataset("nonexistent")
+
assert.NoError(t, err)
+ assert.Equal(t, "DeterministicSampler", name)
+
+ assert.IsType(t, &DeterministicSamplerConfig{}, s)
+}
+
+func TestDatasetPrefix(t *testing.T) {
+ config, rules := createTempConfigs(t, `
+ DatasetPrefix = "dataset"
- dummy := []byte(`
[InMemCollector]
CacheCapacity=1000
@@ -444,35 +604,63 @@ func TestDefaultSampler(t *testing.T) {
MetricsAPIKey="1234"
MetricsDataset="testDatasetName"
MetricsReportingInterval=3
- `)
- _, err = configFile.Write(dummy)
+ [HoneycombLogger]
+ LoggerHoneycombAPI="http://honeycomb.io"
+ LoggerAPIKey="1234"
+ LoggerDataset="loggerDataset"
+ `, "")
+ defer os.Remove(rules)
+ defer os.Remove(config)
+
+ c, err := NewConfig(config, rules, func(err error) {})
assert.NoError(t, err)
- configFile.Close()
- c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {})
+ assert.Equal(t, "dataset", c.GetDatasetPrefix())
+}
- assert.NoError(t, err)
+func TestQueryAuthToken(t *testing.T) {
+ config, rules := createTempConfigs(t, `
+ QueryAuthToken = "MySeekretToken"
+
+ [InMemCollector]
+ CacheCapacity=1000
+
+ [HoneycombMetrics]
+ MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsAPIKey="1234"
+ MetricsDataset="testDatasetName"
+ MetricsReportingInterval=3
- s, err := c.GetSamplerConfigForDataset("nonexistent")
+ [HoneycombLogger]
+ LoggerHoneycombAPI="http://honeycomb.io"
+ LoggerAPIKey="1234"
+ LoggerDataset="loggerDataset" `, "")
+ defer os.Remove(rules)
+ defer os.Remove(config)
+ c, err := NewConfig(config, rules, func(err error) {})
assert.NoError(t, err)
- assert.IsType(t, &DeterministicSamplerConfig{}, s)
+ assert.Equal(t, "MySeekretToken", c.GetQueryAuthToken())
}
-func TestHoneycombLoggerConfig(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", "")
+func TestGRPCServerParameters(t *testing.T) {
+ tmpDir, err := os.MkdirTemp("", "")
assert.NoError(t, err)
defer os.RemoveAll(tmpDir)
- rulesFile, err := ioutil.TempFile(tmpDir, "*.toml")
+ configFile, err := os.CreateTemp(tmpDir, "*.toml")
assert.NoError(t, err)
- configFile, err := ioutil.TempFile(tmpDir, "*.toml")
- assert.NoError(t, err)
+ _, err = configFile.Write([]byte(`
+ [GRPCServerParameters]
+ MaxConnectionIdle = "1m"
+ MaxConnectionAge = "2m"
+ MaxConnectionAgeGrace = "3m"
+ Time = "4m"
+ Timeout = "5m"
- dummy := []byte(`
[InMemCollector]
CacheCapacity=1000
@@ -486,41 +674,57 @@ func TestHoneycombLoggerConfig(t *testing.T) {
LoggerHoneycombAPI="http://honeycomb.io"
LoggerAPIKey="1234"
LoggerDataset="loggerDataset"
- LoggerSamplerEnabled=true
- LoggerSamplerThroughput=10
- `)
-
- _, err = configFile.Write(dummy)
+ `))
assert.NoError(t, err)
configFile.Close()
- c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {})
-
+ rulesFile, err := os.CreateTemp(tmpDir, "*.toml")
assert.NoError(t, err)
- loggerConfig, err := c.GetHoneycombLoggerConfig()
-
+ c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {})
assert.NoError(t, err)
- assert.Equal(t, "http://honeycomb.io", loggerConfig.LoggerHoneycombAPI)
- assert.Equal(t, "1234", loggerConfig.LoggerAPIKey)
- assert.Equal(t, "loggerDataset", loggerConfig.LoggerDataset)
- assert.Equal(t, true, loggerConfig.LoggerSamplerEnabled)
- assert.Equal(t, 10, loggerConfig.LoggerSamplerThroughput)
+ assert.Equal(t, 1*time.Minute, c.GetGRPCMaxConnectionIdle())
+ assert.Equal(t, 2*time.Minute, c.GetGRPCMaxConnectionAge())
+ assert.Equal(t, 3*time.Minute, c.GetGRPCMaxConnectionAgeGrace())
+ assert.Equal(t, 4*time.Minute, c.GetGRPCTime())
+ assert.Equal(t, 5*time.Minute, c.GetGRPCTimeout())
}
-func TestHoneycombLoggerConfigDefaults(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", "")
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+func TestHoneycombAdditionalErrorConfig(t *testing.T) {
+ config, rules := createTempConfigs(t, `
+ AdditionalErrorFields = [
+ "first",
+ "second"
+ ]
- rulesFile, err := ioutil.TempFile(tmpDir, "*.toml")
- assert.NoError(t, err)
+ [InMemCollector]
+ CacheCapacity=1000
- configFile, err := ioutil.TempFile(tmpDir, "*.toml")
+ [HoneycombMetrics]
+ MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsAPIKey="1234"
+ MetricsDataset="testDatasetName"
+ MetricsReportingInterval=3
+
+ [HoneycombLogger]
+ LoggerHoneycombAPI="http://honeycomb.io"
+ LoggerAPIKey="1234"
+ LoggerDataset="loggerDataset"
+ LoggerSamplerEnabled=true
+ LoggerSamplerThroughput=10
+ `, "")
+ defer os.Remove(rules)
+ defer os.Remove(config)
+
+ c, err := NewConfig(config, rules, func(err error) {})
assert.NoError(t, err)
- dummy := []byte(`
+ assert.Equal(t, []string{"first", "second"}, c.GetAdditionalErrorFields())
+}
+
+func TestHoneycombAdditionalErrorDefaults(t *testing.T) {
+ config, rules := createTempConfigs(t, `
[InMemCollector]
CacheCapacity=1000
@@ -534,20 +738,69 @@ func TestHoneycombLoggerConfigDefaults(t *testing.T) {
LoggerHoneycombAPI="http://honeycomb.io"
LoggerAPIKey="1234"
LoggerDataset="loggerDataset"
- `)
+ LoggerSamplerEnabled=true
+ LoggerSamplerThroughput=10
+ `, "")
+ defer os.Remove(rules)
+ defer os.Remove(config)
- _, err = configFile.Write(dummy)
+ c, err := NewConfig(config, rules, func(err error) {})
assert.NoError(t, err)
- configFile.Close()
- c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {})
+ assert.Equal(t, []string{"trace.span_id"}, c.GetAdditionalErrorFields())
+}
+
+func TestSampleCacheParameters(t *testing.T) {
+ config, rules := createTempConfigs(t, `
+ [InMemCollector]
+ CacheCapacity=1000
+ [HoneycombMetrics]
+ MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsAPIKey="1234"
+ MetricsDataset="testDatasetName"
+ MetricsReportingInterval=3
+
+ `, "")
+ defer os.Remove(rules)
+ defer os.Remove(config)
+
+ c, err := NewConfig(config, rules, func(err error) {})
assert.NoError(t, err)
- loggerConfig, err := c.GetHoneycombLoggerConfig()
+ s := c.GetSampleCacheConfig()
+ assert.Equal(t, "legacy", s.Type)
+ assert.Equal(t, uint(10_000), s.KeptSize)
+ assert.Equal(t, uint(1_000_000), s.DroppedSize)
+ assert.Equal(t, 10*time.Second, s.SizeCheckInterval)
+}
+
+func TestSampleCacheParametersCuckoo(t *testing.T) {
+ config, rules := createTempConfigs(t, `
+ [InMemCollector]
+ CacheCapacity=1000
+
+ [HoneycombMetrics]
+ MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsAPIKey="1234"
+ MetricsDataset="testDatasetName"
+ MetricsReportingInterval=3
+
+ [SampleCache]
+ Type="cuckoo"
+ KeptSize=100_000
+ DroppedSize=10_000_000
+ SizeCheckInterval="60s"
+ `, "")
+ defer os.Remove(rules)
+ defer os.Remove(config)
+ c, err := NewConfig(config, rules, func(err error) {})
assert.NoError(t, err)
- assert.Equal(t, false, loggerConfig.LoggerSamplerEnabled)
- assert.Equal(t, 5, loggerConfig.LoggerSamplerThroughput)
+ s := c.GetSampleCacheConfig()
+ assert.Equal(t, "cuckoo", s.Type)
+ assert.Equal(t, uint(100_000), s.KeptSize)
+ assert.Equal(t, uint(10_000_000), s.DroppedSize)
+ assert.Equal(t, 1*time.Minute, s.SizeCheckInterval)
}
diff --git a/config/config_test_reload_error_test.go b/config/config_test_reload_error_test.go
index af7447e12f..e60084d08d 100644
--- a/config/config_test_reload_error_test.go
+++ b/config/config_test_reload_error_test.go
@@ -1,9 +1,8 @@
-// +build all !race
+//go:build all || !race
package config
import (
- "io/ioutil"
"os"
"sync"
"testing"
@@ -13,14 +12,14 @@ import (
)
func TestErrorReloading(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", "")
+ tmpDir, err := os.MkdirTemp("", "")
assert.NoError(t, err)
defer os.RemoveAll(tmpDir)
- rulesFile, err := ioutil.TempFile(tmpDir, "*.toml")
+ rulesFile, err := os.CreateTemp(tmpDir, "*.toml")
assert.NoError(t, err)
- configFile, err := ioutil.TempFile(tmpDir, "*.toml")
+ configFile, err := os.CreateTemp(tmpDir, "*.toml")
assert.NoError(t, err)
dummy := []byte(`
@@ -28,7 +27,7 @@ func TestErrorReloading(t *testing.T) {
CacheCapacity=1000
[HoneycombMetrics]
- MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsOpsrampAPI="http://jirs5"
MetricsAPIKey="1234"
MetricsDataset="testDatasetName"
MetricsReportingInterval=3
@@ -55,9 +54,12 @@ func TestErrorReloading(t *testing.T) {
t.Error(err)
}
- d, _ := c.GetSamplerConfigForDataset("dataset5")
+ d, name, _ := c.GetSamplerConfigForDataset("dataset5")
if _, ok := d.(DeterministicSamplerConfig); ok {
- t.Error("received", d, "expected", "DeterministicSampler")
+ t.Error("type received", d, "expected", "DeterministicSampler")
+ }
+ if name != "DeterministicSampler" {
+ t.Error("name received", d, "expected", "DeterministicSampler")
}
wg := &sync.WaitGroup{}
@@ -73,7 +75,7 @@ func TestErrorReloading(t *testing.T) {
}
}()
- err = ioutil.WriteFile(rulesFile.Name(), []byte(`Sampler="InvalidSampler"`), 0644)
+ err = os.WriteFile(rulesFile.Name(), []byte(`Sampler="InvalidSampler"`), 0644)
if err != nil {
t.Error(err)
@@ -82,7 +84,7 @@ func TestErrorReloading(t *testing.T) {
wg.Wait()
// config should error and not update sampler to invalid type
- d, _ = c.GetSamplerConfigForDataset("dataset5")
+ d, _, _ = c.GetSamplerConfigForDataset("dataset5")
if _, ok := d.(DeterministicSamplerConfig); ok {
t.Error("received", d, "expected", "DeterministicSampler")
}
diff --git a/config/file_config.go b/config/file_config.go
index f06b85f53e..caabb3a18d 100644
--- a/config/file_config.go
+++ b/config/file_config.go
@@ -1,17 +1,29 @@
package config
import (
+ "crypto/md5"
+ "encoding/hex"
"errors"
"fmt"
+ "github.com/opsramp/libtrace-go/transmission"
+ "io"
"net"
+ "net/url"
+ "os"
+ "sort"
"strings"
"sync"
"time"
"github.com/fsnotify/fsnotify"
"github.com/go-playground/validator"
- libhoney "github.com/honeycombio/libhoney-go"
- viper "github.com/spf13/viper"
+ "github.com/opsramp/libtrace-go"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+)
+
+const (
+ DefaultDataset = "ds"
)
type fileConfig struct {
@@ -21,57 +33,75 @@ type fileConfig struct {
callbacks []func()
errorCallback func(error)
mux sync.RWMutex
+ lastLoadTime time.Time
}
-type RulesBasedSamplerCondition struct {
- Field string
- Operator string
- Value interface{}
-}
-
-func (r *RulesBasedSamplerCondition) String() string {
- return fmt.Sprintf("%+v", *r)
-}
-
-type RulesBasedSamplerRule struct {
- Name string
- SampleRate int
- Drop bool
- Condition []*RulesBasedSamplerCondition
-}
-
-func (r *RulesBasedSamplerRule) String() string {
- return fmt.Sprintf("%+v", *r)
+type configContents struct {
+ ListenAddr string `validate:"required"`
+ PeerListenAddr string `validate:"required"`
+ CompressPeerCommunication bool
+ GRPCListenAddr string
+ GRPCPeerListenAddr string
+ OpsrampAPI string `validate:"required,url"`
+ Dataset string
+ LoggingLevel string `validate:"required"`
+ Collector string `validate:"required,oneof= InMemCollector"`
+ Sampler string `validate:"required,oneof= DeterministicSampler DynamicSampler EMADynamicSampler RulesBasedSampler TotalThroughputSampler"`
+ SendDelay time.Duration `validate:"required"`
+ BatchTimeout time.Duration
+ TraceTimeout time.Duration `validate:"required"`
+ MaxBatchSize uint `validate:"required"`
+ SendTicker time.Duration `validate:"required"`
+ UpstreamBufferSize int `validate:"required"`
+ PeerBufferSize int `validate:"required"`
+ DebugServiceAddr string
+ DryRun bool
+ DryRunFieldName string
+ PeerManagement PeerManagementConfig `validate:"required"`
+ InMemCollector InMemoryCollectorCacheCapacity `validate:"required"`
+ AddHostMetadataToTrace bool
+ AddAdditionalMetadata map[string]string
+ AddRuleReasonToTrace bool
+ EnvironmentCacheTTL time.Duration
+ DatasetPrefix string
+ QueryAuthToken string
+ GRPCServerParameters GRPCServerParameters
+ AdditionalErrorFields []string
+ AddSpanCountToRoot bool
+ CacheOverrunStrategy string
+ SampleCache SampleCacheConfig `validate:"required"`
+
+ UseTls bool
+ UseTlsInSecure bool
+
+ ProxyConfiguration
+ AuthConfiguration
+ MetricsConfig
+ RetryConfiguration *RetryConfiguration
}
-type RulesBasedSamplerConfig struct {
- Rule []*RulesBasedSamplerRule
+type RetryConfiguration struct {
+ InitialInterval time.Duration
+ RandomizationFactor float64
+ Multiplier float64
+ MaxInterval time.Duration
+ MaxElapsedTime time.Duration
}
-func (r *RulesBasedSamplerConfig) String() string {
- return fmt.Sprintf("%+v", *r)
+type ProxyConfiguration struct {
+ Protocol string
+ Host string
+ Port int64
+ Username string
+ Password string
}
-type configContents struct {
- ListenAddr string `validate:"required"`
- PeerListenAddr string `validate:"required"`
- APIKeys []string `validate:"required"`
- HoneycombAPI string `validate:"required,url"`
- Logger string `validate:"required,oneof= logrus honeycomb"`
- LoggingLevel string `validate:"required"`
- Collector string `validate:"required,oneof= InMemCollector"`
- Sampler string `validate:"required,oneof= DeterministicSampler DynamicSampler EMADynamicSampler RulesBasedSampler TotalThroughputSampler"`
- Metrics string `validate:"required,oneof= prometheus honeycomb"`
- SendDelay time.Duration `validate:"required"`
- TraceTimeout time.Duration `validate:"required"`
- SendTicker time.Duration `validate:"required"`
- UpstreamBufferSize int `validate:"required"`
- PeerBufferSize int `validate:"required"`
- DebugServiceAddr string
- DryRun bool
- DryRunFieldName string
- PeerManagement PeerManagementConfig `validate:"required"`
- InMemCollector InMemoryCollectorCacheCapacity `validate:"required"`
+type AuthConfiguration struct {
+ SkipAuth bool
+ Endpoint string `validate:"url"`
+ Key string
+ Secret string
+ TenantId string
}
type InMemoryCollectorCacheCapacity struct {
@@ -80,61 +110,109 @@ type InMemoryCollectorCacheCapacity struct {
MaxAlloc uint64
}
-type HoneycombLevel int
-
-type HoneycombLoggerConfig struct {
- LoggerHoneycombAPI string `validate:"required,url"`
- LoggerAPIKey string `validate:"required"`
- LoggerDataset string `validate:"required"`
- LoggerSamplerEnabled bool
- LoggerSamplerThroughput int
- Level HoneycombLevel
+type LogrusLoggerConfig struct {
+ LogFormatter string `validate:"required",toml:"LogFormatter"`
+ LogOutput string `validate:"required,oneof= stdout stderr file",toml:"LogOutput"`
+ File struct {
+ FileName string `toml:"FileName"`
+ MaxSize int `toml:"MaxSize"`
+ MaxBackups int `toml:"MaxBackups"`
+ Compress bool `toml:"Compress"`
+ } `toml:"File"`
}
-type PrometheusMetricsConfig struct {
- MetricsListenAddr string `validate:"required"`
-}
-
-type HoneycombMetricsConfig struct {
- MetricsHoneycombAPI string `validate:"required,url"`
- MetricsAPIKey string `validate:"required"`
- MetricsDataset string `validate:"required"`
- MetricsReportingInterval int64 `validate:"required"`
+type MetricsConfig struct {
+ Enable bool
+ ListenAddr string `validate:"required"`
+ OpsRampAPI string
+ ReportingInterval int64
+ MetricsList []string
}
type PeerManagementConfig struct {
Type string `validate:"required,oneof= file redis"`
Peers []string `validate:"dive,url"`
RedisHost string
+ RedisUsername string
+ RedisPassword string
+ UseTLS bool
+ UseTLSInsecure bool
IdentifierInterfaceName string
UseIPV6Identifier bool
RedisIdentifier string
+ Timeout time.Duration
+ Strategy string `validate:"required,oneof= legacy hash"`
+}
+
+type SampleCacheConfig struct {
+ Type string `validate:"required,oneof= legacy cuckoo"`
+ KeptSize uint `validate:"gte=500"`
+ DroppedSize uint `validate:"gte=100_000"`
+ SizeCheckInterval time.Duration `validate:"gte=1_000_000_000"` // 1 second minimum
+}
+
+// GRPCServerParameters allow you to configure the GRPC ServerParameters used
+// by refinery's own GRPC server:
+// https://pkg.go.dev/google.golang.org/grpc/keepalive#ServerParameters
+type GRPCServerParameters struct {
+ MaxConnectionIdle time.Duration
+ MaxConnectionAge time.Duration
+ MaxConnectionAgeGrace time.Duration
+ Time time.Duration
+ Timeout time.Duration
}
// NewConfig creates a new config struct
func NewConfig(config, rules string, errorCallback func(error)) (Config, error) {
c := viper.New()
- c.BindEnv("PeerManagement.RedisHost", "REFINERY_REDIS_HOST")
- c.SetDefault("ListenAddr", "0.0.0.0:8080")
- c.SetDefault("PeerListenAddr", "0.0.0.0:8081")
- c.SetDefault("APIKeys", []string{"*"})
- c.SetDefault("PeerManagement.Peers", []string{"http://127.0.0.1:8081"})
+ c.SetDefault("ListenAddr", "0.0.0.0:8082")
+ c.SetDefault("PeerListenAddr", "0.0.0.0:8083")
+ c.SetDefault("CompressPeerCommunication", true)
+ c.SetDefault("PeerManagement.Peers", []string{"http://127.0.0.1:8082"})
c.SetDefault("PeerManagement.Type", "file")
+ c.SetDefault("PeerManagement.UseTLS", false)
+ c.SetDefault("PeerManagement.UseTLSInsecure", false)
c.SetDefault("PeerManagement.UseIPV6Identifier", false)
- c.SetDefault("HoneycombAPI", "https://api.honeycomb.io")
- c.SetDefault("Logger", "logrus")
- c.SetDefault("LoggingLevel", "debug")
+ c.SetDefault("OpsrampAPI", "")
+ c.SetDefault("Dataset", DefaultDataset)
+ c.SetDefault("PeerManagement.Timeout", 5*time.Second)
+ c.SetDefault("PeerManagement.Strategy", "legacy")
+ c.SetDefault("LoggingLevel", "info")
c.SetDefault("Collector", "InMemCollector")
- c.SetDefault("Metrics", "honeycomb")
c.SetDefault("SendDelay", 2*time.Second)
+ c.SetDefault("BatchTimeout", libtrace.DefaultBatchTimeout)
c.SetDefault("TraceTimeout", 60*time.Second)
+ c.SetDefault("MaxBatchSize", 500)
c.SetDefault("SendTicker", 100*time.Millisecond)
- c.SetDefault("UpstreamBufferSize", libhoney.DefaultPendingWorkCapacity)
- c.SetDefault("PeerBufferSize", libhoney.DefaultPendingWorkCapacity)
+ c.SetDefault("UpstreamBufferSize", libtrace.DefaultPendingWorkCapacity)
+ c.SetDefault("PeerBufferSize", libtrace.DefaultPendingWorkCapacity)
c.SetDefault("MaxAlloc", uint64(0))
- c.SetDefault("HoneycombLogger.LoggerSamplerEnabled", false)
- c.SetDefault("HoneycombLogger.LoggerSamplerThroughput", 5)
+ c.SetDefault("AddHostMetadataToTrace", false)
+ c.SetDefault("AddAdditionalMetadata", map[string]string{"app": "default"})
+ c.SetDefault("AddRuleReasonToTrace", false)
+ c.SetDefault("EnvironmentCacheTTL", time.Hour)
+ c.SetDefault("GRPCServerParameters.MaxConnectionIdle", 1*time.Minute)
+ c.SetDefault("GRPCServerParameters.MaxConnectionAge", time.Duration(0))
+ c.SetDefault("GRPCServerParameters.MaxConnectionAgeGrace", time.Duration(0))
+ c.SetDefault("GRPCServerParameters.Time", 10*time.Second)
+ c.SetDefault("GRPCServerParameters.Timeout", 2*time.Second)
+ c.SetDefault("AdditionalErrorFields", []string{"trace.span_id"})
+ c.SetDefault("AddSpanCountToRoot", false)
+ c.SetDefault("CacheOverrunStrategy", "resize")
+ c.SetDefault("SampleCache.Type", "legacy")
+ c.SetDefault("SampleCache.KeptSize", 10_000)
+ c.SetDefault("SampleCache.DroppedSize", 1_000_000)
+ c.SetDefault("SampleCache.SizeCheckInterval", 10*time.Second)
+
+ // AuthConfig Defaults
+ c.SetDefault("AuthConfiguration.SkipAuth", false)
+
+ // MetricsConfig Defaults
+ c.SetDefault("MetricsConfig.Enable", false)
+ c.SetDefault("MetricsConfig.ListenAddr", "0.0.0.0:2112")
+ c.SetDefault("MetricsConfig.ReportingInterval", 10)
+ c.SetDefault("MetricsConfig.MetricsList", []string{".*"})
c.SetConfigFile(config)
err := c.ReadInConfig()
@@ -148,7 +226,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error)
r.SetDefault("Sampler", "DeterministicSampler")
r.SetDefault("SampleRate", 1)
r.SetDefault("DryRun", false)
- r.SetDefault("DryRunFieldName", "refinery_kept")
+ r.SetDefault("DryRunFieldName", "tracing-proxy_kept")
r.SetConfigFile(rules)
err = r.ReadInConfig()
@@ -177,7 +255,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error)
return nil, err
}
- err = fc.validateConditionalConfigs()
+ err = fc.validateGeneralConfigs()
if err != nil {
return nil, err
}
@@ -204,7 +282,7 @@ func (f *fileConfig) onChange(in fsnotify.Event) {
return
}
- err = f.validateConditionalConfigs()
+ err = f.validateGeneralConfigs()
if err != nil {
f.errorCallback(err)
return
@@ -218,9 +296,6 @@ func (f *fileConfig) onChange(in fsnotify.Event) {
f.unmarshal()
- f.mux.RLock()
- defer f.mux.RUnlock()
-
for _, c := range f.callbacks {
c()
}
@@ -244,40 +319,24 @@ func (f *fileConfig) unmarshal() error {
return nil
}
-func (f *fileConfig) validateConditionalConfigs() error {
- // validate logger config
- loggerType, err := f.GetLoggerType()
- if err != nil {
- return err
- }
- if loggerType == "honeycomb" {
- _, err = f.GetHoneycombLoggerConfig()
- if err != nil {
- return err
- }
- }
+func (f *fileConfig) validateGeneralConfigs() error {
+ f.lastLoadTime = time.Now()
// validate metrics config
- metricsType, err := f.GetMetricsType()
- if err != nil {
- return err
- }
- if metricsType == "honeycomb" {
- _, err = f.GetHoneycombMetricsConfig()
- if err != nil {
- return err
- }
+ metricsConfig := f.GetMetricsConfig()
+ if metricsConfig.ReportingInterval < 10 {
+ return fmt.Errorf("mertics reporting interval %d not allowed, must be >= 10", metricsConfig.ReportingInterval)
}
- if metricsType == "prometheus" {
- _, err = f.GetPrometheusMetricsConfig()
- if err != nil {
- return err
- }
+ if len(metricsConfig.MetricsList) < 1 {
+ return fmt.Errorf("mertics list cant be empty")
}
+
return nil
}
func (f *fileConfig) validateSamplerConfigs() error {
+ logrus.Debugf("Sampler rules config: %+v", f.rules)
+
keys := f.rules.AllKeys()
for _, key := range keys {
parts := strings.Split(key, ".")
@@ -298,16 +357,16 @@ func (f *fileConfig) validateSamplerConfigs() error {
case "TotalThroughputSampler":
i = &TotalThroughputSamplerConfig{}
default:
- return errors.New("Invalid or missing default sampler type")
+ return fmt.Errorf("Invalid or missing default sampler type: %s", t)
}
err := f.rules.Unmarshal(i)
if err != nil {
- return err
+ return fmt.Errorf("Failed to unmarshal sampler rule: %w", err)
}
v := validator.New()
err = v.Struct(i)
if err != nil {
- return err
+ return fmt.Errorf("Failed to validate sampler rule: %w", err)
}
}
@@ -327,18 +386,18 @@ func (f *fileConfig) validateSamplerConfigs() error {
case "TotalThroughputSampler":
i = &TotalThroughputSamplerConfig{}
default:
- return errors.New("Invalid or missing dataset sampler type")
+ return fmt.Errorf("Invalid or missing dataset sampler type: %s", t)
}
datasetName := parts[0]
if sub := f.rules.Sub(datasetName); sub != nil {
err := sub.Unmarshal(i)
if err != nil {
- return err
+ return fmt.Errorf("Failed to unmarshal dataset sampler rule: %w", err)
}
v := validator.New()
err = v.Struct(i)
if err != nil {
- return err
+ return fmt.Errorf("Failed to validate dataset sampler rule: %w", err)
}
}
}
@@ -375,11 +434,39 @@ func (f *fileConfig) GetPeerListenAddr() (string, error) {
return f.conf.PeerListenAddr, nil
}
-func (f *fileConfig) GetAPIKeys() ([]string, error) {
+func (f *fileConfig) GetCompressPeerCommunication() bool {
f.mux.RLock()
defer f.mux.RUnlock()
- return f.conf.APIKeys, nil
+ return f.conf.CompressPeerCommunication
+}
+
+func (f *fileConfig) GetGRPCListenAddr() (string, error) {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ // GRPC listen addr is optional, only check value is valid if not empty
+ if f.conf.GRPCListenAddr != "" {
+ _, _, err := net.SplitHostPort(f.conf.GRPCListenAddr)
+ if err != nil {
+ return "", err
+ }
+ }
+ return f.conf.GRPCListenAddr, nil
+}
+
+func (f *fileConfig) GetGRPCPeerListenAddr() (string, error) {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ // GRPC listen addr is optional, only check value is valid if not empty
+ if f.conf.GRPCPeerListenAddr != "" {
+ _, _, err := net.SplitHostPort(f.conf.GRPCPeerListenAddr)
+ if err != nil {
+ return "", err
+ }
+ }
+ return f.conf.GRPCPeerListenAddr, nil
}
func (f *fileConfig) GetPeerManagementType() (string, error) {
@@ -389,6 +476,13 @@ func (f *fileConfig) GetPeerManagementType() (string, error) {
return f.conf.PeerManagement.Type, nil
}
+func (f *fileConfig) GetPeerManagementStrategy() (string, error) {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.PeerManagement.Strategy, nil
+}
+
func (f *fileConfig) GetPeers() ([]string, error) {
f.mux.RLock()
defer f.mux.RUnlock()
@@ -403,6 +497,60 @@ func (f *fileConfig) GetRedisHost() (string, error) {
return f.config.GetString("PeerManagement.RedisHost"), nil
}
+func (f *fileConfig) GetRedisUsername() (string, error) {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.config.GetString("PeerManagement.RedisUsername"), nil
+}
+
+func (f *fileConfig) GetRedisPassword() (string, error) {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.config.GetString("PeerManagement.RedisPassword"), nil
+}
+
+func (f *fileConfig) GetRedisPrefix() string {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ prefix := f.config.GetString("PeerManagement.RedisPrefix")
+ if prefix == "" {
+ prefix = "tracing-proxy"
+ }
+
+ return prefix
+}
+
+func (f *fileConfig) GetRedisDatabase() int {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.config.GetInt("PeerManagement.RedisDatabase")
+}
+
+func (f *fileConfig) GetProxyConfig() ProxyConfiguration {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.ProxyConfiguration
+}
+
+func (f *fileConfig) GetUseTLS() (bool, error) {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.config.GetBool("PeerManagement.UseTLS"), nil
+}
+
+func (f *fileConfig) GetUseTLSInsecure() (bool, error) {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.config.GetBool("PeerManagement.UseTLSInsecure"), nil
+}
+
func (f *fileConfig) GetIdentifierInterfaceName() (string, error) {
f.mux.RLock()
defer f.mux.RUnlock()
@@ -424,51 +572,62 @@ func (f *fileConfig) GetRedisIdentifier() (string, error) {
return f.config.GetString("PeerManagement.RedisIdentifier"), nil
}
-func (f *fileConfig) GetHoneycombAPI() (string, error) {
+func (f *fileConfig) GetOpsrampAPI() (string, error) {
f.mux.RLock()
defer f.mux.RUnlock()
- return f.conf.HoneycombAPI, nil
+ _, err := url.Parse(f.conf.OpsrampAPI)
+ if err != nil {
+ return "", err
+ }
+
+ return f.conf.OpsrampAPI, nil
}
-func (f *fileConfig) GetLoggingLevel() (string, error) {
+func (f *fileConfig) GetAuthConfig() AuthConfiguration {
f.mux.RLock()
defer f.mux.RUnlock()
- return f.conf.LoggingLevel, nil
+ return f.conf.AuthConfiguration
}
-func (f *fileConfig) GetLoggerType() (string, error) {
+func (f *fileConfig) GetRetryConfig() *RetryConfiguration {
f.mux.RLock()
defer f.mux.RUnlock()
- return f.conf.Logger, nil
+ if f.conf.RetryConfiguration == nil {
+ defaultConfig := transmission.NewDefaultRetrySettings()
+ return &RetryConfiguration{
+ InitialInterval: defaultConfig.InitialInterval,
+ RandomizationFactor: defaultConfig.RandomizationFactor,
+ Multiplier: defaultConfig.Multiplier,
+ MaxInterval: defaultConfig.MaxInterval,
+ MaxElapsedTime: defaultConfig.MaxElapsedTime,
+ }
+ }
+
+ return f.conf.RetryConfiguration
}
-func (f *fileConfig) GetHoneycombLoggerConfig() (HoneycombLoggerConfig, error) {
+func (f *fileConfig) GetDataset() (string, error) {
f.mux.RLock()
defer f.mux.RUnlock()
- hlConfig := &HoneycombLoggerConfig{}
- if sub := f.config.Sub("HoneycombLogger"); sub != nil {
- err := sub.UnmarshalExact(hlConfig)
- if err != nil {
- return *hlConfig, err
- }
+ return f.conf.Dataset, nil
+}
- // https://github.com/spf13/viper/issues/747
- hlConfig.LoggerSamplerEnabled = f.config.GetBool("HoneycombLogger.LoggerSamplerEnabled")
- hlConfig.LoggerSamplerThroughput = f.config.GetInt("HoneycombLogger.LoggerSamplerThroughput")
+func (f *fileConfig) GetTenantId() (string, error) {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
- v := validator.New()
- err = v.Struct(hlConfig)
- if err != nil {
- return *hlConfig, err
- }
+ return f.conf.AuthConfiguration.TenantId, nil
+}
- return *hlConfig, nil
- }
- return *hlConfig, errors.New("No config found for HoneycombLogger")
+func (f *fileConfig) GetLoggingLevel() (string, error) {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.LoggingLevel, nil
}
func (f *fileConfig) GetCollectorType() (string, error) {
@@ -478,10 +637,48 @@ func (f *fileConfig) GetCollectorType() (string, error) {
return f.conf.Collector, nil
}
-func (f *fileConfig) GetSamplerConfigForDataset(dataset string) (interface{}, error) {
+func (f *fileConfig) GetAllSamplerRules() (map[string]interface{}, error) {
+ samplers := make(map[string]interface{})
+
+ keys := f.rules.AllKeys()
+ for _, key := range keys {
+ parts := strings.Split(key, ".")
+
+ // extract default sampler rules
+ if parts[0] == "sampler" {
+ err := f.rules.Unmarshal(&samplers)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal sampler rule: %w", err)
+ }
+ t := f.rules.GetString(key)
+ samplers["sampler"] = t
+ continue
+ }
+
+ // extract all dataset sampler rules
+ if len(parts) > 1 && parts[1] == "sampler" {
+ t := f.rules.GetString(key)
+ m := make(map[string]interface{})
+ datasetName := parts[0]
+ if sub := f.rules.Sub(datasetName); sub != nil {
+ err := sub.Unmarshal(&m)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal sampler rule for dataset %s: %w", datasetName, err)
+ }
+ }
+ m["sampler"] = t
+ samplers[datasetName] = m
+ }
+ }
+ return samplers, nil
+}
+
+func (f *fileConfig) GetSamplerConfigForDataset(dataset string) (interface{}, string, error) {
f.mux.RLock()
defer f.mux.RUnlock()
+ const notfound = "not found"
+
key := fmt.Sprintf("%s.Sampler", dataset)
if ok := f.rules.IsSet(key); ok {
t := f.rules.GetString(key)
@@ -499,11 +696,11 @@ func (f *fileConfig) GetSamplerConfigForDataset(dataset string) (interface{}, er
case "TotalThroughputSampler":
i = &TotalThroughputSamplerConfig{}
default:
- return nil, errors.New("No Sampler found")
+ return nil, notfound, errors.New("No Sampler found")
}
if sub := f.rules.Sub(dataset); sub != nil {
- return i, sub.Unmarshal(i)
+ return i, t, sub.Unmarshal(i)
}
} else if ok := f.rules.IsSet("Sampler"); ok {
@@ -522,13 +719,13 @@ func (f *fileConfig) GetSamplerConfigForDataset(dataset string) (interface{}, er
case "TotalThroughputSampler":
i = &TotalThroughputSamplerConfig{}
default:
- return nil, errors.New("No Sampler found")
+ return nil, notfound, errors.New("No Sampler found")
}
- return i, f.rules.Unmarshal(i)
+ return i, t, f.rules.Unmarshal(i)
}
- return nil, errors.New("No Sampler found")
+ return nil, notfound, errors.New("No Sampler found")
}
func (f *fileConfig) GetInMemCollectorCacheCapacity() (InMemoryCollectorCacheCapacity, error) {
@@ -546,55 +743,34 @@ func (f *fileConfig) GetInMemCollectorCacheCapacity() (InMemoryCollectorCacheCap
return *capacity, errors.New("No config found for inMemCollector")
}
-func (f *fileConfig) GetMetricsType() (string, error) {
+func (f *fileConfig) GetLogrusConfig() (*LogrusLoggerConfig, error) {
f.mux.RLock()
defer f.mux.RUnlock()
- return f.conf.Metrics, nil
-}
+ logrusConfig := &LogrusLoggerConfig{}
-func (f *fileConfig) GetHoneycombMetricsConfig() (HoneycombMetricsConfig, error) {
- f.mux.RLock()
- defer f.mux.RUnlock()
-
- hmConfig := &HoneycombMetricsConfig{}
- if sub := f.config.Sub("HoneycombMetrics"); sub != nil {
- err := sub.UnmarshalExact(hmConfig)
+ if sub := f.config.Sub("LogrusLogger"); sub != nil {
+ err := sub.UnmarshalExact(logrusConfig)
if err != nil {
- return *hmConfig, err
+ return logrusConfig, err
}
v := validator.New()
- err = v.Struct(hmConfig)
+ err = v.Struct(logrusConfig)
if err != nil {
- return *hmConfig, err
+ return logrusConfig, err
}
- return *hmConfig, nil
+ return logrusConfig, nil
}
- return *hmConfig, errors.New("No config found for HoneycombMetrics")
+ return nil, errors.New("No config found for LogrusConfig")
}
-func (f *fileConfig) GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) {
+func (f *fileConfig) GetMetricsConfig() MetricsConfig {
f.mux.RLock()
defer f.mux.RUnlock()
- pcConfig := &PrometheusMetricsConfig{}
- if sub := f.config.Sub("PrometheusMetrics"); sub != nil {
- err := sub.UnmarshalExact(pcConfig)
- if err != nil {
- return *pcConfig, err
- }
-
- v := validator.New()
- err = v.Struct(pcConfig)
- if err != nil {
- return *pcConfig, err
- }
-
- return *pcConfig, nil
- }
- return *pcConfig, errors.New("No config found for PrometheusMetrics")
+ return f.conf.MetricsConfig
}
func (f *fileConfig) GetSendDelay() (time.Duration, error) {
@@ -604,6 +780,13 @@ func (f *fileConfig) GetSendDelay() (time.Duration, error) {
return f.conf.SendDelay, nil
}
+func (f *fileConfig) GetBatchTimeout() time.Duration {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.BatchTimeout
+}
+
func (f *fileConfig) GetTraceTimeout() (time.Duration, error) {
f.mux.RLock()
defer f.mux.RUnlock()
@@ -611,6 +794,13 @@ func (f *fileConfig) GetTraceTimeout() (time.Duration, error) {
return f.conf.TraceTimeout, nil
}
+func (f *fileConfig) GetMaxBatchSize() uint {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.MaxBatchSize
+}
+
func (f *fileConfig) GetOtherConfig(name string, iface interface{}) error {
f.mux.RLock()
defer f.mux.RUnlock()
@@ -671,3 +861,188 @@ func (f *fileConfig) GetDryRunFieldName() string {
return f.conf.DryRunFieldName
}
+
+func (f *fileConfig) GetAddHostMetadataToTrace() bool {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.AddHostMetadataToTrace
+}
+
+func (f *fileConfig) GetAddAdditionalMetadata() map[string]string {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ if len(f.conf.AddAdditionalMetadata) <= 5 {
+ return f.conf.AddAdditionalMetadata
+ }
+
+ // sorting the keys and sending the first 5
+ var keys []string
+ for k := range f.conf.AddAdditionalMetadata {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ m := map[string]string{}
+ for index := 0; index < 5; index++ {
+ if val, ok := f.conf.AddAdditionalMetadata[keys[index]]; ok {
+ m[keys[index]] = val
+ }
+ }
+
+ return m
+}
+
+func (f *fileConfig) GetSendMetricsToOpsRamp() bool {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.MetricsConfig.Enable
+}
+
+func (f *fileConfig) GetGlobalUseTLS() bool {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.UseTls
+}
+
+func (f *fileConfig) GetGlobalUseTLSInsecureSkip() bool {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return !f.conf.UseTlsInSecure
+}
+
+func (f *fileConfig) GetAddRuleReasonToTrace() bool {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.AddRuleReasonToTrace
+}
+
+func (f *fileConfig) GetEnvironmentCacheTTL() time.Duration {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.EnvironmentCacheTTL
+}
+
+func (f *fileConfig) GetDatasetPrefix() string {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.DatasetPrefix
+}
+
+func (f *fileConfig) GetQueryAuthToken() string {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.QueryAuthToken
+}
+
+func (f *fileConfig) GetGRPCMaxConnectionIdle() time.Duration {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.GRPCServerParameters.MaxConnectionIdle
+}
+
+func (f *fileConfig) GetGRPCMaxConnectionAge() time.Duration {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.GRPCServerParameters.MaxConnectionAge
+}
+
+func (f *fileConfig) GetGRPCMaxConnectionAgeGrace() time.Duration {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.GRPCServerParameters.MaxConnectionAgeGrace
+}
+
+func (f *fileConfig) GetGRPCTime() time.Duration {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.GRPCServerParameters.Time
+}
+
+func (f *fileConfig) GetGRPCTimeout() time.Duration {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.GRPCServerParameters.Timeout
+}
+
+func (f *fileConfig) GetPeerTimeout() time.Duration {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.PeerManagement.Timeout
+}
+
+func (f *fileConfig) GetAdditionalErrorFields() []string {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.AdditionalErrorFields
+}
+
+func (f *fileConfig) GetAddSpanCountToRoot() bool {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.AddSpanCountToRoot
+}
+
+func (f *fileConfig) GetCacheOverrunStrategy() string {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.CacheOverrunStrategy
+}
+
+func (f *fileConfig) GetSampleCacheConfig() SampleCacheConfig {
+ f.mux.RLock()
+ defer f.mux.RUnlock()
+
+ return f.conf.SampleCache
+}
+
+// calculates an MD5 sum for a file that returns the same result as the md5sum command
+func calcMD5For(filename string) string {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err.Error()
+ }
+ defer f.Close()
+ data, err := io.ReadAll(f)
+ if err != nil {
+ return err.Error()
+ }
+ h := md5.New()
+ if _, err := h.Write(data); err != nil {
+ return err.Error()
+ }
+ return hex.EncodeToString(h.Sum(nil))
+}
+
+func (f *fileConfig) GetConfigMetadata() []ConfigMetadata {
+ ret := make([]ConfigMetadata, 2)
+ ret[0] = ConfigMetadata{
+ Type: "config",
+ ID: f.config.ConfigFileUsed(),
+ Hash: calcMD5For(f.config.ConfigFileUsed()),
+ LoadedAt: f.lastLoadTime.Format(time.RFC3339),
+ }
+ ret[1] = ConfigMetadata{
+ Type: "rules",
+ ID: f.rules.ConfigFileUsed(),
+ Hash: calcMD5For(f.rules.ConfigFileUsed()),
+ LoadedAt: f.lastLoadTime.Format(time.RFC3339),
+ }
+ return ret
+}
diff --git a/config/mock.go b/config/mock.go
index 6064bffd97..9611efb03a 100644
--- a/config/mock.go
+++ b/config/mock.go
@@ -16,47 +16,74 @@ type MockConfig struct {
GetCollectorTypeVal string
GetInMemoryCollectorCacheCapacityErr error
GetInMemoryCollectorCacheCapacityVal InMemoryCollectorCacheCapacity
- GetHoneycombAPIErr error
- GetHoneycombAPIVal string
+ GetOpsrampAPIErr error
+ GetOpsrampAPIVal string
GetListenAddrErr error
GetListenAddrVal string
GetPeerListenAddrErr error
GetPeerListenAddrVal string
+ GetCompressPeerCommunicationsVal bool
+ GetGRPCListenAddrErr error
+ GetGRPCListenAddrVal string
GetLoggerTypeErr error
GetLoggerTypeVal string
- GetHoneycombLoggerConfigErr error
- GetHoneycombLoggerConfigVal HoneycombLoggerConfig
GetLoggingLevelErr error
GetLoggingLevelVal string
GetOtherConfigErr error
// GetOtherConfigVal must be a JSON representation of the config struct to be populated.
- GetOtherConfigVal string
- GetPeersErr error
- GetPeersVal []string
- GetRedisHostErr error
- GetRedisHostVal string
- GetSamplerTypeErr error
- GetSamplerTypeVal interface{}
- GetMetricsTypeErr error
- GetMetricsTypeVal string
- GetHoneycombMetricsConfigErr error
- GetHoneycombMetricsConfigVal HoneycombMetricsConfig
- GetPrometheusMetricsConfigErr error
- GetPrometheusMetricsConfigVal PrometheusMetricsConfig
- GetSendDelayErr error
- GetSendDelayVal time.Duration
- GetTraceTimeoutErr error
- GetTraceTimeoutVal time.Duration
- GetUpstreamBufferSizeVal int
- GetPeerBufferSizeVal int
- SendTickerVal time.Duration
- IdentifierInterfaceName string
- UseIPV6Identifier bool
- RedisIdentifier string
- PeerManagementType string
- DebugServiceAddr string
- DryRun bool
- DryRunFieldName string
+ GetOtherConfigVal string
+ GetPeersErr error
+ GetPeersVal []string
+ GetRedisHostErr error
+ GetRedisHostVal string
+ GetRedisUsernameErr error
+ GetRedisUsernameVal string
+ GetRedisPasswordErr error
+ GetRedisPasswordVal string
+ GetUseTLSErr error
+ GetUseTLSVal bool
+ GetUseTLSInsecureErr error
+ GetUseTLSInsecureVal bool
+ GetSamplerTypeErr error
+ GetSamplerTypeName string
+ GetSamplerTypeVal interface{}
+ GetMetricsTypeErr error
+ GetMetricsTypeVal string
+ GetOpsRampMetricsConfigErr error
+ GetOpsRampMetricsConfigVal MetricsConfig
+ GetSendDelayErr error
+ GetSendDelayVal time.Duration
+ GetBatchTimeoutVal time.Duration
+ GetTraceTimeoutErr error
+ GetTraceTimeoutVal time.Duration
+ GetMaxBatchSizeVal uint
+ GetUpstreamBufferSizeVal int
+ GetPeerBufferSizeVal int
+ SendTickerVal time.Duration
+ IdentifierInterfaceName string
+ UseIPV6Identifier bool
+ RedisIdentifier string
+ PeerManagementType string
+ PeerManagementStrategy string
+ DebugServiceAddr string
+ DryRun bool
+ DryRunFieldName string
+ AddHostMetadataToTrace bool
+ AddRuleReasonToTrace bool
+ EnvironmentCacheTTL time.Duration
+ DatasetPrefix string
+ QueryAuthToken string
+ GRPCMaxConnectionIdle time.Duration
+ GRPCMaxConnectionAge time.Duration
+ GRPCMaxConnectionAgeGrace time.Duration
+ GRPCTime time.Duration
+ GRPCTimeout time.Duration
+ PeerTimeout time.Duration
+ AdditionalErrorFields []string
+ AddSpanCountToRoot bool
+ CacheOverrunStrategy string
+ SampleCache SampleCacheConfig
+ CfgMetadata []ConfigMetadata
Mux sync.RWMutex
}
@@ -69,65 +96,83 @@ func (m *MockConfig) ReloadConfig() {
callback()
}
}
+
func (m *MockConfig) RegisterReloadCallback(callback func()) {
m.Mux.Lock()
m.Callbacks = append(m.Callbacks, callback)
m.Mux.Unlock()
}
+
func (m *MockConfig) GetAPIKeys() ([]string, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
return m.GetAPIKeysVal, m.GetAPIKeysErr
}
+
func (m *MockConfig) GetCollectorType() (string, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
return m.GetCollectorTypeVal, m.GetCollectorTypeErr
}
+
func (m *MockConfig) GetInMemCollectorCacheCapacity() (InMemoryCollectorCacheCapacity, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
return m.GetInMemoryCollectorCacheCapacityVal, m.GetInMemoryCollectorCacheCapacityErr
}
-func (m *MockConfig) GetHoneycombAPI() (string, error) {
+
+func (m *MockConfig) GetOpsrampAPI() (string, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
- return m.GetHoneycombAPIVal, m.GetHoneycombAPIErr
+ return m.GetOpsrampAPIVal, m.GetOpsrampAPIErr
}
+
func (m *MockConfig) GetListenAddr() (string, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
return m.GetListenAddrVal, m.GetListenAddrErr
}
+
func (m *MockConfig) GetPeerListenAddr() (string, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
return m.GetPeerListenAddrVal, m.GetPeerListenAddrErr
}
-func (m *MockConfig) GetLoggerType() (string, error) {
+
+func (m *MockConfig) GetCompressPeerCommunication() bool {
m.Mux.RLock()
defer m.Mux.RUnlock()
- return m.GetLoggerTypeVal, m.GetLoggerTypeErr
+ return m.GetCompressPeerCommunicationsVal
+}
+
+func (m *MockConfig) GetGRPCListenAddr() (string, error) {
+ m.Mux.RLock()
+ defer m.Mux.RUnlock()
+
+ return m.GetGRPCListenAddrVal, m.GetGRPCListenAddrErr
}
-func (m *MockConfig) GetHoneycombLoggerConfig() (HoneycombLoggerConfig, error) {
+
+func (m *MockConfig) GetLoggerType() (string, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
- return m.GetHoneycombLoggerConfigVal, m.GetHoneycombLoggerConfigErr
+ return m.GetLoggerTypeVal, m.GetLoggerTypeErr
}
+
func (m *MockConfig) GetLoggingLevel() (string, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
return m.GetLoggingLevelVal, m.GetLoggingLevelErr
}
+
func (m *MockConfig) GetOtherConfig(name string, iface interface{}) error {
m.Mux.RLock()
defer m.Mux.RUnlock()
@@ -138,42 +183,77 @@ func (m *MockConfig) GetOtherConfig(name string, iface interface{}) error {
}
return m.GetOtherConfigErr
}
+
func (m *MockConfig) GetPeers() ([]string, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
return m.GetPeersVal, m.GetPeersErr
}
+
func (m *MockConfig) GetRedisHost() (string, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
return m.GetRedisHostVal, m.GetRedisHostErr
}
-func (m *MockConfig) GetMetricsType() (string, error) {
+
+func (m *MockConfig) GetRedisUsername() (string, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
- return m.GetMetricsTypeVal, m.GetMetricsTypeErr
+ return m.GetRedisUsernameVal, m.GetRedisUsernameErr
+}
+
+func (m *MockConfig) GetRedisPassword() (string, error) {
+ m.Mux.RLock()
+ defer m.Mux.RUnlock()
+
+ return m.GetRedisPasswordVal, m.GetRedisPasswordErr
}
-func (m *MockConfig) GetHoneycombMetricsConfig() (HoneycombMetricsConfig, error) {
+
+func (m *MockConfig) GetUseTLS() (bool, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
- return m.GetHoneycombMetricsConfigVal, m.GetHoneycombMetricsConfigErr
+ return m.GetUseTLSVal, m.GetUseTLSErr
}
-func (m *MockConfig) GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) {
+
+func (m *MockConfig) GetUseTLSInsecure() (bool, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
- return m.GetPrometheusMetricsConfigVal, m.GetPrometheusMetricsConfigErr
+ return m.GetUseTLSInsecureVal, m.GetUseTLSInsecureErr
}
+
+func (m *MockConfig) GetMetricsType() (string, error) {
+ m.Mux.RLock()
+ defer m.Mux.RUnlock()
+
+ return m.GetMetricsTypeVal, m.GetMetricsTypeErr
+}
+
+func (m *MockConfig) GetPrometheusMetricsConfig() (MetricsConfig, error) {
+ m.Mux.RLock()
+ defer m.Mux.RUnlock()
+
+ return m.GetOpsRampMetricsConfigVal, m.GetOpsRampMetricsConfigErr
+}
+
func (m *MockConfig) GetSendDelay() (time.Duration, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
return m.GetSendDelayVal, m.GetSendDelayErr
}
+
+func (m *MockConfig) GetBatchTimeout() time.Duration {
+ m.Mux.RLock()
+ defer m.Mux.RUnlock()
+
+ return m.GetBatchTimeoutVal
+}
+
func (m *MockConfig) GetTraceTimeout() (time.Duration, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
@@ -181,12 +261,28 @@ func (m *MockConfig) GetTraceTimeout() (time.Duration, error) {
return m.GetTraceTimeoutVal, m.GetTraceTimeoutErr
}
+func (m *MockConfig) GetMaxBatchSize() uint {
+ m.Mux.RLock()
+ defer m.Mux.RUnlock()
+
+ return m.GetMaxBatchSizeVal
+}
+
// TODO: allow per-dataset mock values
-func (m *MockConfig) GetSamplerConfigForDataset(dataset string) (interface{}, error) {
+func (m *MockConfig) GetSamplerConfigForDataset(dataset string) (interface{}, string, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
- return m.GetSamplerTypeVal, m.GetSamplerTypeErr
+ return m.GetSamplerTypeVal, m.GetSamplerTypeName, m.GetSamplerTypeErr
+}
+
+// GetAllSamplerRules returns all dataset rules, including the default
+func (m *MockConfig) GetAllSamplerRules() (map[string]interface{}, error) {
+ m.Mux.RLock()
+ defer m.Mux.RUnlock()
+
+ v := map[string]interface{}{"dataset1": m.GetSamplerTypeVal}
+ return v, m.GetSamplerTypeErr
}
func (m *MockConfig) GetUpstreamBufferSize() int {
@@ -195,6 +291,7 @@ func (m *MockConfig) GetUpstreamBufferSize() int {
return m.GetUpstreamBufferSizeVal
}
+
func (m *MockConfig) GetPeerBufferSize() int {
m.Mux.RLock()
defer m.Mux.RUnlock()
@@ -237,6 +334,13 @@ func (m *MockConfig) GetPeerManagementType() (string, error) {
return m.PeerManagementType, nil
}
+func (m *MockConfig) GetPeerManagementStrategy() (string, error) {
+ m.Mux.RLock()
+ defer m.Mux.RUnlock()
+
+ return m.PeerManagementStrategy, nil
+}
+
func (m *MockConfig) GetDebugServiceAddr() (string, error) {
m.Mux.RLock()
defer m.Mux.RUnlock()
@@ -257,3 +361,115 @@ func (m *MockConfig) GetDryRunFieldName() string {
return m.DryRunFieldName
}
+
+func (m *MockConfig) GetAddHostMetadataToTrace() bool {
+ m.Mux.RLock()
+ defer m.Mux.RUnlock()
+
+ return m.AddHostMetadataToTrace
+}
+
+func (m *MockConfig) GetAddRuleReasonToTrace() bool {
+ m.Mux.RLock()
+ defer m.Mux.RUnlock()
+
+ return m.AddRuleReasonToTrace
+}
+
+func (f *MockConfig) GetEnvironmentCacheTTL() time.Duration {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.EnvironmentCacheTTL
+}
+
+func (f *MockConfig) GetDatasetPrefix() string {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.DatasetPrefix
+}
+
+func (f *MockConfig) GetQueryAuthToken() string {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.QueryAuthToken
+}
+
+func (f *MockConfig) GetGRPCMaxConnectionIdle() time.Duration {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.GRPCMaxConnectionIdle
+}
+
+func (f *MockConfig) GetGRPCMaxConnectionAge() time.Duration {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.GRPCMaxConnectionAge
+}
+
+func (f *MockConfig) GetGRPCMaxConnectionAgeGrace() time.Duration {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.GRPCMaxConnectionAgeGrace
+}
+
+func (f *MockConfig) GetGRPCTime() time.Duration {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.GRPCTime
+}
+
+func (f *MockConfig) GetGRPCTimeout() time.Duration {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.GRPCTimeout
+}
+
+func (f *MockConfig) GetPeerTimeout() time.Duration {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.PeerTimeout
+}
+
+func (f *MockConfig) GetAdditionalErrorFields() []string {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.AdditionalErrorFields
+}
+
+func (f *MockConfig) GetAddSpanCountToRoot() bool {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.AddSpanCountToRoot
+}
+
+func (f *MockConfig) GetCacheOverrunStrategy() string {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.CacheOverrunStrategy
+}
+
+func (f *MockConfig) GetSampleCacheConfig() SampleCacheConfig {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.SampleCache
+}
+
+func (f *MockConfig) GetConfigMetadata() []ConfigMetadata {
+ f.Mux.RLock()
+ defer f.Mux.RUnlock()
+
+ return f.CfgMetadata
+}
diff --git a/config/sampler_config.go b/config/sampler_config.go
index 170ed932c8..afb2ca2ca4 100644
--- a/config/sampler_config.go
+++ b/config/sampler_config.go
@@ -1,5 +1,11 @@
package config
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
type DeterministicSamplerConfig struct {
SampleRate int `validate:"required,gte=1"`
}
@@ -10,7 +16,7 @@ type DynamicSamplerConfig struct {
FieldList []string `validate:"required"`
UseTraceLength bool
AddSampleRateKeyToTrace bool
- AddSampleRateKeyToTraceField string
+ AddSampleRateKeyToTraceField string `validate:"required_with=AddSampleRateKeyToTrace"`
}
type EMADynamicSamplerConfig struct {
@@ -25,7 +31,7 @@ type EMADynamicSamplerConfig struct {
FieldList []string `validate:"required"`
UseTraceLength bool
AddSampleRateKeyToTrace bool
- AddSampleRateKeyToTraceField string
+ AddSampleRateKeyToTraceField string `validate:"required_with=AddSampleRateKeyToTrace"`
}
type TotalThroughputSamplerConfig struct {
@@ -34,5 +40,365 @@ type TotalThroughputSamplerConfig struct {
FieldList []string `validate:"required"`
UseTraceLength bool
AddSampleRateKeyToTrace bool
- AddSampleRateKeyToTraceField string
+ AddSampleRateKeyToTraceField string `validate:"required_with=AddSampleRateKeyToTrace"`
+}
+
+type RulesBasedSamplerCondition struct {
+ Field string
+ Operator string
+ Value interface{}
+ Datatype string
+ Matches func(value any, exists bool) bool
+}
+
+func (r *RulesBasedSamplerCondition) Init() error {
+ return r.setMatchesFunction()
+}
+
+func (r *RulesBasedSamplerCondition) String() string {
+ return fmt.Sprintf("%+v", *r)
+}
+
+func (r *RulesBasedSamplerCondition) setMatchesFunction() error {
+ switch r.Operator {
+ case "exists":
+ r.Matches = func(value any, exists bool) bool {
+ return exists
+ }
+ return nil
+ case "not-exists":
+ r.Matches = func(value any, exists bool) bool {
+ return !exists
+ }
+ return nil
+ case "!=", "=", ">", "<", "<=", ">=":
+ return setCompareOperators(r, r.Operator)
+ case "starts-with", "contains", "does-not-contain":
+ err := setMatchStringBasedOperators(r, r.Operator)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("unknown operator '%s'", r.Operator)
+ }
+ return nil
+}
+
+func tryConvertToInt(v any) (int, bool) {
+ switch value := v.(type) {
+ case int:
+ return value, true
+ case int64:
+ return int(value), true
+ case float64:
+ return int(value), true
+ case bool:
+ return 0, false
+ case string:
+ n, err := strconv.Atoi(value)
+ if err == nil {
+ return n, true
+ }
+ return 0, false
+ default:
+ return 0, false
+ }
+}
+
+func tryConvertToFloat(v any) (float64, bool) {
+ switch value := v.(type) {
+ case float64:
+ return value, true
+ case int:
+ return float64(value), true
+ case int64:
+ return float64(value), true
+ case bool:
+ return 0, false
+ case string:
+ n, err := strconv.ParseFloat(value, 64)
+ return n, err == nil
+ default:
+ return 0, false
+ }
+}
+
+// In the case of strings, we want to stringize everything we get through a
+// "standard" format, which we are defining as whatever Go does with the %v
+// operator to sprintf. This will make sure that no matter how people encode
+// their values, they compare on an equal footing.
+func tryConvertToString(v any) (string, bool) {
+ return fmt.Sprintf("%v", v), true
+}
+
+func tryConvertToBool(v any) bool {
+ value, ok := tryConvertToString(v)
+ if !ok {
+ return false
+ }
+ str, err := strconv.ParseBool(value)
+ if err != nil {
+ return false
+ }
+ if str {
+ return true
+ } else {
+ return false
+ }
+}
+
+func setCompareOperators(r *RulesBasedSamplerCondition, condition string) error {
+ switch r.Datatype {
+ case "string":
+ conditionValue, ok := tryConvertToString(r.Value)
+ if !ok {
+ return fmt.Errorf("could not convert %v to string", r.Value)
+ }
+
+ // check if conditionValue and spanValue are not equal
+ switch condition {
+ case "!=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToString(spanValue); exists && ok {
+ return n != conditionValue
+ }
+ return false
+ }
+ return nil
+ case "=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToString(spanValue); exists && ok {
+ return n == conditionValue
+ }
+ return false
+ }
+ return nil
+ case ">":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToString(spanValue); exists && ok {
+ return n > conditionValue
+ }
+ return false
+ }
+ return nil
+ case "<":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToString(spanValue); exists && ok {
+ return n < conditionValue
+ }
+ return false
+ }
+ return nil
+ case "<=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToString(spanValue); exists && ok {
+ return n <= conditionValue
+ }
+ return false
+ }
+ return nil
+ }
+ case "int":
+ // check if conditionValue and spanValue are not equal
+ conditionValue, ok := tryConvertToInt(r.Value)
+ if !ok {
+ return fmt.Errorf("could not convert %v to string", r.Value)
+ }
+ switch condition {
+ case "!=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToInt(spanValue); exists && ok {
+ return n != conditionValue
+ }
+ return false
+ }
+ return nil
+ case "=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToInt(spanValue); exists && ok {
+ return n == conditionValue
+ }
+ return false
+ }
+ return nil
+ case ">":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToInt(spanValue); exists && ok {
+ return n > conditionValue
+ }
+ return false
+ }
+ return nil
+ case ">=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToInt(spanValue); exists && ok {
+ return n >= conditionValue
+ }
+ return false
+ }
+ return nil
+ case "<":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToInt(spanValue); exists && ok {
+ return n < conditionValue
+ }
+ return false
+ }
+ return nil
+ case "<=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToInt(spanValue); exists && ok {
+ return n <= conditionValue
+ }
+ return false
+ }
+ return nil
+ }
+ case "float":
+ conditionValue, ok := tryConvertToFloat(r.Value)
+ if !ok {
+ return fmt.Errorf("could not convert %v to string", r.Value)
+ }
+ // check if conditionValue and spanValue are not equal
+ switch condition {
+ case "!=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToFloat(spanValue); exists && ok {
+ return n != conditionValue
+ }
+ return false
+ }
+ return nil
+ case "=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToFloat(spanValue); exists && ok {
+ return n == conditionValue
+ }
+ return false
+ }
+ return nil
+ case ">":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToFloat(spanValue); exists && ok {
+ return n > conditionValue
+ }
+ return false
+ }
+ return nil
+ case ">=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToFloat(spanValue); exists && ok {
+ return n >= conditionValue
+ }
+ return false
+ }
+ return nil
+ case "<":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToFloat(spanValue); exists && ok {
+ return n < conditionValue
+ }
+ return false
+ }
+ return nil
+ case "<=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n, ok := tryConvertToFloat(spanValue); exists && ok {
+ return n <= conditionValue
+ }
+ return false
+ }
+ return nil
+ }
+ case "bool":
+ conditionValue := tryConvertToBool(r.Value)
+
+ switch condition {
+ case "!=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n := tryConvertToBool(spanValue); exists && n {
+ return n != conditionValue
+ }
+ return false
+ }
+ return nil
+ case "=":
+ r.Matches = func(spanValue any, exists bool) bool {
+ if n := tryConvertToBool(spanValue); exists && n {
+ return n == conditionValue
+ }
+ return false
+ }
+ return nil
+ }
+ case "":
+ // user did not specify dataype, so do not specify matches function
+ default:
+ return fmt.Errorf("%s must be either string, int, float or bool", r.Datatype)
+ }
+ return nil
+}
+
+func setMatchStringBasedOperators(r *RulesBasedSamplerCondition, condition string) error {
+ conditionValue, ok := tryConvertToString(r.Value)
+ if !ok {
+ return fmt.Errorf("%s value must be a string, but was '%s'", condition, r.Value)
+ }
+
+ switch condition {
+ case "starts-with":
+ r.Matches = func(spanValue any, exists bool) bool {
+ s, ok := tryConvertToString(spanValue)
+ if ok {
+ return strings.HasPrefix(s, conditionValue)
+ }
+ return false
+ }
+ case "contains":
+ r.Matches = func(spanValue any, exists bool) bool {
+ s, ok := tryConvertToString(spanValue)
+ if ok {
+ return strings.Contains(s, conditionValue)
+ }
+ return false
+ }
+ case "does-not-contain":
+ r.Matches = func(spanValue any, exists bool) bool {
+ s, ok := tryConvertToString(spanValue)
+ if ok {
+ return !strings.Contains(s, conditionValue)
+ }
+ return false
+ }
+ }
+
+ return nil
+}
+
+type RulesBasedDownstreamSampler struct {
+ DynamicSampler *DynamicSamplerConfig
+ EMADynamicSampler *EMADynamicSamplerConfig
+ TotalThroughputSampler *TotalThroughputSamplerConfig
+}
+
+type RulesBasedSamplerRule struct {
+ Name string
+ SampleRate int
+ Sampler *RulesBasedDownstreamSampler
+ Drop bool
+ Scope string `validate:"oneof=span trace"`
+ Condition []*RulesBasedSamplerCondition
+}
+
+func (r *RulesBasedSamplerRule) String() string {
+ return fmt.Sprintf("%+v", *r)
+}
+
+type RulesBasedSamplerConfig struct {
+ Rule []*RulesBasedSamplerRule
+ CheckNestedFields bool
+}
+
+func (r *RulesBasedSamplerConfig) String() string {
+ return fmt.Sprintf("%+v", *r)
}
diff --git a/config_complete.toml b/config_complete.toml
deleted file mode 100644
index 8e37a591d5..0000000000
--- a/config_complete.toml
+++ /dev/null
@@ -1,264 +0,0 @@
-#####################
-## Refinery Config ##
-#####################
-
-# ListenAddr is the IP and port on which to listen for incoming events. Incoming
-# traffic is expected to be HTTP, so if using SSL put something like nginx in
-# front to do the decryption.
-# Should be of the form 0.0.0.0:8080
-# Not eligible for live reload.
-ListenAddr = "0.0.0.0:8080"
-
-# PeerListenAddr is the IP and port on which to listen for traffic being
-# rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL
-# put something like nginx in front to do the decryption. Must be different from
-# ListenAddr
-# Should be of the form 0.0.0.0:8081
-# Not eligible for live reload.
-PeerListenAddr = "0.0.0.0:8081"
-
-# APIKeys is a list of Honeycomb API keys that the proxy will accept. This list
-# only applies to events - other Honeycomb API actions will fall through to the
-# upstream API directly.
-# Adding keys here causes events arriving with API keys not in this list to be
-# rejected with an HTTP 401 error If an API key that is a literal '*' is in the
-# list, all API keys are accepted.
-# Eligible for live reload.
-APIKeys = [
- # "replace-me",
- # "more-optional-keys",
- "*", # wildcard accept all keys
- ]
-
-# HoneycombAPI is the URL for the upstream Honeycomb API.
-# Eligible for live reload.
-HoneycombAPI = "https://api.honeycomb.io"
-
-# SendDelay is a short timer that will be triggered when a trace is complete.
-# Refinery will wait this duration before actually sending the trace. The
-# reason for this short delay is to allow for small network delays or clock
-# jitters to elapse and any final spans to arrive before actually sending the
-# trace. This supports duration strings with supplied units. Set to 0 for
-# immediate sends.
-# Eligible for live reload.
-SendDelay = "2s"
-
-# TraceTimeout is a long timer; it represents the outside boundary of how long
-# to wait before sending an incomplete trace. Normally traces are sent when the
-# root span arrives. Sometimes the root span never arrives (due to crashes or
-# whatever), and this timer will send a trace even without having received the
-# root span. If you have particularly long-lived traces you should increase this
-# timer. This supports duration strings with supplied units.
-# Eligible for live reload.
-TraceTimeout = "60s"
-
-# SendTicker is a short timer; it determines the duration to use to check for traces to send
-SendTicker = "100ms"
-
-# LoggingLevel is the level above which we should log. Debug is very verbose,
-# and should only be used in pre-production environments. Info is the
-# recommended level. Valid options are "debug", "info", "error", and
-# "panic"
-# Not eligible for live reload.
-LoggingLevel = "debug"
-
-# UpstreamBufferSize and PeerBufferSize control how large of an event queue to use
-# when buffering events that will be forwarded to peers or the upstream API.
-UpstreamBufferSize = 10000
-PeerBufferSize = 10000
-
-# DebugServiceAddr sets the IP and port the debug service will run on
-# The debug service will only run if the command line flag -d is specified
-# The debug service runs on the first open port between localhost:6060 and :6069 by default
-# DebugServiceAddr = "localhost:8085"
-
-############################
-## Implementation Choices ##
-############################
-
-# Each of the config options below chooses an implementation of a Refinery
-# component to use. Depending on the choice there may be more configuration
-# required below in the section for that choice. Changing implementation choices
-# requires a process restart; these changes will not be picked up by a live
-# config reload. (Individual config options for a given implementation may be
-# eligible for live reload).
-
-# Collector describes which collector to use for collecting traces. The only
-# current valid option is "InMemCollector".. More can be added by adding
-# implementations of the Collector interface.
-Collector = "InMemCollector"
-
-# Logger describes which logger to use for Refinery logs. Valid options are
-# "logrus" and "honeycomb". The logrus option will write logs to STDOUT and the
-# honeycomb option will send them to a Honeycomb dataset.
-Logger = "honeycomb"
-
-# Metrics describes which service to use for Refinery metrics. Valid options are
-# "prometheus" and "honeycomb". The prometheus option starts a listener that
-# will reply to a request for /metrics. The honeycomb option will send summary
-# metrics to a Honeycomb dataset.
-Metrics = "honeycomb"
-
-#########################
-## Peer Management ##
-#########################
-
-# [PeerManagement]
-# Type = "file"
-# Peers is the list of all servers participating in this proxy cluster. Events
-# will be sharded evenly across all peers based on the Trace ID. Values here
-# should be the base URL used to access the peer, and should include scheme,
-# hostname (or ip address) and port. All servers in the cluster should be in
-# this list, including this host.
-# Peers = [
- # "http://127.0.0.1:8081",
- # "http://127.0.0.1:8081",
- # "http://10.1.2.3.4:8080",
- # "http://refinery-1231:8080",
- # "http://peer-3.fqdn" // assumes port 80
-# ]
-
-# [PeerManagement]
-# Type = "redis"
-# RedisHost is is used to connect to redis for peer cluster membership management.
-# Further, if the environment variable 'REFINERY_REDIS_HOST' is set it takes
-# precedence and this value is ignored.
-# Not eligible for live reload.
-# RedisHost = "localhost:6379"
-
-# IdentifierInterfaceName is optional. By default, when using RedisHost, Refinery will use
-# the local hostname to identify itself to other peers in Redis. If your environment
-# requires that you use IPs as identifiers (for example, if peers can't resolve eachother
-# by name), you can specify the network interface that Refinery is listening on here.
-# Refinery will use the first unicast address that it finds on the specified network
-# interface as its identifier.
-# Not eligible for live reload.
-# IdentifierInterfaceName = "eth0"
-
-# UseIPV6Identifier is optional. If using IdentifierInterfaceName, Refinery will default to the first
-# IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use
-# the first IPV6 unicast address found.
-# UseIPV6Identifier = false
-
-# RedisIdentifier is optional. By default, when using RedisHost, Refinery will use
-# the local hostname to identify itself to other peers in Redis. If your environment
-# requires that you use IPs as identifiers (for example, if peers can't resolve eachother
-# by name), you can specify the exact identifier (IP address, etc) to use here.
-# Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set.
-# RedisIdentifier = "192.168.1.1"
-
-#########################
-## In-Memory Collector ##
-#########################
-
-# InMemCollector brings together all the settings that are relevant to
-# collecting spans together to make traces.
-[InMemCollector]
-
-# The collection cache is used to collect all spans into a trace as well as
-# remember the sampling decision for any spans that might come in after the
-# trace has been marked "complete" (either by timing out or seeing the root
-# span). The number of traces in the cache should be many multiples (100x to
-# 1000x) of the total number of concurrently active traces (trace throughput *
-# trace duration).
-# Eligible for live reload. Growing the cache capacity with a live config reload
-# is fine. Avoid shrinking it with a live reload (you can, but it may cause
-# temporary odd sampling decisions).
-CacheCapacity = 1000
-
-# MaxAlloc is optional. If set, it must be an integer >= 0. 64-bit values are
-# supported.
-# If set to a non-zero value, once per tick (see SendTicker) the collector
-# will compare total allocated bytes to this value. If allocation is too
-# high, cache capacity will be reduced and an error will be logged.
-# Useful values for this setting are generally in the range of 75%-90% of
-# available system memory.
-MaxAlloc = 0
-
-###################
-## Logrus Logger ##
-###################
-
-# LogrusLogger is a section of the config only used if you are using the
-# LogrusLogger to send all logs to STDOUT using the logrus package. If you are
-# using a different logger (eg honeycomb logger) you can leave all this
-# commented out.
-[LogrusLogger]
-
-# logrus logger currently has no options!
-
-######################
-## Honeycomb Logger ##
-######################
-
-# HoneycombLogger is a section of the config only used if you are using the
-# HoneycombLogger to send all logs to a Honeycomb Dataset. If you are using a
-# different logger (eg file-based logger) you can leave all this commented out.
-
-[HoneycombLogger]
-
-# LoggerHoneycombAPI is the URL for the upstream Honeycomb API.
-# Eligible for live reload.
-LoggerHoneycombAPI = "https://api.honeycomb.io"
-
-# LoggerAPIKey is the API key to use to send log events to the Honeycomb logging
-# dataset. This is separate from the APIKeys used to authenticate regular
-# traffic.
-# Eligible for live reload.
-LoggerAPIKey = "abcd1234"
-
-# LoggerDataset is the name of the dataset to which to send Refinery logs
-# Eligible for live reload.
-LoggerDataset = "Refinery Logs"
-
-# LoggerSamplerEnabled enables a PerKeyThroughput dynamic sampler for log messages.
-# This will sample log messages based on [log level:message] key on a per second throughput basis.
-# Not eligible for live reload.
-LoggerSamplerEnabled = true
-
-# LoggerSamplerThroughput is the per key per second throughput for the log message dynamic sampler.
-# Not eligible for live reload.
-LoggerSamplerThroughput = 10
-
-#######################
-## Honeycomb Metrics ##
-#######################
-
-# HoneycombMetrics is a section of the config only used if you are using the
-# HoneycombMetrics to send all metrics to a Honeycomb Dataset. If you are using a
-# different metrics service (eg prometheus or metricsd) you can leave all this
-# commented out.
-
-[HoneycombMetrics]
-
-# MetricsHoneycombAPI is the URL for the upstream Honeycomb API.
-# Eligible for live reload.
-MetricsHoneycombAPI = "https://api.honeycomb.io"
-
-# MetricsAPIKey is the API key to use to send log events to the Honeycomb logging
-# dataset. This is separate from the APIKeys used to authenticate regular
-# traffic.
-# Eligible for live reload.
-MetricsAPIKey = "abcd1234"
-
-# MetricsDataset is the name of the dataset to which to send Refinery metrics
-# Eligible for live reload.
-MetricsDataset = "Refinery Metrics"
-
-# MetricsReportingInterval is the frequency (in seconds) to send metric events
-# to Honeycomb. Between 1 and 60 is recommended.
-# Not eligible for live reload.
-MetricsReportingInterval = 3
-
-
-#####################@##
-## Prometheus Metrics ##
-#####################@##
-
-[PrometheusMetrics]
-
-# MetricsListenAddr determines the interface and port on which Prometheus will
-# listen for requests for /metrics. Must be different from the main Refinery
-# listener.
-# Not eligible for live reload.
-# MetricsListenAddr = "localhost:2112"
diff --git a/config_complete.yaml b/config_complete.yaml
new file mode 100644
index 0000000000..240c5eb3d7
--- /dev/null
+++ b/config_complete.yaml
@@ -0,0 +1,406 @@
+########################
+## Trace Proxy Config ##
+########################
+
+# ListenAddr is the IP and port on which to listen for incoming events. Incoming
+# traffic is expected to be HTTP, so if using SSL put something like nginx in
+# front to do the TLS Termination.
+ListenAddr: 0.0.0.0:8082
+
+# GRPCListenAddr is the IP and port on which to listen for incoming events over
+# gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put something like nginx in
+# front to do the TLS Termination.
+GRPCListenAddr: 0.0.0.0:9090
+
+# PeerListenAddr is the IP and port on which to listen for traffic being
+# rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL
+# put something like nginx in front to do the decryption. Must be different from
+# ListenAddr
+PeerListenAddr: 0.0.0.0:8083
+
+GRPCPeerListenAddr: 0.0.0.0:8084
+
+# CompressPeerCommunication determines whether to compress span data
+# it forwards to peers. If it costs money to transmit data between different
+# instances (e.g. they're spread across AWS availability zones), then you
+# almost certainly want compression enabled to reduce your bill. The option to
+# disable it is provided as an escape hatch for deployments that value lower CPU
+# utilization over data transfer costs.
+CompressPeerCommunication: true
+
+# OpsrampAPI is the URL for the upstream Opsramp API.
+OpsrampAPI: ""
+
+# Dataset you want to use for sampling
+Dataset: "ds"
+
+#Tls Options
+UseTls: true
+UseTlsInsecure: false
+
+# LoggingLevel valid options are "debug", "info", "error", and "panic".
+LoggingLevel: error
+
+# SendDelay is a short timer that will be triggered when a trace is complete.
+# Trace Proxy will wait for this duration before actually sending the trace. The
+# reason for this short delay is to allow for small network delays or clock
+# jitters to elapse and any final spans to arrive before actually sending the
+# trace. This supports duration strings with supplied units. Set to 0 for
+# immediate sends.
+SendDelay: 2s
+
+# BatchTimeout dictates how frequently to send unfulfilled batches. By default
+# this will use the DefaultBatchTimeout in libtrace as its value, which is 100ms.
+# Eligible for live reload.
+BatchTimeout: 1s
+
+# TraceTimeout is a long timer; it represents the outside boundary of how long
+# to wait before sending an incomplete trace. Normally traces are sent when the
+# root span arrives. Sometimes the root span never arrives (due to crashes or
+# whatever), and this timer will send a trace even without having received the
+# root span. If you have particularly long-lived traces you should increase this
+# timer. This supports duration strings with supplied units.
+TraceTimeout: 60s
+
+# MaxBatchSize is the number of events to be included in the batch for sending
+MaxBatchSize: 500
+
+# SendTicker is a short timer; it determines the duration to use to check for traces to send
+SendTicker: 100ms
+
+# UpstreamBufferSize and PeerBufferSize control how large of an event queue to use
+# when buffering events that will be forwarded to peers or the upstream API.
+UpstreamBufferSize: 1000
+PeerBufferSize: 1000
+
+# AddHostMetadataToTrace determines whether to add information about
+# the host that tracing proxy is running on to the spans that it processes.
+# If enabled, information about the host will be added to each span with the
+# key 'meta.local_hostname'.
+AddHostMetadataToTrace: false
+
+# AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics
+# the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"}
+# max number of additional keys supported is 5; if the limit is exceeded then we consider only the first 5
+# based on the sorted order of keys
+# "app" label is mandatory
+AddAdditionalMetadata: { "app": "default" }
+
+# EnvironmentCacheTTL is the amount of time a cache entry will live that associates
+# an API key with an environment name.
+# Cache misses look up the environment name using the OpsRampAPI config value.
+# Default is 1 hour ("1h").
+EnvironmentCacheTTL: "1h"
+
+# QueryAuthToken, if specified, provides a token that must be specified with
+# the header "X-OpsRamp-Tracing-Proxy-Query" in order for a /query request to succeed.
+# These /query requests are intended for debugging OpsRamp-Tracing-Proxy installations and
+# are not typically needed in normal operation.
+# Can be specified in the environment as TRACING_PROXY_QUERY_AUTH_TOKEN.
+# If left unspecified, the /query endpoints are inaccessible.
+# QueryAuthToken: "some-random-value"
+
+# AddRuleReasonToTrace causes traces that are sent to OpsRamp to include a field which
+# contains text indicating which rule was evaluated that caused the trace to be included.
+AddRuleReasonToTrace: true
+
+# AdditionalErrorFields should be a list of span fields that should be included when logging
+# errors that happen during ingestion of events (for example, the span too large error).
+# This is primarily useful in trying to track down misbehaving senders in a large installation.
+# The fields `dataset`, `apihost`, and `environment` are always included.
+# If a field is not present in the span, it will not be present in the error log.
+# Default is ["trace.span_id"].
+AdditionalErrorFields:
+ - trace.span_id
+
+# AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate
+# the number of child spans on the trace at the time the sampling decision was made.
+# This value is available to the rules-based sampler, making it possible to write rules that
+# are dependent upon the number of spans in the trace.
+# Default is false.
+AddSpanCountToRoot: false
+
+# CacheOverrunStrategy controls the cache management behavior under memory pressure.
+# "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again,
+# which is generally not helpful unless it occurs because of a permanent change in traffic patterns.
+# In the "impact" strategy, the items having the most impact on the cache size are
+# ejected from the cache earlier than normal but the cache is not resized.
+# In all cases, it only applies if MaxAlloc is nonzero.
+# Default is "resize" for compatibility but "impact" is recommended for most installations.
+CacheOverrunStrategy: "impact"
+
+#########################
+## Retry Configuration ##
+#########################
+RetryConfiguration:
+ # InitialInterval the time to wait after the first failure before retrying.
+ InitialInterval: 500ms
+ # RandomizationFactor is a random factor used to calculate next backoff
+ # Randomized interval = RetryInterval * (1 Âą RandomizationFactor)
+ RandomizationFactor: 0.5
+ # Multiplier is the value multiplied by the backoff interval bounds
+ Multiplier: 1.5
+ # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between
+ # consecutive retries will always be `MaxInterval`.
+ MaxInterval: 60s
+ # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request.
+ # Once this value is reached, the data is discarded.
+ MaxElapsedTime: 15m
+
+#########################
+## Proxy Configuration ##
+#########################
+ProxyConfiguration:
+ # Protocol accepts http and https
+ Protocol: "http"
+ # Host takes the proxy server address
+ Host: ""
+ # Port takes the proxy server port
+ Port: 3128
+ # UserName takes the proxy username
+ Username: ""
+ # Password takes the proxy password
+ Password: ""
+
+##################################
+## Authentication Configuration ##
+##################################
+AuthConfiguration:
+ # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made
+ Endpoint: ""
+ # Key - authentication key provided in OpsRamp Portal
+ Key: ""
+ # Secret - authentication Secret provided in OpsRamp Portal
+ Secret: ""
+ # TenantId - tenant/client id to which the traces are to be posted
+ TenantId: ""
+
+############################
+## Implementation Choices ##
+############################
+# Each of the config options below chooses an implementation of a Trace Proxy
+# component to use. Depending on the choice, there may be more configuration
+# required below in the section for that choice. Changing implementation choices
+# requires a process restart.
+# Collector describes which collector to use for collecting traces. The only
+# current valid option is "InMemCollector". More can be added by adding
+# implementations of the Collector interface.
+Collector: "InMemCollector"
+
+# InMemCollector brings together all the settings that are relevant to
+# collecting spans together to make traces.
+InMemCollector:
+
+ # The collection cache is used to collect all spans into a trace as well as
+ # remember the sampling decision for any spans that might come in after the
+ # trace has been marked "complete" (either by timing out or seeing the root
+ # span). The number of traces in the cache should be many multiples (100x to
+ # 1000x) of the total number of concurrently active traces (trace throughput *
+ # trace duration).
+ CacheCapacity: 1000
+
+ # MaxAlloc is optional. If set, it must be an integer >= 0.
+ # If set to a non-zero value, once per tick (see SendTicker) the collector
+ # will compare total allocated bytes to this value. If allocation is too
+ # high, cache capacity will be reduced and an error will be logged.
+ # Useful values for this setting are generally in the range of 75%-90% of
+  # available system memory. Using 80% is recommended.
+  # This value should be set according to resources.limits.memory
+  # By default that setting is 4GB, and this is set to 80% of that limit
+  # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,836
+ # MaxAlloc: 3435973836
+ MaxAlloc: 0
+
+#####################
+## Peer Management ##
+#####################
+
+# Configure how OpsRamp-Tracing-Proxy peers are discovered and managed
+PeerManagement:
+ # Strategy controls the way that traces are assigned to Trace Proxy nodes.
+ # The "legacy" strategy uses a simple algorithm that unfortunately causes
+ # 1/2 of the in-flight traces to be assigned to a different node whenever the
+ # number of nodes changes.
+ # The legacy strategy is deprecated and is intended to be removed in a future release.
+ # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the
+ # number of nodes) are disrupted when the node count changes.
+ # Not eligible for live reload.
+ Strategy: "hash"
+
+ ###########################################################
+ ###### File (Suitable only for VM based deployments ######
+ ###### and single replica k8s deployments) ######
+ ###########################################################
+ #Type: "file"
+
+ # Peers is the list of all servers participating in this proxy cluster. Events
+ # will be sharded evenly across all peers based on the Trace ID. Values here
+ # should be the base URL used to access the peer, and should include scheme,
+ # hostname (or ip address) and port. All servers in the cluster should be in
+ # this list, including this host.
+ #Peers: [
+ # "http://127.0.0.1:8084", #only grpc peer listener used
+ #]
+ ###########################################################
+
+ ###########################################################
+ ###### Redis (Suitable for all types of deployments) ######
+ ###########################################################
+ ## The type should always be redis when deployed to Kubernetes environments
+ Type: "redis"
+
+ ## RedisHost is used to connect to redis for peer cluster membership management.
+ ## Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes
+ ## precedence and this value is ignored.
+ ## Not eligible for live reload.
+ ## RedisHost will default to the name used for the release or name overrides depending on what is used,
+  ## but can be overridden to a specific value.
+ RedisHost: ""
+
+ ## RedisUsername is the username used to connect to redis for peer cluster membership management.
+ ## If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes
+ ## precedence and this value is ignored.
+ ## Not eligible for live reload.
+ RedisUsername: ""
+
+ ## RedisPassword is the password used to connect to redis for peer cluster membership management.
+ ## If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes
+ ## precedence and this value is ignored.
+ ## Not eligible for live reload.
+ RedisPassword: ""
+
+ ## RedisPrefix is a string used as a prefix for the keys in redis while storing
+ ## the peer membership. It might be useful to set this in any situation where
+ ## multiple trace-proxy clusters or multiple applications want to share a single
+ ## Redis instance. It may not be blank.
+ RedisPrefix: "tracing-proxy"
+
+ ## RedisDatabase is an integer from 0-15 indicating the database number to use
+ ## for the Redis instance storing the peer membership. It might be useful to set
+ ## this in any situation where multiple trace-proxy clusters or multiple
+ ## applications want to share a single Redis instance.
+ RedisDatabase: 0
+
+ ## UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2.
+ ## Not eligible for live reload.
+  UseTLS: false
+
+ ## UseTLSInsecure disables certificate checks
+ ## Not eligible for live reload.
+ UseTLSInsecure: true
+
+ ## IdentifierInterfaceName is optional.
+ ## Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name.
+ ## When configured the pod's IP will be used in the peer list
+ IdentifierInterfaceName: eth0
+
+ ## UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first
+ ## IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use
+ ## the first IPV6 unicast address found.
+ UseIPV6Identifier: false
+ ###########################################################
+
+# LogrusLogger is a section of the config only used if you are using the
+# LogrusLogger to send all logs to STDOUT using the logrus package.
+LogrusLogger:
+ # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"]
+ LogFormatter: 'json'
+  # LogOutput specifies where the logs are supposed to be written. Accepts one of ["stdout", "stderr"]
+ LogOutput: 'stdout'
+
+MetricsConfig:
+ # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp
+ Enable: true
+
+ # ListenAddr determines the interface and port on which Prometheus will
+ # listen for requests for /metrics. Must be different from the main Trace Proxy
+ # listener.
+ ListenAddr: '0.0.0.0:2112'
+
+ # OpsRampAPI is the URL for the upstream OpsRamp API.
+ OpsRampAPI: ""
+
+ # ReportingInterval is the frequency specified in seconds at which
+ # the metrics are collected and sent to OpsRamp
+ ReportingInterval: 10
+
+ # MetricsList is a list of regular expressions which match the metric
+ # names. Keep the list as small as possible since too many regular expressions can lead to bad performance.
+ # Internally, all the items in the list are concatenated using '|' to make the computation faster.
+ MetricsList: [ ".*" ]
+
+GRPCServerParameters:
+# MaxConnectionIdle is a duration for the amount of time after which an
+# idle connection would be closed by sending a GoAway. Idleness duration is
+# defined since the most recent time the number of outstanding RPCs became
+# zero or the connection establishment.
+# 0s sets duration to infinity which is the default:
+# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219
+# MaxConnectionIdle: "1m"
+
+# MaxConnectionAge is a duration for the maximum amount of time a
+# connection may exist before it will be closed by sending a GoAway. A
+# random jitter of +/-10% will be added to MaxConnectionAge to spread out
+# connection storms.
+# 0s sets duration to infinity which is the default:
+# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222
+# MaxConnectionAge: "0s"
+
+# MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
+# which the connection will be forcibly closed.
+# 0s sets duration to infinity which is the default:
+# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227
+# MaxConnectionAgeGrace: "0s"
+
+# After a duration of this time if the server doesn't see any activity it
+# pings the client to see if the transport is still alive.
+# If set below 1s, a minimum value of 1s will be used instead.
+# 0s sets duration to 2 hours which is the default:
+# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230
+# Time: "10s"
+
+# After having pinged for keepalive check, the server waits for a duration
+# of Timeout and if no activity is seen even after that the connection is
+# closed.
+# 0s sets duration to 20 seconds which is the default:
+# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233
+# Timeout: "2s"
+
+################################
+## Sample Cache Configuration ##
+################################
+
+# Sample Cache Configuration controls the sample cache used to retain information about trace
+# status after the sampling decision has been made.
+SampleCacheConfig:
+# Type controls the type of sample cache used.
+# "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is
+# 5x the size of the trace cache. This is tracing proxy's original sample cache strategy.
+# "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember
+# a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces.
+# It is also more configurable. The cuckoo filter is recommended for most installations.
+# Default is "legacy".
+# Type: "cuckoo"
+
+# KeptSize controls the number of traces preserved in the cuckoo kept traces cache.
+# tracing proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some
+# statistical information. This is most useful in cases where the trace was sent before sending
+# the root span, so that the root span can be decorated with accurate metadata.
+# Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes).
+# It Does not apply to the "legacy" type of cache.
+# KeptSize: 10_000
+
+# DroppedSize controls the size of the cuckoo dropped traces cache.
+# This cache consumes 4-6 bytes per trace at a scale of millions of traces.
+# Changing its size with live reload sets a future limit, but does not have an immediate effect.
+# Default is 1_000_000 traces.
+# It Does not apply to the "legacy" type of cache.
+# DroppedSize: 1_000_000
+
+# SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates
+# the remaining capacity of its dropped traces cache and possibly cycles it.
+# This cache is quite resilient so it doesn't need to happen very often, but the
+# operation is also inexpensive.
+# Default is 10 seconds.
+# It Does not apply to the "legacy" type of cache.
+# SizeCheckInterval: "10s"
\ No newline at end of file
diff --git a/deploy/app-brigade-manifest.json b/deploy/app-brigade-manifest.json
new file mode 100644
index 0000000000..386f274deb
--- /dev/null
+++ b/deploy/app-brigade-manifest.json
@@ -0,0 +1,34 @@
+{
+ "payload": [
+ {
+ "appid": "3e148737-ea6f-48e3-a62e-ae35cf135520",
+ "stages": [
+ {
+ "stagename": "deployment",
+ "payload": [
+ {
+ "filename": "tracing-proxy-svc.yml"
+ },
+ {
+ "filename": "tracing-proxy-deployment.yml"
+ }
+ ]
+ }
+ ],
+ "Version": "${version}"
+ }
+ ],
+ "configmap": {
+ "name": "tracing-proxy-cm",
+ "comment": "Please include configmap file paths in docker/Dockerfile as needed for tini",
+ "infra": [
+ "elasticache",
+ "clusterinfo"
+ ],
+ "config": [
+ "tracing-proxy"
+ ]
+ },
+ "multi-region": "supported",
+ "namespace": "opsramp-tracing-proxy"
+}
\ No newline at end of file
diff --git a/deploy/tracing-proxy-deployment.yml b/deploy/tracing-proxy-deployment.yml
new file mode 100644
index 0000000000..089c875c60
--- /dev/null
+++ b/deploy/tracing-proxy-deployment.yml
@@ -0,0 +1,67 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: opsramp-tracing-proxy
+ labels:
+ app: opsramp-tracing-proxy
+ appid: "${appid}"
+ version: "${version}"
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: opsramp-tracing-proxy
+ template:
+ metadata:
+ labels:
+ app: opsramp-tracing-proxy
+ name: opsramp-tracing-proxy
+ version: "${version}"
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: name
+ operator: In
+ values:
+ - opsramp-tracing-proxy
+ topologyKey: kubernetes.io/hostname
+ weight: 100
+ imagePullSecrets:
+ - name: quay.io
+ restartPolicy: Always
+ containers:
+ - name: opsramp-tracing-proxy
+ image: ${docker_image}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: http
+ containerPort: 8082
+ protocol: TCP
+ - name: grpc
+ containerPort: 9090
+ protocol: TCP
+ - name: peer
+ containerPort: 8083
+ protocol: TCP
+ - containerPort: 8084
+ name: grpc-peer
+ resources:
+ requests:
+ memory: "2048Mi"
+ cpu: "2"
+ limits:
+ memory: "8096Mi"
+ cpu: "4"
+ volumeMounts:
+ - name: tracing-configs
+ mountPath: /config/data
+ volumes:
+ - configMap:
+ name: tracing-proxy-cm
+ name: tracing-configs
+
diff --git a/deploy/tracing-proxy-svc.yml b/deploy/tracing-proxy-svc.yml
new file mode 100644
index 0000000000..9a5e241500
--- /dev/null
+++ b/deploy/tracing-proxy-svc.yml
@@ -0,0 +1,25 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: opsramp-tracing-proxy
+spec:
+ selector:
+ app: opsramp-tracing-proxy
+ ports:
+ - protocol: TCP
+ port: 9090
+ targetPort: 9090
+ name: grpc
+ - protocol: TCP
+ port: 8082
+ targetPort: 8082
+ name: http
+ - protocol: TCP
+ port: 8083
+ targetPort: 8083
+ name: peer
+ - protocol: TCP
+ port: 8084
+ targetPort: 8084
+ name: grpc-peer
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000000..03f217764c
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,42 @@
+version: '3.8'
+
+services:
+ redis:
+ image: redis
+ ports:
+ - "6379:6379"
+ profiles:
+ - redis
+ - all
+
+ redis-commander:
+ image: rediscommander/redis-commander:latest
+ environment:
+ - REDIS_HOSTS=local:redis:6379
+ ports:
+ - "4042:8081"
+ depends_on:
+ - redis
+ profiles:
+ - redis
+ - all
+
+ tracing-proxy:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ deploy:
+ mode: replicated
+ replicas: 3
+ endpoint_mode: vip
+ expose:
+ - "4317"
+ - "8084"
+ - "8082"
+ ports:
+ - "8082-8084:8082"
+ depends_on:
+ - redis
+ profiles:
+ - tracing-proxy
+ - all
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 276bd77a6b..98a7412b30 100644
--- a/go.mod
+++ b/go.mod
@@ -1,41 +1,82 @@
-module github.com/honeycombio/refinery
+module github.com/opsramp/tracing-proxy
-go 1.14
+go 1.19
require (
- github.com/DataDog/zstd v1.4.5 // indirect
- github.com/davecgh/go-spew v1.1.1
+ github.com/dgryski/go-wyhash v0.0.0-20191203203029-c4841ae36371
github.com/facebookgo/inject v0.0.0-20180706035515-f23751cae28b
github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d
- github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 // indirect
- github.com/fsnotify/fsnotify v1.4.9
- github.com/garyburd/redigo v1.6.0
- github.com/go-playground/universal-translator v0.17.0 // indirect
+ github.com/fsnotify/fsnotify v1.6.0
github.com/go-playground/validator v9.31.0+incompatible
- github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d
- github.com/hashicorp/golang-lru v0.5.1
+ github.com/gogo/protobuf v1.3.2
+ github.com/golang/snappy v0.0.4
+ github.com/gomodule/redigo v1.8.9
+ github.com/gorilla/mux v1.8.0
+ github.com/hashicorp/golang-lru v0.5.4
github.com/honeycombio/dynsampler-go v0.2.1
- github.com/honeycombio/libhoney-go v1.12.4
- github.com/jessevdk/go-flags v1.4.0
- github.com/json-iterator/go v1.1.6
- github.com/klauspost/compress v1.10.3
- github.com/leodido/go-urn v1.2.0 // indirect
- github.com/mitchellh/mapstructure v1.3.3 // indirect
- github.com/pelletier/go-toml v1.8.0 // indirect
- github.com/pkg/errors v0.8.1
- github.com/prometheus/client_golang v0.9.3
+ github.com/jessevdk/go-flags v1.5.0
+ github.com/json-iterator/go v1.1.12
+ github.com/klauspost/compress v1.16.7
+ github.com/opsramp/husky v0.0.0-20230719151104-01eeb1b7e530
+ github.com/opsramp/libtrace-go v0.0.0-20230719150918-e2ba67c0f350
+ github.com/panmari/cuckoofilter v1.0.3
+ github.com/pelletier/go-toml/v2 v2.0.5
+ github.com/pkg/errors v0.9.1
+ github.com/prometheus/client_golang v1.14.0
+ github.com/prometheus/client_model v0.3.0
+ github.com/prometheus/common v0.39.0
+ github.com/prometheus/prometheus v0.41.0
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
- github.com/sirupsen/logrus v1.2.0
- github.com/spf13/afero v1.3.2 // indirect
- github.com/spf13/cast v1.3.1 // indirect
+ github.com/sirupsen/logrus v1.9.0
+ github.com/spf13/viper v1.13.0
+ github.com/stretchr/testify v1.8.1
+ github.com/tidwall/gjson v1.14.3
+ github.com/vmihailenco/msgpack/v5 v5.3.5
+ google.golang.org/grpc v1.56.2
+ gopkg.in/natefinch/lumberjack.v2 v2.0.0
+ gopkg.in/yaml.v2 v2.4.0
+)
+
+require (
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 // indirect
+ github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect
+ github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 // indirect
+ github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 // indirect
+ github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 // indirect
+ github.com/go-playground/locales v0.13.0 // indirect
+ github.com/go-playground/universal-translator v0.17.0 // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
+ github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/leodido/go-urn v1.2.0 // indirect
+ github.com/magiconair/properties v1.8.6 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/pelletier/go-toml v1.9.5 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/procfs v0.8.0 // indirect
+ github.com/spf13/afero v1.8.2 // indirect
+ github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
- github.com/spf13/viper v1.7.0
- github.com/stretchr/testify v1.5.1
- github.com/vmihailenco/msgpack/v4 v4.3.11
- golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6 // indirect
- golang.org/x/text v0.3.3 // indirect
- gopkg.in/alexcesaro/statsd.v2 v2.0.0
+ github.com/subosito/gotenv v1.4.1 // indirect
+ github.com/tidwall/match v1.1.1 // indirect
+ github.com/tidwall/pretty v1.2.0 // indirect
+ github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
+ golang.org/x/net v0.12.0 // indirect
+ golang.org/x/sys v0.10.0 // indirect
+ golang.org/x/text v0.11.0 // indirect
+ google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect
+ google.golang.org/protobuf v1.31.0 // indirect
+ gopkg.in/alexcesaro/statsd.v2 v2.0.0 // indirect
gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
- gopkg.in/ini.v1 v1.57.0 // indirect
+ gopkg.in/ini.v1 v1.67.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index aed211cfef..0fcb752db8 100644
--- a/go.sum
+++ b/go.sum
@@ -3,44 +3,409 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
+cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4=
+cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
+cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o=
+cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE=
+cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
+cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
+cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
+cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=
+cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk=
+cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc=
+cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc=
+cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04=
+cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno=
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
+cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
+cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
+cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
+cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk=
+cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0=
+cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
+cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
+cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s=
+cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0=
+cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ=
+cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
+cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
+cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw=
+cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI=
+cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
+cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
+cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=
+cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8=
+cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM=
+cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc=
+cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI=
+cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE=
+cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE=
+cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4=
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
+cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw=
+cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
+cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s=
+cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI=
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
+cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
+cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI=
+cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0=
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk=
+cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg=
+cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590=
+cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk=
+cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
+cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U=
+cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
+cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM=
+cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
+cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=
+cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI=
+cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4=
+cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
+cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
+cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
+cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
+cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
+cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg=
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
+cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
+cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
+cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
+cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
+cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc=
+cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE=
+cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
+cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
+cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
+cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo=
+cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE=
+cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
+cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38=
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
+cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I=
+cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=
+cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA=
+cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
+cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
+cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
+cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo=
+cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo=
+cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ=
+cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g=
+cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
+cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c=
+cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
+cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=
+cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0=
+cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8=
+cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek=
+cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
+cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM=
+cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
+cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU=
+cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU=
+cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k=
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
+cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=
+cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
+cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=
+cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI=
+cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
+cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc=
+cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
+cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w=
+cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
+cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
+cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg=
+cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY=
+cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
+cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
+cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=
+cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w=
+cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
+cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60=
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
+cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
+cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
+cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
+cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
+cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA=
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
+cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM=
+cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
+cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
+cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
+cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
+cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
+cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM=
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
+cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs=
+cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
+cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
+cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
+cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
+cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
+cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
+cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
+cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
+cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE=
+cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
+cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE=
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
+cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
+cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
+cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=
+cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
+cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
+cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=
+cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
+cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
+cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
+cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
+cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
+cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
+cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
+cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
+cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
+cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
+cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
+cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
+cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
-github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
-github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 h1:BS21ZUJ/B5X2UVUbczfmdWH7GapPWAhxcMsDnjJTU1E=
+github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
+github.com/dgryski/go-wyhash v0.0.0-20191203203029-c4841ae36371 h1:bz5ApY1kzFBvw3yckuyRBCtqGvprWrKswYK468nm+Gs=
+github.com/dgryski/go-wyhash v0.0.0-20191203203029-c4841ae36371/go.mod h1:/ENMIO1SQeJ5YQeUWWpbX8f+bS8INHrrhFjXgEqi4LA=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0=
@@ -59,230 +424,262 @@ github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 h1:KnnwHN59Jx
github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691/go.mod h1:sKLL1iua/0etWfo/nPCmyz+v2XDMXy+Ho53W7RAuZNY=
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk=
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=
-github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator v9.31.0+incompatible h1:UA72EPEogEnq76ehGdEDp4Mit+3FDh548oRqwVgNsHA=
github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+PugkyDjY2bRrL/UBU4f3rvrgkN3V8JEig=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws=
+github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d h1:mksP7mUlZu0fpgMVMfDnaVvErqRL05HM3Kk+rBkZK54=
-github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.14.0/go.mod h1:4OGVnY4qf2+gw+ssiHbW+pq4mo2yko94YxxMmXZ7jCA=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8=
github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI=
-github.com/honeycombio/libhoney-go v1.12.4 h1:rWAoxhpvu2briq85wZc04osHgKtueCLAk/3igqTX3+Q=
-github.com/honeycombio/libhoney-go v1.12.4/go.mod h1:tp2qtK0xMZyG/ZfykkebQESKFS78xpyPr2wEswZ1j6U=
-github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
+github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8=
-github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
+github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
-github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8=
-github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
+github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw=
-github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs=
-github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/opsramp/husky v0.0.0-20230719151104-01eeb1b7e530 h1:1lgA35HukLuhLrONDpHvmnrlGdmozMD5oju1Pk2RLgo=
+github.com/opsramp/husky v0.0.0-20230719151104-01eeb1b7e530/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw=
+github.com/opsramp/libtrace-go v0.0.0-20230719150918-e2ba67c0f350 h1:h2W9jda/cvoiir4kyJRGlaMb6+aeAASInkVE7+ZCP7M=
+github.com/opsramp/libtrace-go v0.0.0-20230719150918-e2ba67c0f350/go.mod h1:yn9rTiwFOqvh/3VqS5jaIo1vmYf/Mast5jptJz3GJvU=
+github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU=
+github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
+github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
+github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/prometheus/prometheus v0.41.0 h1:+QR4QpzwE54zsKk2K7EUkof3tHxa3b/fyw7xJ4jR1Ns=
+github.com/prometheus/prometheus v0.41.0/go.mod h1:Uu5817xm7ibU/VaDZ9pu1ssGzcpO9Bd+LyoZ76RpHyo=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.3.2 h1:GDarE4TJQI52kYSbSAmLiId1Elfj+xgSDqrUZxFhxlU=
-github.com/spf13/afero v1.3.2/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
-github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
+github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU=
+github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/vmihailenco/msgpack/v4 v4.3.11 h1:Q47CePddpNGNhk4GCnAx9DDtASi2rasatE0cd26cZoE=
-github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
-github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY=
-github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
+github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
+github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
+github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
+github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
+github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
+github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -292,94 +689,323 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
+golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.2.0/go.mod h1:Cwn6afJ8jrQwYMxQDTpISoXmXW9I6qF6vDeuuoX3Ibs=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6 h1:X9xIZ1YU8bLZA3l6gqDUHSFiD0GFI9S548h6C8nDtOY=
-golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
+google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
+google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08=
+google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
+google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -389,32 +1015,190 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
+google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
+google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
+google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
+google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
+google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw=
+google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753/go.mod h1:iqkVr8IRpZ53gx1dEnWlCUIEwDWqWARWrbzpasaTNYM=
+google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 h1:lCbbUxUDD+DiXx9Q6F/ttL0aAu7N2pz8XnmMm8ZW4NE=
+google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
+google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
-gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
-gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/internal/peer/file.go b/internal/peer/file.go
index efd13bf7f4..44269f2501 100644
--- a/internal/peer/file.go
+++ b/internal/peer/file.go
@@ -1,22 +1,114 @@
package peer
-import "github.com/honeycombio/refinery/config"
+import (
+ "context"
+ "fmt"
+ "github.com/opsramp/libtrace-go/proto/proxypb"
+ "github.com/opsramp/tracing-proxy/config"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+ "net/url"
+ "sort"
+ "sync"
+ "time"
+)
type filePeers struct {
- c config.Config
+ c config.Config
+ peers []string
+ callbacks []func()
+ peerLock sync.Mutex
}
+var firstOccurancesOfGetPeers bool = false
+
// NewFilePeers returns a peers collection backed by the config file
func newFilePeers(c config.Config) Peers {
- return &filePeers{
- c: c,
+ p := &filePeers{
+ c: c,
+ peers: make([]string, 1),
+ callbacks: make([]func(), 0),
}
+
+ go p.watchFilePeers()
+
+ return p
}
func (p *filePeers) GetPeers() ([]string, error) {
- return p.c.GetPeers()
+
+ if !firstOccurancesOfGetPeers {
+ firstOccurancesOfGetPeers = true
+ return p.c.GetPeers()
+ }
+ p.peerLock.Lock()
+ defer p.peerLock.Unlock()
+ retList := make([]string, len(p.peers))
+ copy(retList, p.peers)
+ return retList, nil
}
+func (p *filePeers) watchFilePeers() {
+ tk := time.NewTicker(20 * time.Second)
+ originalPeerList, _ := p.c.GetPeers()
+ sort.Strings(originalPeerList)
+ oldPeerList := originalPeerList
+ for range tk.C {
+ currentPeers := getPeerMembers(originalPeerList)
+ sort.Strings(currentPeers)
+ if !equal(currentPeers, oldPeerList) {
+ p.peerLock.Lock()
+ p.peers = currentPeers
+ oldPeerList = currentPeers
+ p.peerLock.Unlock()
+ for _, callback := range p.callbacks {
+ // don't block on any of the callbacks.
+ go callback()
+ }
+ }
+ }
+}
func (p *filePeers) RegisterUpdatedPeersCallback(callback func()) {
// do nothing, file based peers are not reloaded
+ p.callbacks = append(p.callbacks, callback)
+}
+
+func getPeerMembers(originalPeerlist []string) []string {
+ var workingPeers []string
+ wg := sync.WaitGroup{}
+ for _, peer := range originalPeerlist {
+ wg.Add(1)
+ go func(goPeer string) {
+ opened := isOpen(goPeer)
+ if opened {
+ workingPeers = append(workingPeers, goPeer)
+ }
+ wg.Done()
+ }(peer)
+ }
+ wg.Wait()
+ return workingPeers
+}
+
+func isOpen(peerURL string) bool {
+ u, err := url.Parse(peerURL)
+ if err != nil {
+ return false
+ }
+
+ opts := []grpc.DialOption{
+ grpc.WithTransportCredentials(insecure.NewCredentials()),
+ }
+ conn, err := grpc.Dial(fmt.Sprintf("%s:%s", u.Hostname(), u.Port()), opts...)
+ if err != nil {
+ return false
+ }
+ defer conn.Close()
+ client := proxypb.NewTraceProxyServiceClient(conn)
+
+ resp, err := client.Status(context.TODO(), &proxypb.StatusRequest{})
+ if err != nil {
+ return false
+ }
+ return resp.GetPeerActive()
}
diff --git a/internal/peer/file_test.go b/internal/peer/file_test.go
index b8e453087a..60603120c1 100644
--- a/internal/peer/file_test.go
+++ b/internal/peer/file_test.go
@@ -1,11 +1,9 @@
-// +build all race
-
package peer
import (
"testing"
- "github.com/honeycombio/refinery/config"
+ "github.com/opsramp/tracing-proxy/config"
)
func TestFilePeers(t *testing.T) {
diff --git a/internal/peer/peers.go b/internal/peer/peers.go
index ff5a2615c2..e70e7e2378 100644
--- a/internal/peer/peers.go
+++ b/internal/peer/peers.go
@@ -1,9 +1,10 @@
package peer
import (
+ "context"
"errors"
- "github.com/honeycombio/refinery/config"
+ "github.com/opsramp/tracing-proxy/config"
)
// Peers holds the collection of peers for the cluster
@@ -13,7 +14,7 @@ type Peers interface {
RegisterUpdatedPeersCallback(callback func())
}
-func NewPeers(c config.Config) (Peers, error) {
+func NewPeers(ctx context.Context, c config.Config, done chan struct{}) (Peers, error) {
t, err := c.GetPeerManagementType()
if err != nil {
@@ -24,8 +25,8 @@ func NewPeers(c config.Config) (Peers, error) {
case "file":
return newFilePeers(c), nil
case "redis":
- return newRedisPeers(c)
+ return newRedisPeers(ctx, c, done)
default:
- return nil, errors.New("Invalid PeerManagement Type")
+ return nil, errors.New("invalid config option 'PeerManagement.Type'")
}
}
diff --git a/internal/peer/peers_test.go b/internal/peer/peers_test.go
index 5d11be5085..e257ba4bc8 100644
--- a/internal/peer/peers_test.go
+++ b/internal/peer/peers_test.go
@@ -1,22 +1,27 @@
-// +build all race
-
package peer
import (
+ "context"
+ "strings"
"testing"
+ "time"
- "github.com/honeycombio/refinery/config"
+ "github.com/opsramp/tracing-proxy/config"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestNewPeers(t *testing.T) {
c := &config.MockConfig{
PeerManagementType: "file",
+ PeerTimeout: 5 * time.Second,
}
- p, err := NewPeers(c)
-
- assert.Equal(t, nil, err)
+ done := make(chan struct{})
+ defer close(done)
+ p, err := NewPeers(context.Background(), c, done)
+ assert.NoError(t, err)
+ require.NotNil(t, p)
switch i := p.(type) {
case *filePeers:
@@ -27,11 +32,12 @@ func TestNewPeers(t *testing.T) {
c = &config.MockConfig{
GetPeerListenAddrVal: "0.0.0.0:8081",
PeerManagementType: "redis",
+ PeerTimeout: 5 * time.Second,
}
- p, err = NewPeers(c)
-
- assert.Equal(t, nil, err)
+ p, err = NewPeers(context.Background(), c, done)
+ assert.NoError(t, err)
+ require.NotNil(t, p)
switch i := p.(type) {
case *redisPeers:
@@ -39,3 +45,31 @@ func TestNewPeers(t *testing.T) {
t.Errorf("received %T expected %T", i, &redisPeers{})
}
}
+
+func TestPeerShutdown(t *testing.T) {
+ c := &config.MockConfig{
+ GetPeerListenAddrVal: "0.0.0.0:8081",
+ PeerManagementType: "redis",
+ PeerTimeout: 5 * time.Second,
+ }
+
+ done := make(chan struct{})
+ p, err := NewPeers(context.Background(), c, done)
+ assert.NoError(t, err)
+ require.NotNil(t, p)
+
+ peer, ok := p.(*redisPeers)
+ assert.True(t, ok)
+
+ peers, err := peer.GetPeers()
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(peers))
+ assert.True(t, strings.HasPrefix(peers[0], "http"))
+ assert.True(t, strings.HasSuffix(peers[0], "8081"))
+
+ close(done)
+ time.Sleep(100 * time.Millisecond)
+ peers, err = peer.GetPeers()
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(peers))
+}
diff --git a/internal/peer/redis.go b/internal/peer/redis.go
index 1e947f90fa..3531793ab6 100644
--- a/internal/peer/redis.go
+++ b/internal/peer/redis.go
@@ -2,8 +2,10 @@ package peer
import (
"context"
+ "crypto/tls"
"errors"
"fmt"
+ "github.com/opsramp/libtrace-go/transmission"
"net"
"os"
"sort"
@@ -11,9 +13,9 @@ import (
"sync"
"time"
- "github.com/garyburd/redigo/redis"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/internal/redimem"
+ "github.com/gomodule/redigo/redis"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/internal/redimem"
"github.com/sirupsen/logrus"
)
@@ -44,25 +46,40 @@ type redisPeers struct {
}
// NewRedisPeers returns a peers collection backed by redis
-func newRedisPeers(c config.Config) (Peers, error) {
+func newRedisPeers(ctx context.Context, c config.Config, done chan struct{}) (Peers, error) {
redisHost, _ := c.GetRedisHost()
if redisHost == "" {
redisHost = "localhost:6379"
}
+ options := buildOptions(c)
pool := &redis.Pool{
MaxIdle: 3,
MaxActive: 30,
IdleTimeout: 5 * time.Minute,
Wait: true,
Dial: func() (redis.Conn, error) {
- return redis.Dial(
- "tcp", redisHost,
- redis.DialReadTimeout(1*time.Second),
- redis.DialConnectTimeout(1*time.Second),
- redis.DialDatabase(0), // TODO enable multiple databases for multiple samproxies
+ // if redis is started at the same time as tracing-proxy, connecting to redis can
+ // fail and cause tracing-proxy to error out.
+ // Instead, we will try to connect to redis for up to 10 seconds with
+ // a 1 second delay between attempts to allow the redis process to init
+ var (
+ conn redis.Conn
+ err error
)
+ for timeout := time.After(10 * time.Second); ; {
+ select {
+ case <-timeout:
+ return nil, err
+ default:
+ conn, err = redis.Dial("tcp", redisHost, options...)
+ if err == nil {
+ return conn, nil
+ }
+ time.Sleep(time.Second)
+ }
+ }
},
}
@@ -75,7 +92,7 @@ func newRedisPeers(c config.Config) (Peers, error) {
peers := &redisPeers{
store: &redimem.RedisMembership{
- Prefix: "refinery",
+ Prefix: c.GetRedisPrefix(),
Pool: pool,
},
peers: make([]string, 1),
@@ -85,14 +102,18 @@ func newRedisPeers(c config.Config) (Peers, error) {
}
// register myself once
- err = peers.store.Register(context.TODO(), address, peerEntryTimeout)
+ for !transmission.DefaultAvailability.Status() {
+ logrus.Info("peer is not available yet")
+ time.Sleep(5 * time.Second)
+ }
+ err = peers.store.Register(ctx, address, peerEntryTimeout)
if err != nil {
- logrus.WithError(err).Errorf("failed to register self with peer store")
+ logrus.WithError(err).Errorf("failed to register self with redis peer store")
return nil, err
}
// go establish a regular registration heartbeat to ensure I stay alive in redis
- go peers.registerSelf()
+ go peers.registerSelf(done)
// get our peer list once to seed ourselves
peers.updatePeerListOnce()
@@ -100,7 +121,7 @@ func newRedisPeers(c config.Config) (Peers, error) {
// go watch the list of peers and trigger callbacks whenever it changes.
// populate my local list of peers so each request can hit memory and only hit
// redis on a ticker
- go peers.watchPeers()
+ go peers.watchPeers(done)
return peers, nil
}
@@ -119,19 +140,42 @@ func (p *redisPeers) RegisterUpdatedPeersCallback(cb func()) {
// registerSelf inserts self into the peer list and updates self's entry on a
// regular basis so it doesn't time out and get removed from the list of peers.
-// If this function stops, this host will get ejected from other's peer lists.
-func (p *redisPeers) registerSelf() {
+// When this function stops, it tries to remove the registered key.
+func (p *redisPeers) registerSelf(done chan struct{}) {
tk := time.NewTicker(refreshCacheInterval)
- for range tk.C {
- // every 5 seconds, insert a 30sec timeout record
- p.store.Register(context.TODO(), p.publicAddr, peerEntryTimeout)
+ for {
+ select {
+ case <-tk.C:
+ if !transmission.DefaultAvailability.Status() {
+ continue
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout())
+ // every interval, insert a timeout record. we ignore the error
+ // here since Register() logs the error for us.
+ p.store.Register(ctx, p.publicAddr, peerEntryTimeout)
+ cancel()
+ case <-done:
+ // unregister ourselves
+ ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout())
+ p.store.Unregister(ctx, p.publicAddr)
+ cancel()
+ return
+ }
}
}
func (p *redisPeers) updatePeerListOnce() {
- currentPeers, err := p.store.GetMembers(context.TODO())
+ ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout())
+ defer cancel()
+
+ currentPeers, err := p.store.GetMembers(ctx)
if err != nil {
- // TODO maybe do something better here?
+ logrus.WithError(err).
+ WithFields(logrus.Fields{
+ "name": p.publicAddr,
+ "timeout": p.c.GetPeerTimeout().String(),
+ }).
+ Error("get members failed")
return
}
sort.Strings(currentPeers)
@@ -141,41 +185,117 @@ func (p *redisPeers) updatePeerListOnce() {
p.peerLock.Unlock()
}
-func (p *redisPeers) watchPeers() {
+func (p *redisPeers) watchPeers(done chan struct{}) {
oldPeerList := p.peers
sort.Strings(oldPeerList)
tk := time.NewTicker(refreshCacheInterval)
- for range tk.C {
- currentPeers, err := p.store.GetMembers(context.TODO())
- if err != nil {
- // TODO maybe do something better here?
- continue
- }
- sort.Strings(currentPeers)
- if !equal(oldPeerList, currentPeers) {
- // update peer list and trigger callbacks saying the peer list has changed
+ for {
+ select {
+ case <-tk.C:
+ ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout())
+ currentPeers, err := p.store.GetMembers(ctx)
+ cancel()
+
+ if err != nil {
+ logrus.WithError(err).
+ WithFields(logrus.Fields{
+ "name": p.publicAddr,
+ "timeout": p.c.GetPeerTimeout().String(),
+ "oldPeers": oldPeerList,
+ }).
+ Error("get members failed during watch")
+ continue
+ }
+
+ sort.Strings(currentPeers)
+ if !equal(oldPeerList, currentPeers) {
+ // update peer list and trigger callbacks saying the peer list has changed
+ p.peerLock.Lock()
+ p.peers = currentPeers
+ oldPeerList = currentPeers
+ p.peerLock.Unlock()
+ for _, callback := range p.callbacks {
+ // don't block on any of the callbacks.
+ go callback()
+ }
+ }
+ case <-done:
p.peerLock.Lock()
- p.peers = currentPeers
- oldPeerList = currentPeers
+ p.peers = []string{}
p.peerLock.Unlock()
- for _, callback := range p.callbacks {
- // don't block on any of the callbacks.
- go callback()
- }
+ return
}
}
}
+func buildOptions(c config.Config) []redis.DialOption {
+ options := []redis.DialOption{
+ redis.DialReadTimeout(1 * time.Second),
+ redis.DialConnectTimeout(1 * time.Second),
+ redis.DialDatabase(c.GetRedisDatabase()),
+ }
+
+ username, _ := c.GetRedisUsername()
+ if username != "" {
+ options = append(options, redis.DialUsername(username))
+ }
+
+ password, _ := c.GetRedisPassword()
+ if password != "" {
+ options = append(options, redis.DialPassword(password))
+ }
+
+ useTLS, _ := c.GetUseTLS()
+ tlsInsecure, _ := c.GetUseTLSInsecure()
+ if useTLS {
+ tlsConfig := &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ }
+
+ if tlsInsecure {
+ tlsConfig.InsecureSkipVerify = true
+ }
+
+ options = append(options,
+ redis.DialTLSConfig(tlsConfig),
+ redis.DialUseTLS(true))
+ }
+
+ return options
+}
+
func publicAddr(c config.Config) (string, error) {
// compute the public version of my peer listen address
- listenAddr, _ := c.GetPeerListenAddr()
+ //listenAddr, _ := c.GetPeerListenAddr() //Temporarily removed http peer listen addr, only grpc listener
+ listenAddr, _ := c.GetGRPCPeerListenAddr()
_, port, err := net.SplitHostPort(listenAddr)
if err != nil {
return "", err
}
+ var myIdentifier string
+
+ // If RedisIdentifier is set, use as identifier.
+ if redisIdentifier, _ := c.GetRedisIdentifier(); redisIdentifier != "" {
+ myIdentifier = redisIdentifier
+ logrus.WithField("identifier", myIdentifier).Info("using specified RedisIdentifier from config")
+ } else {
+ // Otherwise, determine identifier from network interface.
+ myIdentifier, err = getIdentifierFromInterfaces(c)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ publicListenAddr := fmt.Sprintf("http://%s:%s", myIdentifier, port)
+
+ return publicListenAddr, nil
+}
+
+// Scan network interfaces to determine an identifier from either IP or hostname.
+func getIdentifierFromInterfaces(c config.Config) (string, error) {
myIdentifier, _ := os.Hostname()
identifierInterfaceName, _ := c.GetIdentifierInterfaceName()
@@ -215,16 +335,7 @@ func publicAddr(c config.Config) (string, error) {
logrus.WithField("identifier", myIdentifier).WithField("interface", ifc.Name).Info("using identifier from interface")
}
- redisIdentifier, _ := c.GetRedisIdentifier()
-
- if redisIdentifier != "" {
- myIdentifier = redisIdentifier
- logrus.WithField("identifier", myIdentifier).Info("using specific identifier from config")
- }
-
- publicListenAddr := fmt.Sprintf("http://%s:%s", myIdentifier, port)
-
- return publicListenAddr, nil
+ return myIdentifier, nil
}
// equal tells whether a and b contain the same elements.
diff --git a/internal/redimem/redimem.go b/internal/redimem/redimem.go
index 600502d0a3..b38e959d3d 100644
--- a/internal/redimem/redimem.go
+++ b/internal/redimem/redimem.go
@@ -8,7 +8,7 @@ import (
"strings"
"time"
- "github.com/garyburd/redigo/redis"
+ "github.com/gomodule/redigo/redis"
"github.com/sirupsen/logrus"
)
@@ -20,13 +20,17 @@ type Membership interface {
// in order to remain a member of the group.
Register(ctx context.Context, memberName string, timeout time.Duration) error
+ // Unregister removes a name from the list immediately. It's intended to be
+ // used during shutdown so that there's no delay in the case of deliberate downsizing.
+ Unregister(ctx context.Context, memberName string) error
+
// GetMembers retrieves the list of all currently registered members. Members
// that have registered but timed out will not be returned.
GetMembers(ctx context.Context) ([]string, error)
}
const (
- globalPrefix = "refinery"
+ globalPrefix = "tracing-proxy"
defaultRepeatCount = 2
// redisScanTimeout indicates how long to attempt to scan for peers.
@@ -87,6 +91,27 @@ func (rm *RedisMembership) Register(ctx context.Context, memberName string, time
return nil
}
+func (rm *RedisMembership) Unregister(ctx context.Context, memberName string) error {
+ err := rm.validateDefaults()
+ if err != nil {
+ return err
+ }
+ key := fmt.Sprintf("%s•%s•%s", globalPrefix, rm.Prefix, memberName)
+ conn, err := rm.Pool.GetContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+ _, err = conn.Do("DEL", key)
+ if err != nil {
+ logrus.WithField("name", memberName).
+ WithField("err", err).
+ Error("unregistration failed")
+ return err
+ }
+ return nil
+}
+
// GetMembers reaches out to Redis to retrieve a list of all members in the
// cluster. It does this multiple times (how many is configured on
// initializition) and takes the union of the results returned.
@@ -189,10 +214,8 @@ func (rm *RedisMembership) scan(conn redis.Conn, pattern, count string, timeout
break
}
- if keys != nil {
- for _, key := range keys {
- keyChan <- key
- }
+ for _, key := range keys {
+ keyChan <- key
}
// redis will return 0 when we have iterated over the entire set
diff --git a/logger/honeycomb.go b/logger/honeycomb.go
deleted file mode 100644
index f5f6c76fd9..0000000000
--- a/logger/honeycomb.go
+++ /dev/null
@@ -1,256 +0,0 @@
-package logger
-
-import (
- "errors"
- "fmt"
- "net/http"
- "os"
- "strings"
- "time"
-
- "github.com/honeycombio/dynsampler-go"
- libhoney "github.com/honeycombio/libhoney-go"
- "github.com/honeycombio/libhoney-go/transmission"
-
- "github.com/honeycombio/refinery/config"
-)
-
-// HoneycombLogger is a Logger implementation that sends all logs to a Honeycomb
-// dataset. It requires a HoneycombLogger section of the config to exist with
-// three keys, LoggerHoneycombAPI, LoggerAPIKey, and LoggerDataset.
-type HoneycombLogger struct {
- Config config.Config `inject:""`
- UpstreamTransport *http.Transport `inject:"upstreamTransport"`
- Version string `inject:"version"`
- loggerConfig config.HoneycombLoggerConfig
- libhClient *libhoney.Client
- builder *libhoney.Builder
- sampler dynsampler.Sampler
-}
-
-type HoneycombEntry struct {
- loggerConfig config.HoneycombLoggerConfig
- builder *libhoney.Builder
- sampler dynsampler.Sampler
-}
-
-const (
- UnknownLevel config.HoneycombLevel = iota
- DebugLevel
- InfoLevel
- WarnLevel
- ErrorLevel
- PanicLevel
-)
-
-func (h *HoneycombLogger) Start() error {
- // logLevel is defined outside the HoneycombLogger section
- // and is set independently, before Start() is called, so we need to
- // preserve it.
- // TODO: make LogLevel part of the HoneycombLogger/LogrusLogger sections?
- logLevel := h.loggerConfig.Level
- loggerConfig, err := h.Config.GetHoneycombLoggerConfig()
- if err != nil {
- return err
- }
- loggerConfig.Level = logLevel
- h.loggerConfig = loggerConfig
- var loggerTx transmission.Sender
- if h.loggerConfig.LoggerAPIKey == "" {
- loggerTx = &transmission.DiscardSender{}
- } else {
- loggerTx = &transmission.Honeycomb{
- // logs are often sent in flurries; flush every half second
- MaxBatchSize: 100,
- BatchTimeout: 500 * time.Millisecond,
- UserAgentAddition: "refinery/" + h.Version + " (metrics)",
- Transport: h.UpstreamTransport,
- PendingWorkCapacity: libhoney.DefaultPendingWorkCapacity,
- }
- }
-
- if loggerConfig.LoggerSamplerEnabled {
- h.sampler = &dynsampler.PerKeyThroughput{
- ClearFrequencySec: 10,
- PerKeyThroughputPerSec: loggerConfig.LoggerSamplerThroughput,
- MaxKeys: 1000,
- }
- err := h.sampler.Start()
- if err != nil {
- return err
- }
- }
-
- libhClientConfig := libhoney.ClientConfig{
- APIHost: h.loggerConfig.LoggerHoneycombAPI,
- APIKey: h.loggerConfig.LoggerAPIKey,
- Dataset: h.loggerConfig.LoggerDataset,
- Transmission: loggerTx,
- }
- libhClient, err := libhoney.NewClient(libhClientConfig)
- if err != nil {
- return err
- }
- h.libhClient = libhClient
-
- if hostname, err := os.Hostname(); err == nil {
- h.libhClient.AddField("hostname", hostname)
- }
- startTime := time.Now()
- h.libhClient.AddDynamicField("process_uptime_seconds", func() interface{} {
- return time.Now().Sub(startTime) / time.Second
- })
-
- h.builder = h.libhClient.NewBuilder()
-
- // listen for responses from honeycomb, log to STDOUT if something unusual
- // comes back
- go h.readResponses()
-
- // listen for config reloads
- h.Config.RegisterReloadCallback(h.reloadBuilder)
-
- fmt.Printf("Starting Honeycomb Logger - see Honeycomb %s dataset for service logs\n", h.loggerConfig.LoggerDataset)
-
- return nil
-}
-
-func (h *HoneycombLogger) readResponses() {
- resps := h.libhClient.TxResponses()
- for resp := range resps {
- respString := fmt.Sprintf("Response: status: %d, duration: %s", resp.StatusCode, resp.Duration)
- // read response, log if there's an error
- switch {
- case resp.StatusCode == 0: // log message dropped due to sampling
- continue
- case resp.Err != nil:
- fmt.Fprintf(os.Stderr, "Honeycomb Logger got an error back from Honeycomb while trying to send a log line: %s, error: %s, body: %s\n", respString, resp.Err.Error(), string(resp.Body))
- case resp.StatusCode > 202:
- fmt.Fprintf(os.Stderr, "Honeycomb Logger got an unexpected status code back from Honeycomb while trying to send a log line: %s, %s\n", respString, string(resp.Body))
- }
- }
-}
-
-func (h *HoneycombLogger) reloadBuilder() {
- h.Debug().Logf("reloading config for Honeycomb logger")
- // preseve log level
- logLevel := h.loggerConfig.Level
- loggerConfig, err := h.Config.GetHoneycombLoggerConfig()
- if err != nil {
- // complain about this both to STDOUT and to the previously configured
- // honeycomb logger
- fmt.Printf("failed to reload configs for Honeycomb logger: %+v\n", err)
- h.Error().Logf("failed to reload configs for Honeycomb logger: %+v", err)
- return
- }
- loggerConfig.Level = logLevel
- h.loggerConfig = loggerConfig
- h.builder.APIHost = h.loggerConfig.LoggerHoneycombAPI
- h.builder.WriteKey = h.loggerConfig.LoggerAPIKey
- h.builder.Dataset = h.loggerConfig.LoggerDataset
-}
-
-func (h *HoneycombLogger) Stop() error {
- fmt.Printf("stopping honey logger\n")
- libhoney.Flush()
- return nil
-}
-
-func (h *HoneycombLogger) Debug() Entry {
- if h.loggerConfig.Level > DebugLevel {
- return nullEntry
- }
-
- ev := &HoneycombEntry{
- loggerConfig: h.loggerConfig,
- builder: h.builder.Clone(),
- sampler: h.sampler,
- }
- ev.builder.AddField("level", "debug")
-
- return ev
-}
-
-func (h *HoneycombLogger) Info() Entry {
- if h.loggerConfig.Level > InfoLevel {
- return nullEntry
- }
-
- ev := &HoneycombEntry{
- loggerConfig: h.loggerConfig,
- builder: h.builder.Clone(),
- sampler: h.sampler,
- }
- ev.builder.AddField("level", "info")
-
- return ev
-}
-
-func (h *HoneycombLogger) Error() Entry {
- if h.loggerConfig.Level > ErrorLevel {
- return nullEntry
- }
-
- ev := &HoneycombEntry{
- loggerConfig: h.loggerConfig,
- builder: h.builder.Clone(),
- sampler: h.sampler,
- }
- ev.builder.AddField("level", "error")
-
- return ev
-}
-
-func (h *HoneycombLogger) SetLevel(level string) error {
- sanitizedLevel := strings.TrimSpace(strings.ToLower(level))
- var lvl config.HoneycombLevel
- switch sanitizedLevel {
- case "debug":
- lvl = DebugLevel
- case "info":
- lvl = InfoLevel
- case "warn", "warning":
- lvl = WarnLevel
- case "error":
- lvl = ErrorLevel
- case "panic":
- lvl = PanicLevel
- default:
- return errors.New(fmt.Sprintf("unrecognized logging level: %s", level))
- }
- h.loggerConfig.Level = lvl
- return nil
-}
-
-func (h *HoneycombEntry) WithField(key string, value interface{}) Entry {
- h.builder.AddField(key, value)
- return h
-}
-
-func (h *HoneycombEntry) WithString(key string, value string) Entry {
- return h.WithField(key, value)
-}
-
-func (h *HoneycombEntry) WithFields(fields map[string]interface{}) Entry {
- h.builder.Add(fields)
- return h
-}
-
-func (h *HoneycombEntry) Logf(f string, args ...interface{}) {
- ev := h.builder.NewEvent()
- msg := fmt.Sprintf(f, args...)
- ev.AddField("msg", msg)
- ev.Metadata = map[string]string{
- "api_host": ev.APIHost,
- "dataset": ev.Dataset,
- }
- level, ok := ev.Fields()["level"].(string)
- if !ok {
- level = "unknown"
- }
- if h.sampler != nil {
- rate := h.sampler.GetSampleRate(fmt.Sprintf(`%s:%s`, level, msg))
- ev.SampleRate = uint(rate)
- }
- ev.Send()
-}
diff --git a/logger/logger.go b/logger/logger.go
index d36bf01c5b..b70eb9ddcb 100644
--- a/logger/logger.go
+++ b/logger/logger.go
@@ -1,18 +1,18 @@
package logger
-import (
- "fmt"
- "os"
-
- "github.com/honeycombio/refinery/config"
-)
+import "github.com/sirupsen/logrus"
type Logger interface {
Debug() Entry
Info() Entry
Error() Entry
+ Fatal() Entry
+ Panic() Entry
+ Warn() Entry
// SetLevel sets the logging level (debug, info, warn, error)
SetLevel(level string) error
+
+ Init() *logrus.Logger
}
type Entry interface {
@@ -26,21 +26,6 @@ type Entry interface {
Logf(f string, args ...interface{})
}
-func GetLoggerImplementation(c config.Config) Logger {
- var logger Logger
- loggerType, err := c.GetLoggerType()
- if err != nil {
- fmt.Printf("unable to get logger type from config: %v\n", err)
- os.Exit(1)
- }
- switch loggerType {
- case "honeycomb":
- logger = &HoneycombLogger{}
- case "logrus":
- logger = &LogrusLogger{}
- default:
- fmt.Printf("unknown logger type %s. Exiting.\n", loggerType)
- os.Exit(1)
- }
- return logger
+func GetLoggerImplementation() Logger {
+ return &LogrusLogger{}
}
diff --git a/logger/logger_test.go b/logger/logger_test.go
deleted file mode 100644
index b0ed975630..0000000000
--- a/logger/logger_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build all race
-
-package logger
-
-import (
- "testing"
-
- "github.com/honeycombio/refinery/config"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestHoneycombLoggerRespectsLogLevelAfterStart(t *testing.T) {
- cfg := &config.MockConfig{GetHoneycombLoggerConfigVal: config.HoneycombLoggerConfig{}}
- hcLogger := &HoneycombLogger{
- Config: cfg,
- loggerConfig: config.HoneycombLoggerConfig{Level: WarnLevel},
- }
-
- assert.Equal(t, WarnLevel, hcLogger.loggerConfig.Level)
- err := hcLogger.Start()
- assert.Nil(t, err)
- assert.Equal(t, WarnLevel, hcLogger.loggerConfig.Level)
-}
diff --git a/logger/logrus.go b/logger/logrus.go
index 05ce9ab2be..67faeaf996 100644
--- a/logger/logrus.go
+++ b/logger/logrus.go
@@ -1,9 +1,15 @@
package logger
import (
+ "fmt"
+ "github.com/opsramp/tracing-proxy/config"
"github.com/sirupsen/logrus"
-
- "github.com/honeycombio/refinery/config"
+ "gopkg.in/natefinch/lumberjack.v2"
+ "os"
+ "path"
+ "runtime"
+ "strings"
+ "sync"
)
// LogrusLogger is a Logger implementation that sends all logs to stdout using
@@ -21,11 +27,108 @@ type LogrusEntry struct {
}
func (l *LogrusLogger) Start() error {
- l.logger = logrus.New()
l.logger.SetLevel(l.level)
+ l.logger.SetReportCaller(false) // using a hook to do the same, so avoiding additional processing here
+ l.logger.AddHook(&CallerHook{})
+
+ logrusConfig, err := l.Config.GetLogrusConfig()
+ if err != nil {
+ return err
+ }
+
+ switch logrusConfig.LogOutput {
+ case "stdout":
+ l.logger.SetOutput(os.Stdout)
+ case "stderr":
+ l.logger.SetOutput(os.Stderr)
+ case "file":
+ l.logger.SetOutput(&lumberjack.Logger{
+ Filename: logrusConfig.File.FileName,
+ MaxSize: logrusConfig.File.MaxSize,
+ MaxBackups: logrusConfig.File.MaxBackups,
+ Compress: logrusConfig.File.Compress,
+ })
+ }
+
+ switch logrusConfig.LogFormatter {
+ case "logfmt":
+ l.logger.SetFormatter(&logrus.TextFormatter{
+ DisableColors: true,
+ ForceQuote: true,
+ FullTimestamp: true,
+ DisableLevelTruncation: true,
+ QuoteEmptyFields: true,
+ FieldMap: logrus.FieldMap{
+ logrus.FieldKeyFile: "file",
+ logrus.FieldKeyTime: "timestamp",
+ logrus.FieldKeyLevel: "level",
+ logrus.FieldKeyMsg: "message",
+ logrus.FieldKeyFunc: "caller",
+ },
+ })
+ case "json":
+ l.logger.SetFormatter(&logrus.JSONFormatter{
+ FieldMap: logrus.FieldMap{
+ logrus.FieldKeyFile: "file",
+ logrus.FieldKeyTime: "timestamp",
+ logrus.FieldKeyLevel: "level",
+ logrus.FieldKeyMsg: "message",
+ logrus.FieldKeyFunc: "caller",
+ },
+ })
+ }
return nil
}
+func (l *LogrusLogger) Init() *logrus.Logger {
+ l.logger = logrus.New()
+ return l.logger
+}
+
+func (l *LogrusLogger) Panic() Entry {
+ if !l.logger.IsLevelEnabled(logrus.PanicLevel) {
+ return nullEntry
+ }
+
+ return &LogrusEntry{
+ entry: logrus.NewEntry(l.logger),
+ level: logrus.PanicLevel,
+ }
+}
+
+func (l *LogrusLogger) Fatal() Entry {
+ if !l.logger.IsLevelEnabled(logrus.FatalLevel) {
+ return nullEntry
+ }
+
+ return &LogrusEntry{
+ entry: logrus.NewEntry(l.logger),
+ level: logrus.FatalLevel,
+ }
+}
+
+func (l *LogrusLogger) Warn() Entry {
+ if !l.logger.IsLevelEnabled(logrus.WarnLevel) {
+ return nullEntry
+ }
+
+ return &LogrusEntry{
+ entry: logrus.NewEntry(l.logger),
+ level: logrus.WarnLevel,
+ }
+}
+
+func (l *LogrusLogger) Trace() Entry {
+ if !l.logger.IsLevelEnabled(logrus.TraceLevel) {
+ return nullEntry
+ }
+
+ return &LogrusEntry{
+ entry: logrus.NewEntry(l.logger),
+ level: logrus.TraceLevel,
+ }
+}
+
func (l *LogrusLogger) Debug() Entry {
if !l.logger.IsLevelEnabled(logrus.DebugLevel) {
return nullEntry
@@ -95,6 +198,14 @@ func (l *LogrusEntry) WithFields(fields map[string]interface{}) Entry {
func (l *LogrusEntry) Logf(f string, args ...interface{}) {
switch l.level {
+ case logrus.WarnLevel:
+ l.entry.Warnf(f, args...)
+ case logrus.FatalLevel:
+ l.entry.Fatalf(f, args...)
+ case logrus.PanicLevel:
+ l.entry.Panicf(f, args...)
+ case logrus.TraceLevel:
+ l.entry.Tracef(f, args...)
case logrus.DebugLevel:
l.entry.Debugf(f, args...)
case logrus.InfoLevel:
@@ -103,3 +214,56 @@ func (l *LogrusEntry) Logf(f string, args ...interface{}) {
l.entry.Errorf(f, args...)
}
}
+
+var (
+ callerInitOnce sync.Once
+ presentProjectRoot string
+)
+
+type CallerHook struct {
+}
+
+func (h *CallerHook) Fire(entry *logrus.Entry) error {
+ functionName, fileName := h.caller()
+ if fileName != "" {
+ entry.Data[logrus.FieldKeyFile] = fileName
+ }
+ if functionName != "" {
+ entry.Data[logrus.FieldKeyFunc] = functionName
+ }
+
+ return nil
+}
+
+func (h *CallerHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ logrus.WarnLevel,
+ logrus.InfoLevel,
+ logrus.DebugLevel,
+ }
+}
+
+func (h *CallerHook) caller() (function string, file string) {
+ callerInitOnce.Do(func() {
+ presentProjectRoot, _ = os.Getwd()
+ presentProjectRoot = path.Join(presentProjectRoot, "../")
+ })
+
+ pcs := make([]uintptr, 25)
+ _ = runtime.Callers(0, pcs)
+ frames := runtime.CallersFrames(pcs)
+
+ for next, again := frames.Next(); again; next, again = frames.Next() {
+ if !strings.Contains(next.File, "/usr/local/go/") &&
+ !strings.Contains(next.File, "logger") &&
+ !strings.Contains(next.File, "logrus") &&
+ strings.HasPrefix(next.File, presentProjectRoot) {
+ return next.Function, fmt.Sprintf("%s:%d", strings.TrimPrefix(next.File, presentProjectRoot), next.Line)
+ }
+ }
+
+ return
+}
diff --git a/logger/mock.go b/logger/mock.go
deleted file mode 100644
index 05eb1eff26..0000000000
--- a/logger/mock.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package logger
-
-import (
- "fmt"
-
- "github.com/honeycombio/refinery/config"
-)
-
-type MockLogger struct {
- Events []*MockLoggerEvent
-}
-
-type MockLoggerEvent struct {
- l *MockLogger
- level config.HoneycombLevel
- Fields map[string]interface{}
-}
-
-func (l *MockLogger) Debug() Entry {
- return &MockLoggerEvent{
- l: l,
- level: DebugLevel,
- Fields: make(map[string]interface{}),
- }
-}
-
-func (l *MockLogger) Info() Entry {
- return &MockLoggerEvent{
- l: l,
- level: InfoLevel,
- Fields: make(map[string]interface{}),
- }
-}
-
-func (l *MockLogger) Error() Entry {
- return &MockLoggerEvent{
- l: l,
- level: ErrorLevel,
- Fields: make(map[string]interface{}),
- }
-}
-
-func (l *MockLogger) SetLevel(level string) error {
- return nil
-}
-
-func (e *MockLoggerEvent) WithField(key string, value interface{}) Entry {
- e.Fields[key] = value
-
- return e
-}
-
-func (e *MockLoggerEvent) WithString(key string, value string) Entry {
- return e.WithField(key, value)
-}
-
-func (e *MockLoggerEvent) WithFields(fields map[string]interface{}) Entry {
- for k, v := range fields {
- e.Fields[k] = v
- }
-
- return e
-}
-
-func (e *MockLoggerEvent) Logf(f string, args ...interface{}) {
- msg := fmt.Sprintf(f, args...)
- switch e.level {
- case DebugLevel:
- e.WithField("debug", msg)
- case InfoLevel:
- e.WithField("info", msg)
- case ErrorLevel:
- e.WithField("error", msg)
- default:
- panic("unexpected log level")
- }
- e.l.Events = append(e.l.Events, e)
-}
diff --git a/metrics/honeycomb.go b/metrics/honeycomb.go
deleted file mode 100644
index 3e89d635ab..0000000000
--- a/metrics/honeycomb.go
+++ /dev/null
@@ -1,351 +0,0 @@
-package metrics
-
-import (
- "context"
- "math"
- "net/http"
- "os"
- "runtime"
- "sort"
- "sync"
- "time"
-
- libhoney "github.com/honeycombio/libhoney-go"
- "github.com/honeycombio/libhoney-go/transmission"
-
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
-)
-
-type HoneycombMetrics struct {
- Config config.Config `inject:""`
- Logger logger.Logger `inject:""`
- UpstreamTransport *http.Transport `inject:"upstreamTransport"`
- Version string `inject:"version"`
-
- countersLock sync.Mutex
- counters map[string]*counter
- gaugesLock sync.Mutex
- gauges map[string]*gauge
- histogramsLock sync.Mutex
- histograms map[string]*histogram
-
- libhClient *libhoney.Client
-
- latestMemStatsLock sync.RWMutex
- latestMemStats runtime.MemStats
-
- //reportingFreq is the interval with which to report statistics
- reportingFreq int64
- reportingCancelFunc func()
-}
-
-type counter struct {
- lock sync.Mutex
- name string
- val int
-}
-
-type gauge struct {
- lock sync.Mutex
- name string
- val float64
-}
-
-type histogram struct {
- lock sync.Mutex
- name string
- vals []float64
-}
-
-func (h *HoneycombMetrics) Start() error {
- h.Logger.Debug().Logf("Starting HoneycombMetrics")
- defer func() { h.Logger.Debug().Logf("Finished starting HoneycombMetrics") }()
- mc, err := h.Config.GetHoneycombMetricsConfig()
- if err != nil {
- return err
- }
- if mc.MetricsReportingInterval < 1 {
- mc.MetricsReportingInterval = 1
- }
- h.reportingFreq = mc.MetricsReportingInterval
-
- if err = h.initLibhoney(mc); err != nil {
- return err
- }
-
- h.counters = make(map[string]*counter)
- h.gauges = make(map[string]*gauge)
- h.histograms = make(map[string]*histogram)
-
- // listen for config reloads
- h.Config.RegisterReloadCallback(h.reloadBuilder)
-
- return nil
-}
-
-func (h *HoneycombMetrics) reloadBuilder() {
- h.Logger.Debug().Logf("reloading config for honeeycomb metrics reporter")
- mc, err := h.Config.GetHoneycombMetricsConfig()
- if err != nil {
- // complain about this both to STDOUT and to the previously configured
- // honeycomb logger
- h.Logger.Error().Logf("failed to reload configs for Honeycomb metrics: %+v\n", err)
- return
- }
- h.libhClient.Close()
- // cancel the two reporting goroutines and restart them
- h.reportingCancelFunc()
- h.initLibhoney(mc)
-}
-
-func (h *HoneycombMetrics) initLibhoney(mc config.HoneycombMetricsConfig) error {
- metricsTx := &transmission.Honeycomb{
- // metrics are always sent as a single event, so don't wait for the timeout
- MaxBatchSize: 1,
- BlockOnSend: true,
- UserAgentAddition: "refinery/" + h.Version + " (metrics)",
- Transport: h.UpstreamTransport,
- }
- libhClientConfig := libhoney.ClientConfig{
- APIHost: mc.MetricsHoneycombAPI,
- APIKey: mc.MetricsAPIKey,
- Dataset: mc.MetricsDataset,
- Transmission: metricsTx,
- }
- libhClient, err := libhoney.NewClient(libhClientConfig)
- if err != nil {
- return err
- }
- h.libhClient = libhClient
-
- // add some general go metrics to every report
- // goroutines
- if hostname, err := os.Hostname(); err == nil {
- h.libhClient.AddField("hostname", hostname)
- }
- h.libhClient.AddDynamicField("num_goroutines",
- func() interface{} { return runtime.NumGoroutine() })
- ctx, cancel := context.WithCancel(context.Background())
- h.reportingCancelFunc = cancel
- go h.refreshMemStats(ctx)
- go h.readResponses(ctx)
- getAlloc := func() interface{} {
- var mem runtime.MemStats
- h.readMemStats(&mem)
- return mem.Alloc
- }
- h.libhClient.AddDynamicField("memory_inuse", getAlloc)
- startTime := time.Now()
- h.libhClient.AddDynamicField("process_uptime_seconds", func() interface{} {
- return time.Now().Sub(startTime) / time.Second
- })
- go h.reportToHoneycommb(ctx)
- return nil
-}
-
-// refreshMemStats caches memory statistics to avoid blocking sending honeycomb
-// metrics on gc pauses
-func (h *HoneycombMetrics) refreshMemStats(ctx context.Context) {
- // get memory metrics 5 times more frequently than we send metrics to make sure
- // we have relatively up to date mem statistics but not go wild and get them
- // all the time.
- // for _ = range {
- ticker := time.NewTicker(time.Duration(h.reportingFreq*1000/5) * time.Millisecond)
- for {
- select {
- case <-ticker.C:
- // Blocks if GC is running, maybe for a *looong* time.
- var mem runtime.MemStats
- runtime.ReadMemStats(&mem)
-
- h.latestMemStatsLock.Lock()
- h.latestMemStats = mem
- h.latestMemStatsLock.Unlock()
- case <-ctx.Done():
- // context canceled? we're being asked to stop this so it can be restarted.
- h.Logger.Debug().Logf("restarting honeycomb metrics refreshMemStats goroutine")
- return
- }
- }
-}
-
-// readResponses reads the responses from the libhoney responses queue and logs
-// any errors that come down it
-func (h *HoneycombMetrics) readResponses(ctx context.Context) {
- resps := h.libhClient.TxResponses()
- for {
- select {
- case resp := <-resps:
- // read response, log if there's an error
- var msg string
- var log logger.Entry
- switch {
- case resp.Err != nil:
- msg = "Metrics reporter got an error back from Honeycomb"
- log = h.Logger.Error().WithField("error", resp.Err.Error())
- case resp.StatusCode > 202:
- msg = "Metrics reporter got an unexpected status code back from Honeycomb"
- log = h.Logger.Error()
- }
- if log != nil {
- log.WithFields(map[string]interface{}{
- "status_code": resp.StatusCode,
- "body": string(resp.Body),
- "duration": resp.Duration,
- }).Logf(msg)
- }
- case <-ctx.Done():
- // bail out; we're refreshing the config and will launch a new
- // response reader.
- h.Logger.Debug().Logf("restarting honeycomb metrics read libhoney responses goroutine")
- return
- }
- }
-}
-
-// readMemStats is a drop-in replacement for runtime.ReadMemStats which won't
-// block waiting for a GC to finish.
-func (h *HoneycombMetrics) readMemStats(mem *runtime.MemStats) {
- h.latestMemStatsLock.RLock()
- defer h.latestMemStatsLock.RUnlock()
-
- *mem = h.latestMemStats
-}
-
-func (h *HoneycombMetrics) reportToHoneycommb(ctx context.Context) {
- tick := time.NewTicker(time.Duration(h.reportingFreq) * time.Second)
- for {
- select {
- case <-ctx.Done():
- // context canceled? we're being asked to stop this so it can be restarted.
- return
- case <-tick.C:
- ev := h.libhClient.NewEvent()
- ev.Metadata = map[string]string{
- "api_host": ev.APIHost,
- "dataset": ev.Dataset,
- }
- h.countersLock.Lock()
- for _, count := range h.counters {
- count.lock.Lock()
- ev.AddField(count.name, count.val)
- count.val = 0
- count.lock.Unlock()
- }
- h.countersLock.Unlock()
-
- h.gaugesLock.Lock()
- for _, gauge := range h.gauges {
- gauge.lock.Lock()
- ev.AddField(gauge.name, gauge.val)
- // gauges should remain where they are until changed
- // gauge.val = 0
- gauge.lock.Unlock()
- }
- h.gaugesLock.Unlock()
-
- h.histogramsLock.Lock()
- for _, histogram := range h.histograms {
- histogram.lock.Lock()
- if len(histogram.vals) != 0 {
- sort.Float64s(histogram.vals)
- p50Index := int(math.Floor(float64(len(histogram.vals)) * 0.5))
- p95Index := int(math.Floor(float64(len(histogram.vals)) * 0.95))
- p99Index := int(math.Floor(float64(len(histogram.vals)) * 0.99))
- ev.AddField(histogram.name+"_p50", histogram.vals[p50Index])
- ev.AddField(histogram.name+"_p95", histogram.vals[p95Index])
- ev.AddField(histogram.name+"_p99", histogram.vals[p99Index])
- ev.AddField(histogram.name+"_min", histogram.vals[0])
- ev.AddField(histogram.name+"_max", histogram.vals[len(histogram.vals)-1])
- ev.AddField(histogram.name+"_avg", average(histogram.vals))
- histogram.vals = histogram.vals[:0]
- }
- histogram.lock.Unlock()
- }
- h.histogramsLock.Unlock()
-
- ev.Send()
- }
- }
-}
-
-func average(vals []float64) float64 {
- var total float64
- for _, val := range vals {
- total += val
- }
- return total / float64(len(vals))
-}
-
-func (h *HoneycombMetrics) Register(name string, metricType string) {
- switch metricType {
- case "counter":
- h.countersLock.Lock()
- defer h.countersLock.Unlock()
- // inside the lock, let's not race to create the counter
- _, ok := h.counters[name]
- if !ok {
- newCounter := &counter{
- name: name,
- }
- h.counters[name] = newCounter
- }
- case "gauge":
- h.gaugesLock.Lock()
- defer h.gaugesLock.Unlock()
- _, ok := h.gauges[name]
- if !ok {
- newGauge := &gauge{
- name: name,
- }
- h.gauges[name] = newGauge
- }
- case "histogram":
- h.histogramsLock.Lock()
- defer h.histogramsLock.Unlock()
- _, ok := h.histograms[name]
- if !ok {
- newGauge := &histogram{
- name: name,
- vals: make([]float64, 0),
- }
- h.histograms[name] = newGauge
- }
- default:
- h.Logger.Debug().Logf("unspported metric type %s", metricType)
- }
-}
-
-func (h *HoneycombMetrics) IncrementCounter(name string) {
- count, ok := h.counters[name]
- if !ok {
- h.Register(name, "counter")
- count = h.counters[name]
- }
- count.lock.Lock()
- defer count.lock.Unlock()
- count.val++
-}
-
-func (h *HoneycombMetrics) Gauge(name string, val float64) {
- gauge, ok := h.gauges[name]
- if !ok {
- h.Register(name, "gauge")
- gauge = h.gauges[name]
- }
- gauge.lock.Lock()
- defer gauge.lock.Unlock()
- gauge.val = val
-}
-
-func (h *HoneycombMetrics) Histogram(name string, obs float64) {
- histogram, ok := h.histograms[name]
- if !ok {
- h.Register(name, "histogram")
- histogram = h.histograms[name]
- }
- histogram.lock.Lock()
- defer histogram.lock.Unlock()
- histogram.vals = append(histogram.vals, obs)
-}
diff --git a/metrics/metrics.go b/metrics/metrics.go
index e40d02837b..8440629522 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -1,35 +1,89 @@
package metrics
import (
- "fmt"
- "os"
-
- "github.com/honeycombio/refinery/config"
+ "github.com/opsramp/tracing-proxy/types"
)
type Metrics interface {
// Register declares a metric; metricType should be one of counter, gauge, histogram
Register(name string, metricType string)
- IncrementCounter(name string)
- Gauge(name string, val float64)
- Histogram(name string, obs float64)
+ Increment(name string)
+ Gauge(name string, val interface{})
+ Count(name string, n interface{})
+ Histogram(name string, obs interface{})
+ RegisterWithDescriptionLabels(name string, metricType string, desc string, labels []string)
+
+ GaugeWithLabels(name string, labels map[string]string, value float64)
+ IncrementWithLabels(name string, labels map[string]string)
}
-func GetMetricsImplementation(c config.Config) Metrics {
- var metricsr Metrics
- metricsType, err := c.GetMetricsType()
- if err != nil {
- fmt.Printf("unable to get metrics type from config: %v\n", err)
- os.Exit(1)
- }
- switch metricsType {
- case "honeycomb":
- metricsr = &HoneycombMetrics{}
- case "prometheus":
- metricsr = &PromMetrics{}
+func GetMetricsImplementation(prefix string) Metrics {
+ return &OpsRampMetrics{prefix: prefix}
+}
+
+func ConvertNumeric(val interface{}) float64 {
+ switch n := val.(type) {
+ case int:
+ return float64(n)
+ case uint:
+ return float64(n)
+ case int64:
+ return float64(n)
+ case uint64:
+ return float64(n)
+ case int32:
+ return float64(n)
+ case uint32:
+ return float64(n)
+ case int16:
+ return float64(n)
+ case uint16:
+ return float64(n)
+ case int8:
+ return float64(n)
+ case uint8:
+ return float64(n)
+ case float64:
+ return n
+ case float32:
+ return float64(n)
default:
- fmt.Printf("unknown metrics type %s. Exiting.\n", metricsType)
- os.Exit(1)
+ return 0
+ }
+}
+
+func ExtractLabelsFromSpan(span *types.Span, labelToKeyMap map[string][]string) map[string]string {
+
+ labels := map[string]string{}
+
+ attributeMapKeys := []string{"spanAttributes", "resourceAttributes", "eventAttributes"}
+
+ for labelName, searchKeys := range labelToKeyMap {
+ for _, searchKey := range searchKeys {
+ // check at the higher level first
+ searchValue, exists := span.Data[searchKey]
+ if exists && searchValue != nil {
+ labels[labelName] = searchValue.(string)
+ continue
+ }
+
+ // check in the span, resource and event attributes when key is not found
+ for _, attributeKey := range attributeMapKeys {
+ if attribute, ok := span.Data[attributeKey]; ok && attribute != nil {
+ searchValue, exists = attribute.(map[string]interface{})[searchKey]
+ if exists && searchValue != nil {
+ labels[labelName] = searchValue.(string)
+ break
+ }
+ }
+ }
+
+ // if the key does not exist then set it to empty
+ if !exists {
+ labels[labelName] = ""
+ }
+ }
}
- return metricsr
+
+ return labels
}
diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go
deleted file mode 100644
index 0d4b6f0765..0000000000
--- a/metrics/metrics_test.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// +build all race
-
-package metrics
diff --git a/metrics/mock.go b/metrics/mock.go
index 496a319585..a2d6e9dbb6 100644
--- a/metrics/mock.go
+++ b/metrics/mock.go
@@ -27,19 +27,25 @@ func (m *MockMetrics) Register(name string, metricType string) {
m.Registrations[name] = metricType
}
-func (m *MockMetrics) IncrementCounter(name string) {
+func (m *MockMetrics) Increment(name string) {
m.lock.Lock()
defer m.lock.Unlock()
m.CounterIncrements[name] += 1
}
-func (m *MockMetrics) Gauge(name string, val float64) {
+func (m *MockMetrics) Gauge(name string, val interface{}) {
m.lock.Lock()
defer m.lock.Unlock()
- m.GaugeRecords[name] = val
+ m.GaugeRecords[name] = ConvertNumeric(val)
}
-func (m *MockMetrics) Histogram(name string, obs float64) {
+func (m *MockMetrics) Count(name string, val interface{}) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ m.CounterIncrements[name] += int(ConvertNumeric(val))
+}
+func (m *MockMetrics) Histogram(name string, val interface{}) {
m.lock.Lock()
defer m.lock.Unlock()
@@ -47,5 +53,5 @@ func (m *MockMetrics) Histogram(name string, obs float64) {
if !ok {
m.Histograms[name] = make([]float64, 0)
}
- m.Histograms[name] = append(m.Histograms[name], obs)
+ m.Histograms[name] = append(m.Histograms[name], ConvertNumeric(val))
}
diff --git a/metrics/nullmetrics.go b/metrics/nullmetrics.go
index cc0985076a..18d7eb5099 100644
--- a/metrics/nullmetrics.go
+++ b/metrics/nullmetrics.go
@@ -7,6 +7,7 @@ type NullMetrics struct{}
func (n *NullMetrics) Start() {}
func (n *NullMetrics) Register(name string, metricType string) {}
-func (n *NullMetrics) IncrementCounter(name string) {}
-func (n *NullMetrics) Gauge(name string, val float64) {}
-func (n *NullMetrics) Histogram(name string, obs float64) {}
+func (n *NullMetrics) Increment(name string) {}
+func (n *NullMetrics) Gauge(name string, val interface{}) {}
+func (n *NullMetrics) Count(name string, val interface{}) {}
+func (n *NullMetrics) Histogram(name string, obs interface{}) {}
diff --git a/metrics/opsramp.go b/metrics/opsramp.go
new file mode 100644
index 0000000000..354ecb7387
--- /dev/null
+++ b/metrics/opsramp.go
@@ -0,0 +1,667 @@
+package metrics
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/golang/snappy"
+ "github.com/gorilla/mux"
+ "github.com/prometheus/client_golang/prometheus/collectors"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ io_prometheus_client "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/prompb"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/gogo/protobuf/proto"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+const (
+ missingMetricsWriteScope = "auth token provided does not have metrics:write scope"
+)
+
+var (
+ muxer *mux.Router
+ server *http.Server
+ serverMut sync.Mutex
+ hostname string
+)
+
+func init() {
+ muxer = mux.NewRouter()
+
+ hostname, _ = os.Hostname()
+}
+
+type OpsRampMetrics struct {
+ Config config.Config `inject:""`
+ Logger logger.Logger `inject:""`
+ // metrics keeps a record of all the registered metrics so that we can increment
+ // them by name
+ metrics map[string]interface{}
+ lock sync.RWMutex
+
+ Client http.Client
+
+ apiEndpoint string
+ tenantID string
+ re *regexp.Regexp
+ prefix string
+
+ authTokenEndpoint string
+ apiKey string
+ apiSecret string
+ oAuthToken *OpsRampAuthTokenResponse
+
+ promRegistry *prometheus.Registry
+}
+
+func (p *OpsRampMetrics) Start() error {
+ p.Logger.Debug().Logf("Starting OpsRampMetrics")
+ defer func() { p.Logger.Debug().Logf("Finished starting OpsRampMetrics") }()
+
+ metricsConfig := p.Config.GetMetricsConfig()
+
+ p.metrics = make(map[string]interface{})
+
+ // Create non-global registry.
+ p.promRegistry = prometheus.NewRegistry()
+
+ // Add go runtime metrics and process collectors to default metrics prefix
+ if p.prefix == "" {
+ p.promRegistry.MustRegister(
+ collectors.NewGoCollector(),
+ collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
+ )
+ }
+
+ listenURI := "/metrics"
+ if p.prefix != "" {
+ listenURI = fmt.Sprintf("/metrics/%s", strings.TrimSpace(p.prefix))
+ }
+ muxer.Handle(listenURI, promhttp.HandlerFor(
+ p.promRegistry,
+ promhttp.HandlerOpts{Registry: p.promRegistry, Timeout: 10 * time.Second},
+ ),
+ )
+ p.Logger.Info().Logf("registered metrics at %s for prefix: %s", listenURI, p.prefix)
+
+ if server != nil {
+ err := server.Shutdown(context.Background())
+ if err != nil {
+ p.Logger.Error().Logf("metrics server shutdown: %v", err)
+ }
+ }
+ serverMut.Lock()
+ server = &http.Server{
+ Addr: metricsConfig.ListenAddr,
+ Handler: muxer,
+ ReadHeaderTimeout: 10 * time.Second,
+ }
+ go func() {
+ defer serverMut.Unlock()
+ if err := server.ListenAndServe(); err != http.ErrServerClosed {
+ p.Logger.Error().Logf("%v", err)
+ }
+ }()
+
+ if p.Config.GetSendMetricsToOpsRamp() {
+ go func() {
+ metricsTicker := time.NewTicker(time.Duration(metricsConfig.ReportingInterval) * time.Second)
+ defer metricsTicker.Stop()
+ p.Populate()
+
+ // populating the oAuth Token Initially
+ err := p.RenewOAuthToken()
+ if err != nil {
+ p.Logger.Error().Logf("error while initializing oAuth Token Err: %v", err)
+ }
+
+ for range metricsTicker.C {
+ statusCode, err := p.Push()
+ if err != nil {
+ p.Logger.Error().Logf("error while pushing metrics with statusCode: %d and Error: %v", statusCode, err)
+ if err.Error() == missingMetricsWriteScope {
+ p.Logger.Info().Logf("renewing auth token since the existing token is missing metrics:write scope")
+ err := p.RenewOAuthToken()
+ if err != nil {
+ p.Logger.Error().Logf("error while initializing oAuth Token Err: %v", err)
+ }
+ }
+ }
+ }
+ }()
+ }
+
+ return nil
+}
+
+// Register takes a name and a metric type. The type should be one of "counter",
+// "gauge", or "histogram"
+func (p *OpsRampMetrics) Register(name string, metricType string) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ newMetric, exists := p.metrics[name]
+
+ // don't attempt to add the metric again as this will cause a panic
+ if exists {
+ return
+ }
+
+ switch metricType {
+ case "counter":
+ newMetric = promauto.With(p.promRegistry).NewCounter(prometheus.CounterOpts{
+ Name: name,
+ Namespace: p.prefix,
+ Help: name,
+ })
+ case "gauge":
+ newMetric = promauto.With(p.promRegistry).NewGauge(prometheus.GaugeOpts{
+ Name: name,
+ Namespace: p.prefix,
+ Help: name,
+ })
+ case "histogram":
+ newMetric = promauto.With(p.promRegistry).NewHistogram(prometheus.HistogramOpts{
+ Name: name,
+ Namespace: p.prefix,
+ Help: name,
+ // This is an attempt at a usable set of buckets for a wide range of metrics
+ // 16 buckets, first upper bound of 1, each following upper bound is 4x the previous
+ Buckets: prometheus.ExponentialBuckets(1, 4, 16),
+ })
+ }
+
+ p.metrics[name] = newMetric
+}
+
+// RegisterWithDescriptionLabels takes a name, a metric type, description, labels. The type should be one of "counter",
+// "gauge", or "histogram"
+func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType string, desc string, labels []string) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ newMetric, exists := p.metrics[name]
+
+ // don't attempt to add the metric again as this will cause a panic
+ if exists {
+ return
+ }
+
+ switch metricType {
+ case "counter":
+ newMetric = promauto.With(p.promRegistry).NewCounterVec(prometheus.CounterOpts{
+ Name: name,
+ Namespace: p.prefix,
+ Help: desc,
+ }, labels)
+ case "gauge":
+ newMetric = promauto.With(p.promRegistry).NewGaugeVec(
+ prometheus.GaugeOpts{
+ Name: name,
+ Namespace: p.prefix,
+ Help: desc,
+ },
+ labels)
+ case "histogram":
+ newMetric = promauto.With(p.promRegistry).NewHistogramVec(prometheus.HistogramOpts{
+ Name: name,
+ Namespace: p.prefix,
+ Help: desc,
+ // This is an attempt at a usable set of buckets for a wide range of metrics
+ // 16 buckets, first upper bound of 1, each following upper bound is 4x the previous
+ Buckets: prometheus.ExponentialBuckets(1, 4, 16),
+ }, labels)
+ }
+
+ p.metrics[name] = newMetric
+}
+
+func (p *OpsRampMetrics) Increment(name string) {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ if counterInterface, ok := p.metrics[name]; ok {
+ if counter, ok := counterInterface.(prometheus.Counter); ok {
+ counter.Inc()
+ }
+ }
+}
+func (p *OpsRampMetrics) Count(name string, n interface{}) {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ if counterInterface, ok := p.metrics[name]; ok {
+ if counter, ok := counterInterface.(prometheus.Counter); ok {
+ counter.Add(ConvertNumeric(n))
+ }
+ }
+}
+func (p *OpsRampMetrics) Gauge(name string, val interface{}) {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ if gaugeInterface, ok := p.metrics[name]; ok {
+ if gauge, ok := gaugeInterface.(prometheus.Gauge); ok {
+ gauge.Set(ConvertNumeric(val))
+ }
+ }
+}
+func (p *OpsRampMetrics) Histogram(name string, obs interface{}) {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ if histInterface, ok := p.metrics[name]; ok {
+ if hist, ok := histInterface.(prometheus.Histogram); ok {
+ hist.Observe(ConvertNumeric(obs))
+ }
+ }
+}
+
+func (p *OpsRampMetrics) GaugeWithLabels(name string, labels map[string]string, value float64) {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ if gaugeInterface, ok := p.metrics[name]; ok {
+ if gaugeVec, ok := gaugeInterface.(*prometheus.GaugeVec); ok {
+ gaugeVec.With(labels).Set(value)
+ }
+ }
+}
+
+func (p *OpsRampMetrics) IncrementWithLabels(name string, labels map[string]string) {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ if gaugeInterface, ok := p.metrics[name]; ok {
+ if gaugeVec, ok := gaugeInterface.(*prometheus.CounterVec); ok {
+ gaugeVec.With(labels).Inc()
+ }
+ }
+}
+
+type OpsRampAuthTokenResponse struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ ExpiresIn int `json:"expires_in"`
+ Scope string `json:"scope"`
+}
+
+func (p *OpsRampMetrics) Populate() {
+ metricsConfig := p.Config.GetMetricsConfig()
+ authConfig := p.Config.GetAuthConfig()
+ proxyConfig := p.Config.GetProxyConfig()
+
+ p.apiEndpoint = metricsConfig.OpsRampAPI
+
+ p.authTokenEndpoint = authConfig.Endpoint
+ p.apiKey = authConfig.Key
+ p.apiSecret = authConfig.Secret
+ p.tenantID = authConfig.TenantId
+
+ // Creating Regex for a list of metrics
+ regexString := ".*" // the default value is to take everything
+ if len(metricsConfig.MetricsList) >= 1 {
+ regexString = metricsConfig.MetricsList[0]
+ for index := 0; index < len(metricsConfig.MetricsList); index++ {
+ regexString = fmt.Sprintf("%s|%s", regexString, metricsConfig.MetricsList[index])
+ }
+ }
+ p.re = regexp.MustCompile(regexString)
+
+ proxyURL := ""
+ if proxyConfig.Host != "" && proxyConfig.Protocol != "" {
+ proxyURL = fmt.Sprintf("%s://%s:%d/", proxyConfig.Protocol, proxyConfig.Host, proxyConfig.Port)
+ if proxyConfig.Username != "" && proxyConfig.Password != "" {
+ proxyURL = fmt.Sprintf("%s://%s:%s@%s:%d", proxyConfig.Protocol, proxyConfig.Username, proxyConfig.Password, proxyConfig.Host, proxyConfig.Port)
+ p.Logger.Debug().Logf("Using Authentication for ProxyConfiguration Communication for Metrics")
+ }
+ }
+
+ p.Client = http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ MaxIdleConns: 10,
+ MaxConnsPerHost: 10,
+ IdleConnTimeout: 5 * time.Minute,
+ },
+ Timeout: time.Duration(240) * time.Second,
+ }
+ if proxyURL != "" {
+ proxyURL, err := url.Parse(proxyURL)
+ if err != nil {
+ p.Logger.Error().Logf("skipping proxy err: %v", err)
+ } else {
+ p.Client = http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyURL(proxyURL),
+ MaxIdleConns: 10,
+ MaxConnsPerHost: 10,
+ IdleConnTimeout: 5 * time.Minute,
+ },
+ Timeout: time.Duration(240) * time.Second,
+ }
+ }
+ }
+}
+
+func ConvertLabelsToMap(labels []prompb.Label) map[string]string {
+ labelMap := make(map[string]string)
+ for _, label := range labels {
+ labelMap[label.Name] = label.Value
+ }
+ return labelMap
+}
+
+func (p *OpsRampMetrics) calculateTraceOperationError(metricFamilySlice []*io_prometheus_client.MetricFamily) {
+ var labelMap map[string]string
+ uniqueLabelsMap := make(map[string][]prompb.Label)
+ uniqueFailedMap := make(map[string]float64)
+ uniqueSpansMap := make(map[string]float64)
+ for _, metricFamily := range metricFamilySlice {
+ if !p.re.MatchString(metricFamily.GetName()) {
+ continue
+ }
+ if metricFamily.GetName() == "trace_operations_failed" || metricFamily.GetName() == "trace_spans_count" {
+ for _, metric := range metricFamily.GetMetric() {
+ var labels []prompb.Label
+ for _, label := range metric.GetLabel() {
+ labels = append(labels, prompb.Label{
+ Name: label.GetName(),
+ Value: label.GetValue(),
+ })
+ }
+ key := "trace_operations_failed&trace_spans_count&"
+ labelSlice := metric.GetLabel()
+ sort.Slice(labelSlice, func(i, j int) bool {
+ return labelSlice[i].GetName()+labelSlice[i].GetValue() > labelSlice[j].GetName()+labelSlice[j].GetValue()
+ })
+ for _, label := range labelSlice {
+ key += label.GetName() + label.GetValue()
+ }
+ if metricFamily.GetName() == "trace_operations_failed" {
+ uniqueFailedMap[key] = *metric.Counter.Value
+ } else {
+ uniqueSpansMap[key] = *metric.Counter.Value
+ }
+ uniqueLabelsMap[key] = labels
+ }
+ }
+ }
+ for key, _ := range uniqueLabelsMap {
+ labelMap = ConvertLabelsToMap(uniqueLabelsMap[key])
+ p.GaugeWithLabels("trace_operations_error", labelMap, uniqueFailedMap[key]/uniqueSpansMap[key])
+ }
+}
+
+func (p *OpsRampMetrics) Push() (int, error) {
+ metricFamilySlice, err := p.promRegistry.Gather()
+ if err != nil {
+ return -1, err
+ }
+
+ p.calculateTraceOperationError(metricFamilySlice)
+
+ metricFamilySlice, err = p.promRegistry.Gather()
+ if err != nil {
+ return -1, err
+ }
+
+ presentTime := time.Now().UnixMilli()
+
+ var timeSeries []prompb.TimeSeries
+
+ for _, metricFamily := range metricFamilySlice {
+ if !p.re.MatchString(metricFamily.GetName()) {
+ continue
+ }
+ for _, metric := range metricFamily.GetMetric() {
+ labels := []prompb.Label{
+ {
+ Name: model.JobLabel,
+ Value: p.prefix,
+ },
+ {
+ Name: "hostname",
+ Value: hostname,
+ },
+ }
+ for _, label := range metric.GetLabel() {
+ labels = append(labels, prompb.Label{
+ Name: label.GetName(),
+ Value: label.GetValue(),
+ })
+ }
+
+ switch metricFamily.GetType() {
+ case io_prometheus_client.MetricType_COUNTER:
+ timeSeries = append(timeSeries, prompb.TimeSeries{
+ Labels: append(labels, prompb.Label{
+ Name: model.MetricNameLabel,
+ Value: metricFamily.GetName(),
+ }),
+ Samples: []prompb.Sample{
+ {
+ Value: metric.GetCounter().GetValue(),
+ Timestamp: presentTime,
+ },
+ },
+ })
+ case io_prometheus_client.MetricType_GAUGE:
+ timeSeries = append(timeSeries, prompb.TimeSeries{
+ Labels: append(labels, prompb.Label{
+ Name: model.MetricNameLabel,
+ Value: metricFamily.GetName(),
+ }),
+ Samples: []prompb.Sample{
+ {
+ Value: metric.GetGauge().GetValue(),
+ Timestamp: presentTime,
+ },
+ },
+ })
+ case io_prometheus_client.MetricType_HISTOGRAM:
+ // samples for all the buckets
+ for _, bucket := range metric.GetHistogram().GetBucket() {
+ timeSeries = append(timeSeries, prompb.TimeSeries{
+ Labels: append(labels, []prompb.Label{
+ {
+ Name: model.MetricNameLabel,
+ Value: metricFamily.GetName(),
+ },
+ {
+ Name: model.BucketLabel,
+ Value: fmt.Sprintf("%v", bucket.GetUpperBound()),
+ },
+ }...),
+ Samples: []prompb.Sample{
+ {
+ Value: float64(bucket.GetCumulativeCount()),
+ Timestamp: presentTime,
+ },
+ },
+ })
+ }
+ // samples for count and sum
+ timeSeries = append(timeSeries, prompb.TimeSeries{
+ Labels: append(labels, prompb.Label{
+ Name: model.MetricNameLabel,
+ Value: fmt.Sprintf("%s_sum", metricFamily.GetName()),
+ }),
+ Samples: []prompb.Sample{
+ {
+ Value: metric.GetHistogram().GetSampleSum(),
+ Timestamp: presentTime,
+ },
+ },
+ })
+ timeSeries = append(timeSeries, prompb.TimeSeries{
+ Labels: append(labels, prompb.Label{
+ Name: model.MetricNameLabel,
+ Value: fmt.Sprintf("%s_count", metricFamily.GetName()),
+ }),
+ Samples: []prompb.Sample{
+ {
+ Value: float64(metric.GetHistogram().GetSampleCount()),
+ Timestamp: presentTime,
+ },
+ },
+ })
+ case io_prometheus_client.MetricType_SUMMARY:
+ // samples for all the quantiles
+ for _, quantile := range metric.GetSummary().GetQuantile() {
+ timeSeries = append(timeSeries, prompb.TimeSeries{
+ Labels: append(labels, []prompb.Label{
+ {
+ Name: model.MetricNameLabel,
+ Value: metricFamily.GetName(),
+ },
+ {
+ Name: model.QuantileLabel,
+ Value: fmt.Sprintf("%v", quantile.GetQuantile()),
+ },
+ }...),
+ Samples: []prompb.Sample{
+ {
+ Value: quantile.GetValue(),
+ Timestamp: presentTime,
+ },
+ },
+ })
+ }
+ // samples for count and sum
+ timeSeries = append(timeSeries, prompb.TimeSeries{
+ Labels: append(labels, prompb.Label{
+ Name: model.MetricNameLabel,
+ Value: fmt.Sprintf("%s_sum", metricFamily.GetName()),
+ }),
+ Samples: []prompb.Sample{
+ {
+ Value: metric.GetSummary().GetSampleSum(),
+ Timestamp: presentTime,
+ },
+ },
+ })
+ timeSeries = append(timeSeries, prompb.TimeSeries{
+ Labels: append(labels, prompb.Label{
+ Name: model.MetricNameLabel,
+ Value: fmt.Sprintf("%s_count", metricFamily.GetName()),
+ }),
+ Samples: []prompb.Sample{
+ {
+ Value: float64(metric.GetSummary().GetSampleCount()),
+ Timestamp: presentTime,
+ },
+ },
+ })
+ }
+ }
+ }
+
+ request := prompb.WriteRequest{Timeseries: timeSeries}
+
+ out, err := proto.Marshal(&request)
+
+ if err != nil {
+ return -1, err
+ }
+
+ compressed := snappy.Encode(nil, out)
+
+ URL := fmt.Sprintf("%s/metricsql/api/v7/tenants/%s/metrics", strings.TrimRight(p.apiEndpoint, "/"), p.tenantID)
+
+ req, err := http.NewRequest(http.MethodPost, URL, bytes.NewBuffer(compressed))
+ if err != nil {
+ return -1, err
+ }
+
+ req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
+ req.Header.Set("Content-Encoding", "snappy")
+ req.Header.Set("Content-Type", "application/x-protobuf")
+
+ if !strings.Contains(p.oAuthToken.Scope, "metrics:write") {
+ return -1, fmt.Errorf(missingMetricsWriteScope)
+ }
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", p.oAuthToken.AccessToken))
+
+ resp, err := p.Send(req)
+ if err != nil {
+ return -1, err
+ }
+ defer resp.Body.Close()
+ // Depending on the version and configuration of the PGW, StatusOK or StatusAccepted may be returned.
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ p.Logger.Error().Logf("failed to parse response body Err: %v", err)
+ }
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
+ return resp.StatusCode, fmt.Errorf("unexpected status code %d while pushing: %s", resp.StatusCode, body)
+ }
+ p.Logger.Debug().Logf("metrics push response: %v", string(body))
+
+ return resp.StatusCode, nil
+}
+
+func (p *OpsRampMetrics) RenewOAuthToken() error {
+ p.oAuthToken = new(OpsRampAuthTokenResponse)
+
+ endpoint := fmt.Sprintf("%s/auth/oauth/token", strings.TrimRight(p.authTokenEndpoint, "/"))
+
+ requestBody := strings.NewReader("client_id=" + p.apiKey + "&client_secret=" + p.apiSecret + "&grant_type=client_credentials")
+
+ req, err := http.NewRequest(http.MethodPost, endpoint, requestBody)
+ if err != nil {
+ return err
+ }
+ req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+ req.Header.Add("Accept", "application/json")
+ req.Header.Set("Connection", "close")
+
+ resp, err := p.Client.Do(req)
+ if err != nil {
+ return err
+ }
+
+ respBody, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ err = json.Unmarshal(respBody, p.oAuthToken)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (p *OpsRampMetrics) Send(request *http.Request) (*http.Response, error) {
+ response, err := p.Client.Do(request)
+ if err == nil && response != nil && (response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted) {
+ return response, nil
+ }
+ if response != nil && response.StatusCode == http.StatusProxyAuthRequired { // OpsRamp uses this for bad auth token
+ p.RenewOAuthToken()
+ request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", p.oAuthToken.AccessToken))
+ response, err = p.Client.Do(request)
+ if err == nil && response != nil && (response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted) {
+ return response, nil
+ }
+ }
+ return response, err
+}
diff --git a/metrics/prometheus.go b/metrics/prometheus.go
deleted file mode 100644
index 88fff63425..0000000000
--- a/metrics/prometheus.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package metrics
-
-import (
- "net/http"
- "sync"
-
- "github.com/gorilla/mux"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
- "github.com/prometheus/client_golang/prometheus/promhttp"
-
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
-)
-
-type PromMetrics struct {
- Config config.Config `inject:""`
- Logger logger.Logger `inject:""`
- // metrics keeps a record of all the registered metrics so we can increment
- // them by name
- metrics map[string]interface{}
- lock sync.Mutex
-}
-
-func (p *PromMetrics) Start() error {
- p.Logger.Debug().Logf("Starting PromMetrics")
- defer func() { p.Logger.Debug().Logf("Finished starting PromMetrics") }()
- pc, err := p.Config.GetPrometheusMetricsConfig()
- if err != nil {
- return err
- }
-
- p.metrics = make(map[string]interface{})
-
- muxxer := mux.NewRouter()
-
- muxxer.Handle("/metrics", promhttp.Handler())
- go http.ListenAndServe(pc.MetricsListenAddr, muxxer)
- return nil
-}
-
-// Register takes a name and a metric type. The type should be one of "counter",
-// "gauge", or "histogram"
-func (p *PromMetrics) Register(name string, metricType string) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- newmet, exists := p.metrics[name]
-
- // don't attempt to add the metric again as this will cause a panic
- if exists {
- return
- }
-
- switch metricType {
- case "counter":
- newmet = promauto.NewCounter(prometheus.CounterOpts{
- Name: name,
- Help: name,
- })
- case "gauge":
- newmet = promauto.NewGauge(prometheus.GaugeOpts{
- Name: name,
- Help: name,
- })
- case "histogram":
- newmet = promauto.NewHistogram(prometheus.HistogramOpts{
- Name: name,
- Help: name,
- })
- }
-
- p.metrics[name] = newmet
-}
-
-func (p *PromMetrics) IncrementCounter(name string) {
- if counterIface, ok := p.metrics[name]; ok {
- if counter, ok := counterIface.(prometheus.Counter); ok {
- counter.Inc()
- }
- }
-}
-func (p *PromMetrics) Gauge(name string, val float64) {
- if gaugeIface, ok := p.metrics[name]; ok {
- if gauge, ok := gaugeIface.(prometheus.Gauge); ok {
- gauge.Set(val)
- }
- }
-}
-func (p *PromMetrics) Histogram(name string, obs float64) {
- if histIface, ok := p.metrics[name]; ok {
- if hist, ok := histIface.(prometheus.Histogram); ok {
- hist.Observe(obs)
- }
- }
-}
diff --git a/metrics/prometheus_test.go b/metrics/prometheus_test.go
deleted file mode 100644
index b0c093caf5..0000000000
--- a/metrics/prometheus_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build all race
-
-package metrics
-
-import (
- "testing"
-
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/stretchr/testify/assert"
-)
-
-func TestMultipleRegistrations(t *testing.T) {
- p := &PromMetrics{
- Logger: &logger.MockLogger{},
- Config: &config.MockConfig{},
- }
-
- err := p.Start()
-
- assert.NoError(t, err)
-
- p.Register("test", "counter")
-
- p.Register("test", "counter")
-}
diff --git a/refinery.service b/refinery.service
index 01de188b20..331dd5e8e2 100644
--- a/refinery.service
+++ b/refinery.service
@@ -1,9 +1,9 @@
[Unit]
-Description=Refinery Honeycomb Trace-Aware Sampling Proxy
+Description=tracing-proxy Honeycomb Trace-Aware Sampling Proxy
After=network.target
[Service]
-ExecStart=/usr/bin/refinery -c /etc/refinery/refinery.toml -r /etc/refinery/rules.toml
+ExecStart=/usr/bin/tracing-proxy -c /etc/tracing-proxy/tracing-proxy.toml -r /etc/tracing-proxy/rules.toml
KillMode=process
Restart=on-failure
User=honeycomb
@@ -11,4 +11,4 @@ Group=honeycomb
LimitNOFILE=infinity
[Install]
-Alias=refinery refinery.service
+Alias=tracing-proxy tracing-proxy.service
diff --git a/refinery.upstart b/refinery.upstart
index 7b3b9e817e..e66e885aee 100644
--- a/refinery.upstart
+++ b/refinery.upstart
@@ -1,12 +1,12 @@
-# Upstart job for Refinery, the Honeycomb Trace-Aware Sampling Proxy
-# https://honeycomb.io/
+# Upstart job for tracing-proxy, the Honeycomb Trace-Aware Sampling Proxy
+# https://jirs5/
-description "Refinery Daemon"
-author "Ben Hartshorne "
+description "tracing-proxy Daemon"
+author "Ben Hartshorne "
start on runlevel [2345]
stop on runlevel [!2345]
respawn
-exec su -s /bin/sh -c 'exec "$0" "$@"' honeycomb -- /usr/bin/refinery -c /etc/refinery/refinery.toml -r /etc/refinery/rules.toml
+exec su -s /bin/sh -c 'exec "$0" "$@"' honeycomb -- /usr/bin/tracing-proxy -c /etc/tracing-proxy/tracing-proxy.toml -r /etc/tracing-proxy/rules.toml
diff --git a/route/errors.go b/route/errors.go
index 5e347eccff..bd42f72c0e 100644
--- a/route/errors.go
+++ b/route/errors.go
@@ -4,6 +4,8 @@ import (
"fmt"
"net/http"
"runtime/debug"
+
+ husky "github.com/opsramp/husky/otlp"
)
type handlerError struct {
@@ -33,6 +35,7 @@ var (
ErrUpstreamUnavailable = handlerError{nil, "upstream target unavailable", http.StatusServiceUnavailable, true, true}
ErrReqToEvent = handlerError{nil, "failed to parse event", http.StatusBadRequest, false, true}
ErrBatchToEvent = handlerError{nil, "failed to parse event within batch", http.StatusBadRequest, false, true}
+ ErrInvalidContentType = handlerError{nil, husky.ErrInvalidContentType.Message, husky.ErrInvalidContentType.HTTPStatusCode, false, true}
)
func (r *Router) handlerReturnWithError(w http.ResponseWriter, he handlerError, err error) {
@@ -66,7 +69,7 @@ func (r *Router) handlerReturnWithError(w http.ResponseWriter, he handlerError,
errmsg = ErrGenericMessage
}
- jsonErrMsg := []byte(`{"source":"refinery","error":"` + errmsg + `"}`)
+ jsonErrMsg := []byte(`{"source":"tracing-proxy","error":"` + errmsg + `"}`)
w.Write(jsonErrMsg)
}
diff --git a/route/errors_test.go b/route/errors_test.go
deleted file mode 100644
index 4351dec469..0000000000
--- a/route/errors_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// +build all race
-
-package route
-
-import (
- "errors"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/honeycombio/refinery/logger"
-)
-
-func TestHandlerReturnWithError(t *testing.T) {
- var w *httptest.ResponseRecorder
- var l *logger.MockLogger
- var router *Router
-
- l = &logger.MockLogger{}
- router = &Router{
- Logger: l,
- }
-
- w = httptest.NewRecorder()
- http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- router.handlerReturnWithError(w, ErrCaughtPanic, errors.New("oh no"))
- }).ServeHTTP(w, &http.Request{})
-
- if len(l.Events) != 1 {
- t.Fail()
- }
-
- e := l.Events[0]
-
- if _, ok := e.Fields["error.stack_trace"]; !ok {
- t.Error("expected fields to contain error.stack_trace", e.Fields)
- }
-}
diff --git a/route/middleware.go b/route/middleware.go
index e28eae0a29..4024f317cb 100644
--- a/route/middleware.go
+++ b/route/middleware.go
@@ -2,14 +2,13 @@ package route
import (
"context"
- "errors"
"fmt"
"math/rand"
"net/http"
"time"
"github.com/gorilla/mux"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/types"
)
// for generating request IDs
@@ -17,35 +16,22 @@ func init() {
rand.Seed(time.Now().UnixNano())
}
-func (r *Router) apiKeyChecker(next http.Handler) http.Handler {
+func (r *Router) queryTokenChecker(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- apiKey := req.Header.Get(types.APIKeyHeader)
- if apiKey == "" {
- apiKey = req.Header.Get(types.APIKeyHeaderShort)
- }
- if apiKey == "" {
- err := errors.New("no " + types.APIKeyHeader + " header found from within authing middleware")
+ requiredToken := r.Config.GetQueryAuthToken()
+ if requiredToken == "" {
+ err := fmt.Errorf("/query endpoint is not authorized for use (specify QueryAuthToken in config)")
r.handlerReturnWithError(w, ErrAuthNeeded, err)
- return
}
- allowedKeys, err := r.Config.GetAPIKeys()
- if err != nil {
- r.handlerReturnWithError(w, ErrConfigReadFailed, err)
+
+ token := req.Header.Get(types.QueryTokenHeader)
+ if token == requiredToken {
+ // if they're equal (including both blank) we're good
+ next.ServeHTTP(w, req)
return
}
- for _, key := range allowedKeys {
- if key == "*" {
- // all keys are allowed, it's all good
- next.ServeHTTP(w, req)
- return
- }
- if apiKey == key {
- // we're in the whitelist, it's all good
- next.ServeHTTP(w, req)
- return
- }
- }
- err = errors.New(fmt.Sprintf("api key %s not found in list of authed keys", apiKey))
+
+ err := fmt.Errorf("token %s found in %s not authorized for query", token, types.QueryTokenHeader)
r.handlerReturnWithError(w, ErrAuthNeeded, err)
})
}
@@ -78,7 +64,7 @@ func (r *Router) panicCatcher(next http.Handler) http.Handler {
})
}
-// requestLogger logs one line debug per request that comes through Refinery
+// requestLogger logs one line debug per request that comes through tracing-proxy
func (r *Router) requestLogger(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
arrivalTime := time.Now()
@@ -104,15 +90,12 @@ func (r *Router) requestLogger(next http.Handler) http.Handler {
}
func (r *Router) setResponseHeaders(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-
- // Set content type header early so it's before any calls to WriteHeader
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { // Set content type header early so it's before any calls to WriteHeader
w.Header().Set("Content-Type", "application/json")
// Allow cross-origin API operation from browser js
w.Header().Set("Access-Control-Allow-Origin", "*")
next.ServeHTTP(w, req)
-
})
}
diff --git a/route/mock.go b/route/mock.go
new file mode 100644
index 0000000000..a2c8abd9fa
--- /dev/null
+++ b/route/mock.go
@@ -0,0 +1,17 @@
+package route
+
+import "google.golang.org/grpc/health/grpc_health_v1"
+
+type MockGRPCHealthWatchServer struct {
+ grpc_health_v1.Health_WatchServer
+ sentMessages []*grpc_health_v1.HealthCheckResponse
+}
+
+func (m *MockGRPCHealthWatchServer) Send(msg *grpc_health_v1.HealthCheckResponse) error {
+ m.sentMessages = append(m.sentMessages, msg)
+ return nil
+}
+
+func (m *MockGRPCHealthWatchServer) GetSentMessages() []*grpc_health_v1.HealthCheckResponse {
+ return m.sentMessages
+}
diff --git a/route/otlp_trace.go b/route/otlp_trace.go
new file mode 100644
index 0000000000..a1dfe2dc9a
--- /dev/null
+++ b/route/otlp_trace.go
@@ -0,0 +1,199 @@
+package route
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/opsramp/libtrace-go/proto/proxypb"
+ "github.com/opsramp/libtrace-go/transmission"
+ "net/http"
+ "time"
+
+ huskyotlp "github.com/opsramp/husky/otlp"
+ "github.com/opsramp/tracing-proxy/types"
+
+ collectortrace "github.com/opsramp/husky/proto/otlp/collector/trace/v1"
+)
+
+func (r *Router) postOTLP(w http.ResponseWriter, req *http.Request) {
+ ri := huskyotlp.GetRequestInfoFromHttpHeaders(req.Header)
+
+ if ri.ApiTenantId == "" {
+ ri.ApiTenantId, _ = r.Config.GetTenantId()
+ }
+ if ri.Dataset == "" {
+ ri.Dataset, _ = r.Config.GetDataset()
+ }
+
+ result, err := huskyotlp.TranslateTraceRequestFromReader(req.Body, ri)
+ if err != nil {
+ r.handlerReturnWithError(w, ErrUpstreamFailed, err)
+ return
+ }
+
+ if err := processTraceRequest(req.Context(), r, result.Batches, ri.Dataset, ri.ApiToken, ri.ApiTenantId); err != nil {
+ r.handlerReturnWithError(w, ErrUpstreamFailed, err)
+ }
+}
+
+func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) {
+ ri := huskyotlp.GetRequestInfoFromGrpcMetadata(ctx)
+
+ if ri.ApiTenantId == "" {
+ ri.ApiTenantId, _ = r.Config.GetTenantId()
+ }
+ if ri.Dataset == "" {
+ ri.Dataset, _ = r.Config.GetDataset()
+ }
+
+ r.Metrics.Increment(r.incomingOrPeer + "_router_batch")
+
+ result, err := huskyotlp.TranslateTraceRequest(req, ri)
+ if err != nil {
+ return nil, huskyotlp.AsGRPCError(err)
+ }
+
+ if err := processTraceRequest(ctx, r, result.Batches, ri.Dataset, ri.ApiToken, ri.ApiTenantId); err != nil {
+ return nil, huskyotlp.AsGRPCError(err)
+ }
+
+ return &collectortrace.ExportTraceServiceResponse{}, nil
+}
+
+func processTraceRequest(
+ ctx context.Context,
+ router *Router,
+ batches []huskyotlp.Batch,
+ datasetName string,
+ token string,
+ tenantID string) error {
+ var requestID types.RequestIDContextKey
+ apiHost, err := router.Config.GetOpsrampAPI()
+ if err != nil {
+ router.Logger.Error().Logf("Unable to retrieve APIHost from config while processing OTLP batch")
+ return err
+ }
+ datasetName, err = router.Config.GetDataset()
+ if err != nil {
+ router.Logger.Error().Logf("Unable to retrieve DataSet from config while processing OTLP batch")
+ return err
+ }
+
+ for _, batch := range batches {
+ for _, ev := range batch.Events {
+ event := &types.Event{
+ Context: ctx,
+ APIHost: apiHost,
+ APIToken: token,
+ APITenantId: tenantID,
+ Dataset: datasetName,
+ Environment: datasetName,
+ SampleRate: uint(ev.SampleRate),
+ Timestamp: ev.Timestamp,
+ Data: ev.Attributes,
+ }
+ if err = router.processEvent(event, requestID); err != nil {
+ router.Logger.Error().Logf("Error processing event: " + err.Error())
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTraceProxyServiceRequest) (*proxypb.ExportTraceProxyServiceResponse, error) {
+
+ r.Logger.Debug().Logf("Received Trace data from peer")
+ r.Metrics.Increment(r.incomingOrPeer + "_router_batch")
+
+ apiHost, err := r.Config.GetOpsrampAPI()
+ if err != nil {
+ r.Logger.Error().Logf("Unable to retrieve APIHost from config while processing OTLP batch")
+ return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get apihost", Status: "Failed"}, nil
+ }
+ dataset, _ := r.Config.GetDataset()
+ tenantId, _ := r.Config.GetTenantId()
+
+ var requestID types.RequestIDContextKey
+
+ for _, item := range in.Items {
+ timestamp, err := time.Parse(time.RFC3339Nano, item.Timestamp)
+ if err != nil {
+ r.Logger.Error().Logf("failed to parse timestamp: %v", err)
+ continue
+ }
+
+ var data map[string]interface{}
+ inrec, err := json.Marshal(item.Data)
+ if err != nil {
+ r.Logger.Error().Logf("failed to marshal: %v", err)
+ continue
+ }
+ err = json.Unmarshal(inrec, &data)
+ if err != nil {
+ r.Logger.Error().Logf("failed to unmarshal: %v", err)
+ continue
+ }
+
+ // Translate ResourceAttributes, SpanAttributes, EventAttributes from proto format to interface{}
+ attributes := make(map[string]interface{})
+ for _, kv := range item.Data.ResourceAttributes {
+ attributes[kv.Key] = extractKeyValue(kv.Value)
+ }
+ data["resourceAttributes"] = attributes
+
+ attributes = make(map[string]interface{})
+ for _, kv := range item.Data.SpanAttributes {
+ attributes[kv.Key] = extractKeyValue(kv.Value)
+ }
+ data["spanAttributes"] = attributes
+
+ attributes = make(map[string]interface{})
+ for _, kv := range item.Data.EventAttributes {
+ attributes[kv.Key] = extractKeyValue(kv.Value)
+ }
+ data["eventAttributes"] = attributes
+
+ //Type cast start and end time
+ data["startTime"] = item.Data.StartTime
+ data["endTime"] = item.Data.EndTime
+
+ event := &types.Event{
+ Context: ctx,
+ APIHost: apiHost,
+ APITenantId: tenantId,
+ Dataset: dataset,
+ Timestamp: timestamp,
+ Data: data,
+ }
+ if err = r.processEvent(event, requestID); err != nil {
+ r.Logger.Error().Logf("Error processing event: " + err.Error())
+ }
+ }
+ return &proxypb.ExportTraceProxyServiceResponse{Message: "Received Successfully by peer", Status: "Success"}, nil
+}
+
+func (r *Router) Status(context.Context, *proxypb.StatusRequest) (*proxypb.StatusResponse, error) {
+ return &proxypb.StatusResponse{
+ PeerActive: transmission.DefaultAvailability.Status(),
+ }, nil
+}
+
+func extractKeyValue(v *proxypb.AnyValue) string {
+ if x, ok := v.GetValue().(*proxypb.AnyValue_StringValue); ok {
+ return x.StringValue
+ } else if x, ok := v.GetValue().(*proxypb.AnyValue_IntValue); ok {
+ return fmt.Sprintf("%d", x.IntValue)
+ } else if x, ok := v.GetValue().(*proxypb.AnyValue_BoolValue); ok {
+ return fmt.Sprintf("%v", x.BoolValue)
+ } else if x, ok := v.GetValue().(*proxypb.AnyValue_DoubleValue); ok {
+ return fmt.Sprintf("%f", x.DoubleValue)
+ } else if x, ok := v.GetValue().(*proxypb.AnyValue_BytesValue); ok {
+ return fmt.Sprintf("%v", x.BytesValue)
+ } else if x, ok := v.GetValue().(*proxypb.AnyValue_ArrayValue); ok {
+ return x.ArrayValue.String()
+ } else if x, ok := v.GetValue().(*proxypb.AnyValue_KvlistValue); ok {
+ return x.KvlistValue.String()
+ }
+ return v.String()
+}
diff --git a/route/proxy.go b/route/proxy.go
index 7916a4f771..529581f9a8 100644
--- a/route/proxy.go
+++ b/route/proxy.go
@@ -3,7 +3,6 @@ package route
import (
"bytes"
"io"
- "io/ioutil"
"net/http"
"strings"
)
@@ -12,9 +11,9 @@ import (
// response, blocking until it gets one. This is used for all non-event traffic
// (eg team api key verification, markers, etc.)
func (r *Router) proxy(w http.ResponseWriter, req *http.Request) {
- r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_proxied")
+ r.Metrics.Increment(r.incomingOrPeer + "_router_proxied")
r.Logger.Debug().Logf("proxying request for %s", req.URL.Path)
- upstreamTarget, err := r.Config.GetHoneycombAPI()
+ upstreamTarget, err := r.Config.GetOpsrampAPI()
if err != nil {
w.WriteHeader(http.StatusServiceUnavailable)
io.WriteString(w, `{"error":"upstream target unavailable"}`)
@@ -25,7 +24,7 @@ func (r *Router) proxy(w http.ResponseWriter, req *http.Request) {
// let's copy the request over to a new one and
// dispatch it upstream
defer req.Body.Close()
- reqBod, _ := ioutil.ReadAll(req.Body)
+ reqBod, _ := io.ReadAll(req.Body)
buf := bytes.NewBuffer(reqBod)
upstreamReq, err := http.NewRequest(req.Method, upstreamTarget+req.URL.String(), buf)
if err != nil {
diff --git a/route/route.go b/route/route.go
index 9f17268c42..13e7a44236 100644
--- a/route/route.go
+++ b/route/route.go
@@ -7,35 +7,59 @@ import (
"encoding/json"
"errors"
"fmt"
+ proxypb "github.com/opsramp/libtrace-go/proto/proxypb"
+ "github.com/vmihailenco/msgpack/v5"
"io"
- "io/ioutil"
"math"
+ "net"
"net/http"
+ "net/url"
"strconv"
+ "strings"
"sync"
"time"
"github.com/gorilla/mux"
jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/zstd"
- "github.com/vmihailenco/msgpack/v4"
-
- "github.com/honeycombio/refinery/collect"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/sharder"
- "github.com/honeycombio/refinery/transmit"
- "github.com/honeycombio/refinery/types"
+ "github.com/pelletier/go-toml/v2"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/health/grpc_health_v1"
+ "google.golang.org/grpc/keepalive"
+ "gopkg.in/yaml.v2"
+
+ // grpc/gzip compressor, auto registers on import
+ _ "google.golang.org/grpc/encoding/gzip"
+
+ "github.com/opsramp/tracing-proxy/collect"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/sharder"
+ "github.com/opsramp/tracing-proxy/transmit"
+ "github.com/opsramp/tracing-proxy/types"
+
+ collectortrace "github.com/opsramp/husky/proto/otlp/collector/trace/v1"
)
const (
// numZstdDecoders is set statically here - we may make it into a config option
// A normal practice might be to use some multiple of the CPUs, but that goes south
// in kubernetes
- numZstdDecoders = 4
+ numZstdDecoders = 4
+ traceIDShortLength = 8
+ traceIDLongLength = 16
+ GRPCMessageSizeMax int = 5000000 // 5MB
+ defaultSampleRate = 1
+
+ resourceAttributesKey = "resourceAttributes"
+ spanAttributesKey = "spanAttributes"
+ eventAttributesKey = "eventAttributes"
+ unknownService = "unknown_service"
)
+var possibleServiceNames = []string{"service_name", "service.name"}
+
type Router struct {
Config config.Config `inject:""`
Logger logger.Logger `inject:""`
@@ -44,8 +68,7 @@ type Router struct {
PeerTransmission transmit.Transmission `inject:"peerTransmission"`
Sharder sharder.Sharder `inject:""`
Collector collect.Collector `inject:""`
- Metrics metrics.Metrics `inject:""`
-
+ Metrics metrics.Metrics `inject:"metrics"`
// version is set on startup so that the router may answer HTTP requests for
// the version
versionStr string
@@ -61,8 +84,14 @@ type Router struct {
zstdDecoders chan *zstd.Decoder
- server *http.Server
- doneWG sync.WaitGroup
+ server *http.Server
+ grpcServer *grpc.Server
+ doneWG sync.WaitGroup
+
+ // used to identify Router as a OTLP TraceServer
+ collectortrace.UnimplementedTraceServiceServer
+ proxypb.TraceProxyServiceServer
+ environmentCache *environmentCache
}
type BatchResponse struct {
@@ -106,6 +135,7 @@ func (r *Router) LnS(incomingOrPeer string) {
Timeout: time.Second * 10,
Transport: r.HTTPTransport,
}
+ r.environmentCache = newEnvironmentCache(r.Config.GetEnvironmentCacheTTL(), r.lookupEnvironment)
var err error
r.zstdDecoders, err = makeDecoders(numZstdDecoders)
@@ -133,30 +163,58 @@ func (r *Router) LnS(incomingOrPeer string) {
muxxer.HandleFunc("/panic", r.panic).Name("intentional panic")
muxxer.HandleFunc("/version", r.version).Name("report version info")
+ // require a local auth for query usage
+ queryMuxxer := muxxer.PathPrefix("/query/").Methods("GET").Subrouter()
+ queryMuxxer.Use(r.queryTokenChecker)
+
+ queryMuxxer.HandleFunc("/trace/{traceID}", r.debugTrace).Name("get debug information for given trace ID")
+ queryMuxxer.HandleFunc("/rules/{format}/{dataset}", r.getSamplerRules).Name("get formatted sampler rules for given dataset")
+ queryMuxxer.HandleFunc("/allrules/{format}", r.getAllSamplerRules).Name("get formatted sampler rules for all datasets")
+ queryMuxxer.HandleFunc("/configmetadata", r.getConfigMetadata).Name("get configuration metadata")
+
// require an auth header for events and batches
authedMuxxer := muxxer.PathPrefix("/1/").Methods("POST").Subrouter()
- authedMuxxer.Use(r.apiKeyChecker)
// handle events and batches
authedMuxxer.HandleFunc("/events/{datasetName}", r.event).Name("event")
authedMuxxer.HandleFunc("/batch/{datasetName}", r.batch).Name("batch")
+ // require an auth header for OTLP requests
+ otlpMuxxer := muxxer.PathPrefix("/v1/").Methods("POST").Subrouter()
+
+ // handle OTLP trace requests
+ otlpMuxxer.HandleFunc("/traces", r.postOTLP).Name("otlp")
+
// pass everything else through unmolested
muxxer.PathPrefix("/").HandlerFunc(r.proxy).Name("proxy")
- var listenAddr string
+ var listenAddr, grpcAddr, grpcPeerAddr string
if r.incomingOrPeer == "incoming" {
listenAddr, err = r.Config.GetListenAddr()
if err != nil {
r.iopLogger.Error().Logf("failed to get listen addr config: %s", err)
return
}
+ // GRPC listen addr is optional, err means addr was not empty and invalid
+ grpcAddr, err = r.Config.GetGRPCListenAddr()
+ if err != nil {
+ r.iopLogger.Error().Logf("failed to get grpc listen addr config: %s", err)
+ return
+ }
} else {
listenAddr, err = r.Config.GetPeerListenAddr()
if err != nil {
r.iopLogger.Error().Logf("failed to get peer listen addr config: %s", err)
return
}
+
+ // GRPC listen addr is optional, err means addr was not empty and invalid
+ grpcPeerAddr, err = r.Config.GetGRPCPeerListenAddr()
+ if err != nil {
+ r.iopLogger.Error().Logf("failed to get grpc listen addr config: %s", err)
+ return
+ }
+
}
r.iopLogger.Info().Logf("Listening on %s", listenAddr)
@@ -165,6 +223,51 @@ func (r *Router) LnS(incomingOrPeer string) {
Handler: muxxer,
}
+ if len(grpcAddr) > 0 {
+ l, err := net.Listen("tcp", grpcAddr)
+ if err != nil {
+ r.iopLogger.Error().Logf("failed to listen to grpc addr: " + grpcAddr)
+ }
+
+ r.iopLogger.Info().Logf("gRPC listening on %s", grpcAddr)
+ serverOpts := []grpc.ServerOption{
+ grpc.MaxSendMsgSize(GRPCMessageSizeMax), // default is math.MaxInt32
+ grpc.MaxRecvMsgSize(GRPCMessageSizeMax), // default is 4MB
+ grpc.KeepaliveParams(keepalive.ServerParameters{
+ MaxConnectionIdle: r.Config.GetGRPCMaxConnectionIdle(),
+ MaxConnectionAge: r.Config.GetGRPCMaxConnectionAge(),
+ MaxConnectionAgeGrace: r.Config.GetGRPCMaxConnectionAgeGrace(),
+ Time: r.Config.GetGRPCTime(),
+ Timeout: r.Config.GetGRPCTimeout(),
+ }),
+ }
+ r.grpcServer = grpc.NewServer(serverOpts...)
+ collectortrace.RegisterTraceServiceServer(r.grpcServer, r)
+ grpc_health_v1.RegisterHealthServer(r.grpcServer, r)
+ go r.grpcServer.Serve(l)
+ }
+
+ if len(grpcPeerAddr) > 0 {
+ l, err := net.Listen("tcp", grpcPeerAddr)
+ if err != nil {
+ r.iopLogger.Error().Logf("failed to listen to grpc peer addr: " + grpcPeerAddr)
+ }
+
+ r.iopLogger.Info().Logf("gRPC Peer listening on %s", grpcPeerAddr)
+ serverOpts := []grpc.ServerOption{
+ grpc.MaxSendMsgSize(GRPCMessageSizeMax), // default is math.MaxInt32
+ grpc.MaxRecvMsgSize(GRPCMessageSizeMax), // default is 4MB
+ grpc.KeepaliveParams(keepalive.ServerParameters{
+ Time: 10 * time.Second,
+ Timeout: 2 * time.Second,
+ MaxConnectionIdle: time.Minute,
+ }),
+ }
+ r.grpcServer = grpc.NewServer(serverOpts...)
+ proxypb.RegisterTraceProxyServiceServer(r.grpcServer, r)
+ go r.grpcServer.Serve(l)
+ }
+
r.doneWG.Add(1)
go func() {
defer r.doneWG.Done()
@@ -177,18 +280,23 @@ func (r *Router) LnS(incomingOrPeer string) {
}
func (r *Router) Stop() error {
- ctx, _ := context.WithTimeout(context.Background(), time.Minute)
+ ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+ defer cancel()
+
err := r.server.Shutdown(ctx)
if err != nil {
return err
}
+ if r.grpcServer != nil {
+ r.grpcServer.GracefulStop()
+ }
r.doneWG.Wait()
return nil
}
func (r *Router) alive(w http.ResponseWriter, req *http.Request) {
r.iopLogger.Debug().Logf("answered /x/alive check")
- w.Write([]byte(`{"source":"refinery","alive":"yes"}`))
+ w.Write([]byte(`{"source":"tracing-proxy","alive":"yes"}`))
}
func (r *Router) panic(w http.ResponseWriter, req *http.Request) {
@@ -196,12 +304,80 @@ func (r *Router) panic(w http.ResponseWriter, req *http.Request) {
}
func (r *Router) version(w http.ResponseWriter, req *http.Request) {
- w.Write([]byte(fmt.Sprintf(`{"source":"refinery","version":"%s"}`, r.versionStr)))
+ w.Write([]byte(fmt.Sprintf(`{"source":"tracing-proxy","version":"%s"}`, r.versionStr)))
+}
+
+func (r *Router) debugTrace(w http.ResponseWriter, req *http.Request) {
+ traceID := mux.Vars(req)["traceID"]
+ shard := r.Sharder.WhichShard(traceID)
+ w.Write([]byte(fmt.Sprintf(`{"traceID":"%s","node":"%s"}`, traceID, shard.GetAddress())))
+}
+
+func (r *Router) getSamplerRules(w http.ResponseWriter, req *http.Request) {
+ format := strings.ToLower(mux.Vars(req)["format"])
+ dataset := mux.Vars(req)["dataset"]
+ cfg, name, err := r.Config.GetSamplerConfigForDataset(dataset)
+ if err != nil {
+ w.Write([]byte(fmt.Sprintf("got error %v trying to fetch config for dataset %s\n", err, dataset)))
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ r.marshalToFormat(w, map[string]interface{}{name: cfg}, format)
+}
+
+func (r *Router) getAllSamplerRules(w http.ResponseWriter, req *http.Request) {
+ format := strings.ToLower(mux.Vars(req)["format"])
+ cfgs, err := r.Config.GetAllSamplerRules()
+ if err != nil {
+ w.Write([]byte(fmt.Sprintf("got error %v trying to fetch configs", err)))
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ r.marshalToFormat(w, cfgs, format)
+}
+
+func (r *Router) getConfigMetadata(w http.ResponseWriter, req *http.Request) {
+ cm := r.Config.GetConfigMetadata()
+ r.marshalToFormat(w, cm, "json")
+}
+
+func (r *Router) marshalToFormat(w http.ResponseWriter, obj interface{}, format string) {
+ var body []byte
+ var err error
+ switch format {
+ case "json":
+ body, err = json.Marshal(obj)
+ if err != nil {
+ w.Write([]byte(fmt.Sprintf("got error %v trying to marshal to json\n", err)))
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ case "toml":
+ body, err = toml.Marshal(obj)
+ if err != nil {
+ w.Write([]byte(fmt.Sprintf("got error %v trying to marshal to toml\n", err)))
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ case "yaml":
+ body, err = yaml.Marshal(obj)
+ if err != nil {
+ w.Write([]byte(fmt.Sprintf("got error %v trying to marshal to yaml\n", err)))
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ default:
+ w.Write([]byte(fmt.Sprintf("invalid format '%s' when marshaling\n", format)))
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ w.Header().Set("Content-Type", "application/"+format)
+ w.Write(body)
}
// event is handler for /1/event/
func (r *Router) event(w http.ResponseWriter, req *http.Request) {
- r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_event")
+ r.Metrics.Increment(r.incomingOrPeer + "_router_event")
defer req.Body.Close()
bodyReader, err := r.getMaybeCompressedBody(req)
@@ -210,7 +386,7 @@ func (r *Router) event(w http.ResponseWriter, req *http.Request) {
return
}
- reqBod, err := ioutil.ReadAll(bodyReader)
+ reqBod, err := io.ReadAll(bodyReader)
if err != nil {
r.handlerReturnWithError(w, ErrPostBody, err)
return
@@ -233,9 +409,6 @@ func (r *Router) event(w http.ResponseWriter, req *http.Request) {
func (r *Router) requestToEvent(req *http.Request, reqBod []byte) (*types.Event, error) {
// get necessary bits out of the incoming event
apiKey := req.Header.Get(types.APIKeyHeader)
- if apiKey == "" {
- apiKey = req.Header.Get(types.APIKeyHeaderShort)
- }
sampleRate, err := strconv.Atoi(req.Header.Get(types.SampleRateHeader))
if err != nil {
sampleRate = 1
@@ -244,10 +417,17 @@ func (r *Router) requestToEvent(req *http.Request, reqBod []byte) (*types.Event,
vars := mux.Vars(req)
dataset := vars["datasetName"]
- apiHost, err := r.Config.GetHoneycombAPI()
+ apiHost, err := r.Config.GetOpsrampAPI()
if err != nil {
return nil, err
}
+
+ // get environment name - will be empty for legacy keys
+ environment, err := r.getEnvironmentName(apiKey)
+ if err != nil {
+ return nil, err
+ }
+
data := map[string]interface{}{}
err = unmarshal(req, bytes.NewReader(reqBod), &data)
if err != nil {
@@ -255,18 +435,19 @@ func (r *Router) requestToEvent(req *http.Request, reqBod []byte) (*types.Event,
}
return &types.Event{
- Context: req.Context(),
- APIHost: apiHost,
- APIKey: apiKey,
- Dataset: dataset,
- SampleRate: uint(sampleRate),
- Timestamp: eventTime,
- Data: data,
+ Context: req.Context(),
+ APIHost: apiHost,
+ APIKey: apiKey,
+ Dataset: dataset,
+ Environment: environment,
+ SampleRate: uint(sampleRate),
+ Timestamp: eventTime,
+ Data: data,
}, nil
}
func (r *Router) batch(w http.ResponseWriter, req *http.Request) {
- r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_batch")
+ r.Metrics.Increment(r.incomingOrPeer + "_router_batch")
defer req.Body.Close()
reqID := req.Context().Value(types.RequestIDContextKey{})
@@ -278,7 +459,7 @@ func (r *Router) batch(w http.ResponseWriter, req *http.Request) {
return
}
- reqBod, err := ioutil.ReadAll(bodyReader)
+ reqBod, err := io.ReadAll(bodyReader)
if err != nil {
r.handlerReturnWithError(w, ErrPostBody, err)
return
@@ -292,9 +473,17 @@ func (r *Router) batch(w http.ResponseWriter, req *http.Request) {
return
}
+ apiKey := req.Header.Get(types.APIKeyHeader)
+
+ // get environment name - will be empty for legacy keys
+ environment, err := r.getEnvironmentName(apiKey)
+ if err != nil {
+ r.handlerReturnWithError(w, ErrReqToEvent, err)
+ }
+
batchedResponses := make([]*BatchResponse, 0, len(batchedEvents))
for _, bev := range batchedEvents {
- ev, err := r.batchedEventToEvent(req, bev)
+ ev, err := r.batchedEventToEvent(req, bev, apiKey, environment)
if err != nil {
batchedResponses = append(
batchedResponses,
@@ -334,19 +523,42 @@ func (r *Router) processEvent(ev *types.Event, reqID interface{}) error {
debugLog := r.iopLogger.Debug().
WithField("request_id", reqID).
WithString("api_host", ev.APIHost).
- WithString("dataset", ev.Dataset)
+ WithString("dataset", ev.Dataset).
+ WithString("environment", ev.Environment)
+
+ // adding additional attributes to resource attributes
+ resAttr, ok := ev.Data[resourceAttributesKey].(map[string]interface{})
+ if !ok {
+ resAttr = map[string]interface{}{}
+ }
+ for key, value := range r.Config.GetAddAdditionalMetadata() {
+ if _, ok := resAttr[key]; !ok {
+ resAttr[key] = value
+ }
+ }
+ isUnknownService := true
+ for _, key := range possibleServiceNames {
+ if _, ok := resAttr[key]; ok {
+ isUnknownService = false
+ break
+ }
+ }
+ if isUnknownService {
+ resAttr[possibleServiceNames[0]] = unknownService
+ }
+ ev.Data[resourceAttributesKey] = resAttr
// extract trace ID, route to self or peer, pass on to collector
// TODO make trace ID field configurable
var traceID string
- if trID, ok := ev.Data["trace.trace_id"]; ok {
+ if trID, ok := ev.Data["traceTraceID"]; ok {
traceID = trID.(string)
} else if trID, ok := ev.Data["traceId"]; ok {
traceID = trID.(string)
}
if traceID == "" {
// not part of a trace. send along upstream
- r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_nonspan")
+ r.Metrics.Increment(r.incomingOrPeer + "_router_nonspan")
debugLog.WithString("api_host", ev.APIHost).
WithString("dataset", ev.Dataset).
Logf("sending non-trace event from batch")
@@ -358,7 +570,7 @@ func (r *Router) processEvent(ev *types.Event, reqID interface{}) error {
// ok, we're a span. Figure out if we should handle locally or pass on to a peer
targetShard := r.Sharder.WhichShard(traceID)
if r.incomingOrPeer == "incoming" && !targetShard.Equals(r.Sharder.MyShard()) {
- r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_peer")
+ r.Metrics.Increment(r.incomingOrPeer + "_router_peer")
debugLog.WithString("peer", targetShard.GetAddress()).
Logf("Sending span from batch to my peer")
ev.APIHost = targetShard.GetAddress()
@@ -382,12 +594,12 @@ func (r *Router) processEvent(ev *types.Event, reqID interface{}) error {
err = r.Collector.AddSpanFromPeer(span)
}
if err != nil {
- r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_dropped")
+ r.Metrics.Increment(r.incomingOrPeer + "_router_dropped")
debugLog.Logf("Dropping span from batch, channel full")
return err
}
- r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_span")
+ r.Metrics.Increment(r.incomingOrPeer + "_router_span")
debugLog.Logf("Accepting span from batch for collection into a trace")
return nil
}
@@ -430,12 +642,7 @@ func (r *Router) getMaybeCompressedBody(req *http.Request) (io.Reader, error) {
return reader, nil
}
-func (r *Router) batchedEventToEvent(req *http.Request, bev batchedEvent) (*types.Event, error) {
- apiKey := req.Header.Get(types.APIKeyHeader)
- if apiKey == "" {
- apiKey = req.Header.Get(types.APIKeyHeaderShort)
- }
-
+func (r *Router) batchedEventToEvent(req *http.Request, bev batchedEvent, apiKey string, environment string) (*types.Event, error) {
sampleRate := bev.SampleRate
if sampleRate == 0 {
sampleRate = 1
@@ -445,18 +652,19 @@ func (r *Router) batchedEventToEvent(req *http.Request, bev batchedEvent) (*type
// once for the entire batch instead of in every event.
vars := mux.Vars(req)
dataset := vars["datasetName"]
- apiHost, err := r.Config.GetHoneycombAPI()
+ apiHost, err := r.Config.GetOpsrampAPI()
if err != nil {
return nil, err
}
return &types.Event{
- Context: req.Context(),
- APIHost: apiHost,
- APIKey: apiKey,
- Dataset: dataset,
- SampleRate: uint(sampleRate),
- Timestamp: eventTime,
- Data: bev.Data,
+ Context: req.Context(),
+ APIHost: apiHost,
+ APIKey: apiKey,
+ Dataset: dataset,
+ Environment: environment,
+ SampleRate: uint(sampleRate),
+ Timestamp: eventTime,
+ Data: bev.Data,
}, nil
}
@@ -539,10 +747,156 @@ func makeDecoders(num int) (chan *zstd.Decoder, error) {
func unmarshal(r *http.Request, data io.Reader, v interface{}) error {
switch r.Header.Get("Content-Type") {
case "application/x-msgpack", "application/msgpack":
- return msgpack.NewDecoder(data).
- UseDecodeInterfaceLoose(true).
- Decode(v)
+ dec := msgpack.NewDecoder(data)
+ dec.UseLooseInterfaceDecoding(true)
+
+ return dec.Decode(v)
default:
return jsoniter.NewDecoder(data).Decode(v)
}
}
+
+type environmentCache struct {
+ mutex sync.RWMutex
+ items map[string]*cacheItem
+ ttl time.Duration
+ getFn func(string) (string, error)
+}
+
+func (r *Router) SetEnvironmentCache(ttl time.Duration, getFn func(string) (string, error)) {
+ r.environmentCache = newEnvironmentCache(ttl, getFn)
+}
+
+func newEnvironmentCache(ttl time.Duration, getFn func(string) (string, error)) *environmentCache {
+ return &environmentCache{
+ items: make(map[string]*cacheItem),
+ ttl: ttl,
+ getFn: getFn,
+ }
+}
+
+type cacheItem struct {
+ expiresAt time.Time
+ value string
+}
+
+// get queries the cached items, returning cache hits that have not expired.
+// Cache misses use the configured getFn to populate the cache.
+func (c *environmentCache) get(key string) (string, error) {
+ if item, ok := c.items[key]; ok {
+ if time.Now().Before(item.expiresAt) {
+ return item.value, nil
+ }
+ }
+
+ // get write lock early so we don't execute getFn in parallel, so that
+ // the result will be cached before the next lock is acquired to prevent
+ // subsequent calls to getFn for the same key
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ // check if the cache has been populated while waiting for a write lock
+ if item, ok := c.items[key]; ok {
+ if time.Now().Before(item.expiresAt) {
+ return item.value, nil
+ }
+ }
+
+ val, err := c.getFn(key)
+ if err != nil {
+ return "", err
+ }
+
+ c.addItem(key, val, c.ttl)
+ return val, nil
+}
+
+// addItem create a new cache entry in the environment cache.
+// This is not thread-safe, and should only be used in tests
+func (c *environmentCache) addItem(key string, value string, ttl time.Duration) {
+ c.items[key] = &cacheItem{
+ expiresAt: time.Now().Add(ttl),
+ value: value,
+ }
+}
+
+type TeamInfo struct {
+ Slug string `json:"slug"`
+}
+
+type EnvironmentInfo struct {
+ Slug string `json:"slug"`
+ Name string `json:"name"`
+}
+
+type AuthInfo struct {
+ APIKeyAccess map[string]bool `json:"api_key_access"`
+ Team TeamInfo `json:"team"`
+ Environment EnvironmentInfo `json:"environment"`
+}
+
+func (r *Router) getEnvironmentName(apiKey string) (string, error) {
+ if apiKey == "" || types.IsLegacyAPIKey(apiKey) {
+ return "", nil
+ }
+
+ env, err := r.environmentCache.get(apiKey)
+ if err != nil {
+ return "", err
+ }
+ return env, nil
+}
+
+func (r *Router) lookupEnvironment(apiKey string) (string, error) {
+ apiEndpoint, err := r.Config.GetOpsrampAPI()
+ if err != nil {
+ return "", fmt.Errorf("failed to read Honeycomb API config value. %w", err)
+ }
+ authURL, err := url.Parse(apiEndpoint)
+ if err != nil {
+ return "", fmt.Errorf("failed to parse Honeycomb API URL config value. %w", err)
+ }
+
+ authURL.Path = "/1/auth"
+ req, err := http.NewRequest("GET", authURL.String(), nil)
+ if err != nil {
+ return "", fmt.Errorf("failed to create AuthInfo request. %w", err)
+ }
+
+ req.Header.Set("x-Honeycomb-team", apiKey)
+
+ r.Logger.Debug().WithString("api_key", apiKey).WithString("endpoint", authURL.String()).Logf("Attempting to get environment name using API key")
+ resp, err := r.proxyClient.Do(req)
+ if err != nil {
+ return "", fmt.Errorf("failed sending AuthInfo request to Honeycomb API. %w", err)
+ }
+ defer resp.Body.Close()
+
+ switch {
+ case resp.StatusCode == http.StatusUnauthorized:
+ return "", fmt.Errorf("received 401 response for AuthInfo request from Honeycomb API - check your API key")
+ case resp.StatusCode > 299:
+ return "", fmt.Errorf("received %d response for AuthInfo request from Honeycomb API", resp.StatusCode)
+ }
+
+ authinfo := AuthInfo{}
+ if err := json.NewDecoder(resp.Body).Decode(&authinfo); err != nil {
+ return "", fmt.Errorf("failed to JSON decode of AuthInfo response from Honeycomb API")
+ }
+ r.Logger.Debug().WithString("environment", authinfo.Environment.Name).Logf("Got environment")
+ return authinfo.Environment.Name, nil
+}
+
+func (r *Router) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {
+ r.iopLogger.Debug().Logf("answered grpc_health_v1 check")
+ return &grpc_health_v1.HealthCheckResponse{
+ Status: grpc_health_v1.HealthCheckResponse_SERVING,
+ }, nil
+}
+
+func (r *Router) Watch(req *grpc_health_v1.HealthCheckRequest, server grpc_health_v1.Health_WatchServer) error {
+ r.iopLogger.Debug().Logf("serving grpc_health_v1 watch")
+ return server.Send(&grpc_health_v1.HealthCheckResponse{
+ Status: grpc_health_v1.HealthCheckResponse_SERVING,
+ })
+}
diff --git a/route/route_test.go b/route/route_test.go
deleted file mode 100644
index ee26527a98..0000000000
--- a/route/route_test.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// +build all race
-
-package route
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "strings"
- "testing"
- "time"
-
- "github.com/klauspost/compress/zstd"
- "github.com/vmihailenco/msgpack/v4"
-)
-
-func TestDecompression(t *testing.T) {
- payload := "payload"
- pReader := strings.NewReader(payload)
-
- decoders, err := makeDecoders(numZstdDecoders)
- if err != nil {
- t.Errorf("unexpected err: %s", err.Error())
- }
-
- router := &Router{zstdDecoders: decoders}
- req := &http.Request{
- Body: ioutil.NopCloser(pReader),
- Header: http.Header{},
- }
- reader, err := router.getMaybeCompressedBody(req)
- if err != nil {
- t.Errorf("unexpected err: %s", err.Error())
- }
-
- b, err := ioutil.ReadAll(reader)
- if err != nil {
- t.Errorf("unexpected err: %s", err.Error())
- }
- if string(b) != payload {
- t.Errorf("%s != %s", string(b), payload)
- }
-
- buf := &bytes.Buffer{}
- w := gzip.NewWriter(buf)
- _, err = w.Write([]byte(payload))
- if err != nil {
- t.Errorf("unexpected err: %s", err.Error())
- }
- w.Close()
-
- req.Body = ioutil.NopCloser(buf)
- req.Header.Set("Content-Encoding", "gzip")
- reader, err = router.getMaybeCompressedBody(req)
- if err != nil {
- t.Errorf("unexpected err: %s", err.Error())
- }
-
- b, err = ioutil.ReadAll(reader)
- if err != nil {
- t.Errorf("unexpected err: %s", err.Error())
- }
- if string(b) != payload {
- t.Errorf("%s != %s", string(b), payload)
- }
-
- buf = &bytes.Buffer{}
- zstdW, err := zstd.NewWriter(buf)
- if err != nil {
- t.Errorf("unexpected err: %s", err.Error())
- }
- _, err = zstdW.Write([]byte(payload))
- if err != nil {
- t.Errorf("unexpected err: %s", err.Error())
- }
- zstdW.Close()
-
- req.Body = ioutil.NopCloser(buf)
- req.Header.Set("Content-Encoding", "zstd")
- reader, err = router.getMaybeCompressedBody(req)
- if err != nil {
- t.Errorf("unexpected err: %s", err.Error())
- }
-
- b, err = ioutil.ReadAll(reader)
- if err != nil {
- t.Errorf("unexpected err: %s", err.Error())
- }
- if string(b) != payload {
- t.Errorf("%s != %s", string(b), payload)
- }
-}
-
-func unmarshalRequest(w *httptest.ResponseRecorder, content string, body io.Reader) {
- http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- var data map[string]interface{}
- err := unmarshal(r, r.Body, &data)
- if err != nil {
- w.WriteHeader(http.StatusBadRequest)
- w.Write([]byte(err.Error()))
- return
- }
-
- var traceID string
- if trID, ok := data["trace.trace_id"]; ok {
- traceID = trID.(string)
- } else if trID, ok := data["traceId"]; ok {
- traceID = trID.(string)
- }
-
- w.Write([]byte(traceID))
- }).ServeHTTP(w, &http.Request{
- Body: ioutil.NopCloser(body),
- Header: http.Header{
- "Content-Type": []string{content},
- },
- })
-}
-
-func unmarshalBatchRequest(w *httptest.ResponseRecorder, content string, body io.Reader) {
- http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- var e batchedEvent
- err := unmarshal(r, r.Body, &e)
-
- if err != nil {
- w.Write([]byte(err.Error()))
- w.WriteHeader(http.StatusBadRequest)
- return
- }
-
- w.Write([]byte(e.getEventTime().Format(time.RFC3339Nano)))
- }).ServeHTTP(w, &http.Request{
- Body: ioutil.NopCloser(body),
- Header: http.Header{
- "Content-Type": []string{content},
- },
- })
-}
-
-func TestUnmarshal(t *testing.T) {
- var w *httptest.ResponseRecorder
- var body io.Reader
- now := time.Now().UTC()
-
- w = httptest.NewRecorder()
- body = bytes.NewBufferString("")
- unmarshalRequest(w, "nope", body)
-
- if w.Code != http.StatusBadRequest {
- t.Error("Expecting", http.StatusBadRequest, "Received", w.Code)
- }
-
- w = httptest.NewRecorder()
- body = bytes.NewBufferString(`{"trace.trace_id": "test"}`)
- unmarshalRequest(w, "application/json", body)
-
- if b := w.Body.String(); b != "test" {
- t.Error("Expecting test")
- }
-
- w = httptest.NewRecorder()
- body = bytes.NewBufferString(`{"traceId": "test"}`)
- unmarshalRequest(w, "application/json; charset=utf-8", body)
-
- if b := w.Body.String(); b != "test" {
- t.Error("Expecting test")
- }
-
- w = httptest.NewRecorder()
- body = bytes.NewBufferString(fmt.Sprintf(`{"time": "%s"}`, now.Format(time.RFC3339Nano)))
- unmarshalBatchRequest(w, "application/json", body)
-
- if b := w.Body.String(); b != now.Format(time.RFC3339Nano) {
- t.Error("Expecting", now, "Received", b)
- }
-
- var buf *bytes.Buffer
- var e *msgpack.Encoder
- var in map[string]interface{}
- var err error
-
- w = httptest.NewRecorder()
- buf = &bytes.Buffer{}
- e = msgpack.NewEncoder(buf)
- in = map[string]interface{}{"trace.trace_id": "test"}
- err = e.Encode(in)
-
- if err != nil {
- t.Error(err)
- }
-
- body = buf
- unmarshalRequest(w, "application/msgpack", body)
-
- if b := w.Body.String(); b != "test" {
- t.Error("Expecting test")
- }
-
- w = httptest.NewRecorder()
- buf = &bytes.Buffer{}
- e = msgpack.NewEncoder(buf)
- in = map[string]interface{}{"traceId": "test"}
- err = e.Encode(in)
-
- if err != nil {
- t.Error(err)
- }
-
- body = buf
- unmarshalRequest(w, "application/msgpack", body)
-
- if b := w.Body.String(); b != "test" {
- t.Error("Expecting test")
- }
-
- w = httptest.NewRecorder()
- buf = &bytes.Buffer{}
- e = msgpack.NewEncoder(buf)
- in = map[string]interface{}{"time": now}
- err = e.Encode(in)
-
- if err != nil {
- t.Error(err)
- }
-
- body = buf
- unmarshalBatchRequest(w, "application/msgpack", body)
-
- if b := w.Body.String(); b != now.Format(time.RFC3339Nano) {
- t.Error("Expecting", now, "Received", b)
- }
-}
diff --git a/rules.toml b/rules.toml
deleted file mode 100644
index 9a87e24b29..0000000000
--- a/rules.toml
+++ /dev/null
@@ -1,6 +0,0 @@
-############################
-## Sampling Rules Config ##
-############################
-
-# Defaults for the rules configuration are set in file_config.go.
-# For an example file with various sampling methods and their configurations, see rules_complete.toml.
diff --git a/rules_complete.toml b/rules_complete.toml
deleted file mode 100644
index aabb5f7e06..0000000000
--- a/rules_complete.toml
+++ /dev/null
@@ -1,237 +0,0 @@
-############################
-## Sampling Rules Config ##
-############################
-
-# DryRun - If enabled, marks traces that would be dropped given current sampling rules,
-# and sends all traces regardless
-# DryRun = false
-
-# DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to refinery_kept
-# DryRunFieldName = "refinery_kept"
-
-# DeterministicSampler is a section of the config for manipulating the
-# Deterministic Sampler implementation. This is the simplest sampling algorithm
-# - it is a static sample rate, choosing traces randomly to either keep or send
-# (at the appropriate rate). It is not influenced by the contents of the trace.
-Sampler = "DeterministicSampler"
-
-# SampleRate is the rate at which to sample. It indicates a ratio, where one
-# sample trace is kept for every n traces seen. For example, a SampleRate of 30
-# will keep 1 out of every 30 traces. The choice on whether to keep any specific
-# trace is random, so the rate is approximate.
-# Eligible for live reload.
-SampleRate = 1
-
-[dataset1]
-
- # Note: If your dataset name contains a space, you will have to escape the dataset name
- # using single quotes, such as ['dataset 1']
-
- # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler
- # implementation. This sampler collects the values of a number of fields from a
- # trace and uses them to form a key. This key is handed to the standard dynamic
- # sampler algorithm which generates a sample rate based on the frequency with
- # which that key has appeared in the previous ClearFrequencySec seconds. See
- # https://github.com/honeycombio/dynsampler-go for more detail on the mechanics
- # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from
- # that package.
- Sampler = "DynamicSampler"
-
- # SampleRate is the goal rate at which to sample. It indicates a ratio, where
- # one sample trace is kept for every n traces seen. For example, a SampleRate of
- # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic
- # sampler, who assigns a sample rate for each trace based on the fields selected
- # from that trace.
- # Eligible for live reload.
- SampleRate = 2
-
- # FieldList is a list of all the field names to use to form the key that will be
- # handed to the dynamic sampler. The cardinality of the combination of values
- # from all of these keys should be reasonable in the face of the frequency of
- # those keys. If the combination of fields in these keys essentially makes them
- # unique, the dynamic sampler will do no sampling. If the keys have too few
- # values, you won't get samples of the most interesting traces. A good key
- # selection will have consistent values for high frequency boring traffic and
- # unique values for outliers and interesting traffic. Including an error field
- # (or something like HTTP status code) is an excellent choice. As an example,
- # assuming 30 or so endpoints, a combination of HTTP endpoint and status code
- # would be a good set of keys in order to let you see accurately use of all
- # endpoints and call out when there is failing traffic to any endpoint. Field
- # names may come from any span in the trace.
- # Eligible for live reload.
- FieldList = ["request.method","response.status_code"]
-
- # UseTraceLength will add the number of spans in the trace in to the dynamic
- # sampler as part of the key. The number of spans is exact, so if there are
- # normally small variations in trace length you may want to leave this off. If
- # traces are consistent lengths and changes in trace length is a useful
- # indicator of traces you'd like to see in Honeycomb, set this to true.
- # Eligible for live reload.
- UseTraceLength = true
-
- # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field
- # to the root span of the trace containing the key used by the sampler to decide
- # the sample rate. This can be helpful in understanding why the sampler is
- # making certain decisions about sample rate and help you understand how to
- # better choose the sample rate key (aka the FieldList setting above) to use.
- AddSampleRateKeyToTrace = true
-
- # AddSampleRateKeyToTraceField is the name of the field the sampler will use
- # when adding the sample rate key to the trace. This setting is only used when
- # AddSampleRateKeyToTrace is true.
- AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key"
-
- # ClearFrequencySec is the name of the field the sampler will use to determine
- # the period over which it will calculate the sample rate. This setting defaults
- # to 30.
- # Eligible for live reload.
- ClearFrequencySec = 60
-
-[dataset2]
-
- # EMADynamicSampler is a section of the config for manipulating the Exponential
- # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler,
- # it attempts to average a given sample rate, weighting rare traffic and frequent
- # traffic differently so as to end up with the correct average.
- #
- # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended
- # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs
- # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential
- # Moving Average of counts seen per key, and adjusts this average at regular intervals.
- # The weight applied to more recent intervals is defined by `weight`, a number between
- # (0, 1) - larger values weight the average more toward recent observations. In other words,
- # a larger weight will cause sample rates more quickly adapt to traffic patterns,
- # while a smaller weight will result in sample rates that are less sensitive to bursts or drops
- # in traffic and thus more consistent over time.
- #
- # Keys that are not found in the EMA will always have a sample
- # rate of 1. Keys that occur more frequently will be sampled on a logarithmic
- # curve. In other words, every key will be represented at least once in any
- # given window and more frequent keys will have their sample rate
- # increased proportionally to wind up with the goal sample rate.
- Sampler = "EMADynamicSampler"
-
- # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where
- # one sample trace is kept for every n traces seen. For example, a SampleRate of
- # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic
- # sampler, who assigns a sample rate for each trace based on the fields selected
- # from that trace.
- # Eligible for live reload.
- GoalSampleRate = 2
-
- # FieldList is a list of all the field names to use to form the key that will be
- # handed to the dynamic sampler. The cardinality of the combination of values
- # from all of these keys should be reasonable in the face of the frequency of
- # those keys. If the combination of fields in these keys essentially makes them
- # unique, the dynamic sampler will do no sampling. If the keys have too few
- # values, you won't get samples of the most interesting traces. A good key
- # selection will have consistent values for high frequency boring traffic and
- # unique values for outliers and interesting traffic. Including an error field
- # (or something like HTTP status code) is an excellent choice. As an example,
- # assuming 30 or so endpoints, a combination of HTTP endpoint and status code
- # would be a good set of keys in order to let you see accurately use of all
- # endpoints and call out when there is failing traffic to any endpoint. Field
- # names may come from any span in the trace.
- # Eligible for live reload.
- FieldList = ["request.method","response.status_code"]
-
- # UseTraceLength will add the number of spans in the trace in to the dynamic
- # sampler as part of the key. The number of spans is exact, so if there are
- # normally small variations in trace length you may want to leave this off. If
- # traces are consistent lengths and changes in trace length is a useful
- # indicator of traces you'd like to see in Honeycomb, set this to true.
- # Eligible for live reload.
- UseTraceLength = true
-
- # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field
- # to the root span of the trace containing the key used by the sampler to decide
- # the sample rate. This can be helpful in understanding why the sampler is
- # making certain decisions about sample rate and help you understand how to
- # better choose the sample rate key (aka the FieldList setting above) to use.
- AddSampleRateKeyToTrace = true
-
- # AddSampleRateKeyToTraceField is the name of the field the sampler will use
- # when adding the sample rate key to the trace. This setting is only used when
- # AddSampleRateKeyToTrace is true.
- AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key"
-
- # AdjustmentInterval defines how often (in seconds) we adjust the moving average from
- # recent observations. Default 15s
- # Eligible for live reload.
- AdjustmentInterval = 15
-
- # Weight is a value between (0, 1) indicating the weighting factor used to adjust
- # the EMA. With larger values, newer data will influence the average more, and older
- # values will be factored out more quickly. In mathematical literature concerning EMA,
- # this is referred to as the `alpha` constant.
- # Default is 0.5
- # Eligible for live reload.
- Weight = 0.5
-
- # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA.
- # Once MaxKeys is reached, new keys will not be included in the sample rate map, but
- # existing keys will continue to be be counted. You can use this to keep the sample rate
- # map size under control.
- # Eligible for live reload
- MaxKeys = 0
-
- # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key
- # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to
- # decide what constitutes "zero". Keys with averages below this threshold will be removed
- # from the EMA. Default is the same as Weight, as this prevents a key with the smallest
- # integer value (1) from being aged out immediately. This value should generally be <= Weight,
- # unless you have very specific reasons to set it higher.
- # Eligible for live reload
- AgeOutValue = 0.5
-
- # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define
- # the burst detection threshold. If total counts observed for a given interval exceed the threshold
- # EMA is updated immediately, rather than waiting on the AdjustmentInterval.
- # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles,
- # burst detection will kick in.
- # Eligible for live reload
- BurstMultiple = 2.0
-
- # BurstDetectionDelay indicates the number of intervals to run after Start is called before
- # burst detection kicks in.
- # Defaults to 3
- # Eligible for live reload
- BurstDetectionDelay = 3
-
-[dataset3]
-
- Sampler = "DeterministicSampler"
- SampleRate = 10
-
-[dataset4]
-
- Sampler = "RulesBasedSampler"
-
- [[dataset4.rule]]
- name = "500 errors"
- SampleRate = 1
- [[dataset4.rule.condition]]
- field = "status_code"
- operator = "="
- value = 500
- [[dataset4.rule.condition]]
- field = "duration_ms"
- operator = ">="
- value = 1000.789
-
- [[dataset4.rule]]
- name = "drop 200 responses"
- drop = true
- [[dataset4.rule.condition]]
- field = "status_code"
- operator = "="
- value = 200
-
- [[dataset4.rule]]
- SampleRate = 10 # default when no rules match, if missing defaults to 1
-
-[dataset5]
-
- Sampler = "TotalThroughputSampler"
- GoalThroughputPerSec = 100
- FieldList = "[request.method]"
diff --git a/rules_complete.yaml b/rules_complete.yaml
new file mode 100644
index 0000000000..99cdc4ae03
--- /dev/null
+++ b/rules_complete.yaml
@@ -0,0 +1,258 @@
+############################
+## Sampling Rules Config ##
+############################
+
+# DryRun - If enabled, marks traces that would be dropped given current sampling rules,
+# and sends all traces regardless
+DryRun: false
+
+# DryRunFieldName - the key to add to event data when using DryRun mode above; defaults to trace_proxy_kept
+DryRunFieldName: trace_proxy_kept
+
+# DeterministicSampler is a section of the config for manipulating the
+# Deterministic Sampler implementation. This is the simplest sampling algorithm
+# - it is a static sample rate, choosing traces randomly to either keep or send
+# (at the appropriate rate). It is not influenced by the contents of the trace.
+Sampler: DeterministicSampler
+
+# SampleRate is the rate at which to sample. It indicates a ratio, where one
+# sample trace is kept for every n traces seen. For example, a SampleRate of 30
+# will keep 1 out of every 30 traces. The choice on whether to keep any specific
+# trace is random, so the rate is approximate.
+# Eligible for live reload.
+SampleRate: 1
+
+dataset1:
+
+ # Note: If your dataset name contains a space, you will have to escape the dataset name
+ # using single quotes, such as ['dataset 1']
+
+ # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler
+ # implementation. This sampler collects the values of a number of fields from a
+ # trace and uses them to form a key. This key is handed to the standard dynamic
+ # sampler algorithm which generates a sample rate based on the frequency with
+ # which that key has appeared in the previous ClearFrequencySec seconds.
+ Sampler: DynamicSampler
+
+ # SampleRate is the goal rate at which to sample. It indicates a ratio, where
+ # one sample trace is kept for every n traces seen. For example, a SampleRate of
+ # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic
+ # sampler, who assigns a sample rate for each trace based on the fields selected
+ # from that trace.
+ SampleRate: 2
+
+ # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler.
+ # The combination of values from all of these fields should reflect how interesting the trace is compared to
+ # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for
+ # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent
+ # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of
+ # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is
+ # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a
+ # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can
+ # become interesting when indicating an error) as a good set of fields since it will allow proper sampling
+ # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint.
+ # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of
+ # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces.
+ # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore
+ # interesting traces, like traces that experienced a `500`, might not be sampled.
+ # Field names may come from any span in the trace.
+ FieldList:
+ - request.method
+ - http.target
+ - response.status_code
+
+ # UseTraceLength will add the number of spans in the trace into the dynamic
+ # sampler as part of the key. The number of spans is exact, so if there are
+ # normally small variations in trace length you may want to leave this off. If
+ # traces are consistent lengths and changes in trace length is a useful
+ # indicator of traces you'd like to see in OpsRamp, set this to true.
+ UseTraceLength: true
+
+ # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field
+ # to the root span of the trace containing the key used by the sampler to decide
+ # the sample rate. This can be helpful in understanding why the sampler is
+ # making certain decisions about sample rate and help you understand how to
+ # better choose the sample rate key (aka the FieldList setting above) to use.
+ AddSampleRateKeyToTrace: true
+
+ # AddSampleRateKeyToTraceField is the name of the field the sampler will use
+ # when adding the sample rate key to the trace. This setting is only used when
+ # AddSampleRateKeyToTrace is true.
+ AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key
+
+ # ClearFrequencySec is the duration (in seconds) over which the sampler will
+ # calculate the sample rate. This setting defaults
+ # to 30.
+ ClearFrequencySec: 60
+dataset2:
+
+ # EMADynamicSampler is a section of the config for manipulating the Exponential
+ # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler,
+ # it attempts to average a given sample rate, weighting rare traffic and frequent
+ # traffic differently so as to end up with the correct average.
+ #
+ # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended
+ # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs
+ # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential
+ # Moving Average of counts seen per key, and adjusts this average at regular intervals.
+ # The weight applied to more recent intervals is defined by `weight`, a number between
+ # (0, 1) - larger values weight the average more toward recent observations. In other words,
+ # a larger weight will cause sample rates to adapt more quickly to traffic patterns,
+ # while a smaller weight will result in sample rates that are less sensitive to bursts or drops
+ # in traffic and thus more consistent over time.
+ #
+ # Keys that are not found in the EMA will always have a sample
+ # rate of 1. Keys that occur more frequently will be sampled on a logarithmic
+ # curve. In other words, every key will be represented at least once in any
+ # given window and more frequent keys will have their sample rate
+ # increased proportionally to wind up with the goal sample rate.
+ Sampler: EMADynamicSampler
+
+ # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where
+ # one sample trace is kept for every n traces seen. For example, a SampleRate of
+ # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic
+ # sampler, who assigns a sample rate for each trace based on the fields selected
+ # from that trace.
+ GoalSampleRate: 2
+
+ # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler.
+ # The combination of values from all of these fields should reflect how interesting the trace is compared to
+ # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for
+ # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent
+ # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of
+ # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is
+ # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a
+ # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can
+ # become interesting when indicating an error) as a good set of fields since it will allow proper sampling
+ # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint.
+ # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of
+ # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces.
+ # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore
+ # interesting traces, like traces that experienced a `500`, might not be sampled.
+ # Field names may come from any span in the trace.
+ FieldList:
+ - request.method
+ - http.target
+ - response.status_code
+
+ # UseTraceLength will add the number of spans in the trace into the dynamic
+ # sampler as part of the key. The number of spans is exact, so if there are
+ # normally small variations in trace length you may want to leave this off. If
+ # traces are consistent lengths and changes in trace length is a useful
+ # indicator of traces you'd like to see in OpsRamp, set this to true.
+ UseTraceLength: true
+
+ # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field
+ # to the root span of the trace containing the key used by the sampler to decide
+ # the sample rate. This can be helpful in understanding why the sampler is
+ # making certain decisions about sample rate and help you understand how to
+ # better choose the sample rate key (aka the FieldList setting above) to use.
+ AddSampleRateKeyToTrace: true
+
+ # AddSampleRateKeyToTraceField is the name of the field the sampler will use
+ # when adding the sample rate key to the trace. This setting is only used when
+ # AddSampleRateKeyToTrace is true.
+ AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key
+
+ # AdjustmentInterval defines how often (in seconds) we adjust the moving average from
+ # recent observations. Default 15s
+ AdjustmentInterval: 15
+
+ # Weight is a value between (0, 1) indicating the weighting factor used to adjust
+ # the EMA. With larger values, newer data will influence the average more, and older
+ # values will be factored out more quickly. In mathematical literature concerning EMA,
+ # this is referred to as the `alpha` constant.
+ # Default is 0.5
+ Weight: 0.5
+
+ # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA.
+ # Once MaxKeys is reached, new keys will not be included in the sample rate map, but
+ # existing keys will continue to be counted. You can use this to keep the sample rate
+ # map size under control.
+ MaxKeys: 0
+
+ # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key
+ # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to
+ # decide what constitutes "zero". Keys with averages below this threshold will be removed
+ # from the EMA. Default is the same as Weight, as this prevents a key with the smallest
+ # integer value (1) from being aged out immediately. This value should generally be <= Weight,
+ # unless you have very specific reasons to set it higher.
+ AgeOutValue: 0.5
+
+ # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define
+ # the burst detection threshold. If total counts observed for a given interval exceed the threshold
+ # EMA is updated immediately, rather than waiting on the AdjustmentInterval.
+ # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles,
+ # burst detection will kick in.
+ BurstMultiple: 2
+
+ # BurstDetectionDelay indicates the number of intervals to run after Start is called before
+ # burst detection kicks in.
+ # Defaults to 3
+ BurstDetectionDelay: 3
+dataset3:
+ Sampler: DeterministicSampler
+ SampleRate: 10
+dataset4:
+ Sampler: RulesBasedSampler
+ CheckNestedFields: false
+ rule:
+ - name: drop healthchecks
+ drop: true
+ condition:
+ - field: http.route
+ operator: '='
+ value: /health-check
+ - name: keep slow 500 errors
+ SampleRate: 1
+ condition:
+ - field: status_code
+ operator: '='
+ value: 500
+ - field: duration_ms
+ operator: '>='
+ value: 1000.789
+ - name: dynamically sample 200 responses
+ condition:
+ - field: status_code
+ operator: '='
+ value: 200
+ sampler:
+ EMADynamicSampler:
+ Sampler: EMADynamicSampler
+ GoalSampleRate: 15
+ FieldList:
+ - request.method
+ - request.route
+ AddSampleRateKeyToTrace: true
+ AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key
+ - name: dynamically sample 200 string responses
+ condition:
+ - field: status_code
+ operator: '='
+ value: '200'
+ datatype: int
+ sampler:
+ EMADynamicSampler:
+ Sampler: EMADynamicSampler
+ GoalSampleRate: 15
+ FieldList:
+ - request.method
+ - request.route
+ AddSampleRateKeyToTrace: true
+ AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key
+ - name: sample traces originating from a service
+ Scope: span
+ SampleRate: 5
+ condition:
+ - field: service name
+ operator: '='
+ value: users
+ - field: meta.span_type
+ operator: '='
+ value: root
+ - SampleRate: 10
+dataset5:
+ Sampler: TotalThroughputSampler
+ GoalThroughputPerSec: 100
+ FieldList: '[request.method]'
diff --git a/sample/deterministic.go b/sample/deterministic.go
index cfdd2b816a..3953499c81 100644
--- a/sample/deterministic.go
+++ b/sample/deterministic.go
@@ -2,11 +2,12 @@ package sample
import (
"crypto/sha1"
+ "encoding/binary"
"math"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/types"
)
// shardingSalt is a random bit to make sure we don't shard the same as any
@@ -34,17 +35,11 @@ func (d *DeterministicSampler) Start() error {
return nil
}
-func (d *DeterministicSampler) GetSampleRate(trace *types.Trace) (rate uint, keep bool) {
+func (d *DeterministicSampler) GetSampleRate(trace *types.Trace) (rate uint, keep bool, reason string) {
if d.sampleRate <= 1 {
- return 1, true
+ return 1, true, "deterministic/always"
}
sum := sha1.Sum([]byte(trace.TraceID + shardingSalt))
- v := bytesToUint32be(sum[:4])
- return uint(d.sampleRate), v <= d.upperBound
-}
-
-// bytesToUint32 takes a slice of 4 bytes representing a big endian 32 bit
-// unsigned value and returns the equivalent uint32.
-func bytesToUint32be(b []byte) uint32 {
- return uint32(b[3]) | (uint32(b[2]) << 8) | (uint32(b[1]) << 16) | (uint32(b[0]) << 24)
+ v := binary.BigEndian.Uint32(sum[:4])
+ return uint(d.sampleRate), v <= d.upperBound, "deterministic/chance"
}
diff --git a/sample/deterministic_test.go b/sample/deterministic_test.go
index 01d1f4af53..3067644da5 100644
--- a/sample/deterministic_test.go
+++ b/sample/deterministic_test.go
@@ -1,5 +1,3 @@
-// +build all race
-
package sample
import (
@@ -8,9 +6,9 @@ import (
"github.com/stretchr/testify/assert"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/types"
)
// TestInitialization tests that sample rates are consistently returned
@@ -52,9 +50,10 @@ func TestGetSampleRate(t *testing.T) {
ds.Start()
for i, tst := range tsts {
- rate, keep := ds.GetSampleRate(tst.trace)
+ rate, keep, reason := ds.GetSampleRate(tst.trace)
assert.Equal(t, uint(10), rate, "sample rate should be fixed")
assert.Equal(t, tst.sampled, keep, "%d: trace ID %s should be %v", i, tst.trace.TraceID, tst.sampled)
+ assert.Equal(t, "deterministic/chance", reason)
}
}
diff --git a/sample/dynamic.go b/sample/dynamic.go
index 1192ae3132..7ba9d996a3 100644
--- a/sample/dynamic.go
+++ b/sample/dynamic.go
@@ -5,10 +5,10 @@ import (
dynsampler "github.com/honeycombio/dynsampler-go"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/types"
)
type DynamicSampler struct {
@@ -50,7 +50,7 @@ func (d *DynamicSampler) Start() error {
return nil
}
-func (d *DynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool) {
+func (d *DynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool, string) {
key := d.key.buildAndAdd(trace)
rate := d.dynsampler.GetSampleRate(key)
if rate < 1 { // protect against dynsampler being broken even though it shouldn't be
@@ -64,10 +64,10 @@ func (d *DynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool) {
"trace_id": trace.TraceID,
}).Logf("got sample rate and decision")
if shouldKeep {
- d.Metrics.IncrementCounter("dynsampler_num_kept")
+ d.Metrics.Increment("dynsampler_num_kept")
} else {
- d.Metrics.IncrementCounter("dynsampler_num_dropped")
+ d.Metrics.Increment("dynsampler_num_dropped")
}
d.Metrics.Histogram("dynsampler_sample_rate", float64(rate))
- return uint(rate), shouldKeep
+ return uint(rate), shouldKeep, "dynamic/" + key
}
diff --git a/sample/dynamic_ema.go b/sample/dynamic_ema.go
index f36ac5bcd3..21647512a5 100644
--- a/sample/dynamic_ema.go
+++ b/sample/dynamic_ema.go
@@ -5,10 +5,10 @@ import (
dynsampler "github.com/honeycombio/dynsampler-go"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/types"
)
type EMADynamicSampler struct {
@@ -54,7 +54,7 @@ func (d *EMADynamicSampler) Start() error {
}
d.dynsampler.Start()
- // Register stastics this package will produce
+ // Register statistics this package will produce
d.Metrics.Register("dynsampler_num_dropped", "counter")
d.Metrics.Register("dynsampler_num_kept", "counter")
d.Metrics.Register("dynsampler_sample_rate", "histogram")
@@ -62,7 +62,7 @@ func (d *EMADynamicSampler) Start() error {
return nil
}
-func (d *EMADynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool) {
+func (d *EMADynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool, string) {
key := d.key.buildAndAdd(trace)
rate := d.dynsampler.GetSampleRate(key)
if rate < 1 { // protect against dynsampler being broken even though it shouldn't be
@@ -76,10 +76,10 @@ func (d *EMADynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool) {
"trace_id": trace.TraceID,
}).Logf("got sample rate and decision")
if shouldKeep {
- d.Metrics.IncrementCounter("dynsampler_num_kept")
+ d.Metrics.Increment("dynsampler_num_kept")
} else {
- d.Metrics.IncrementCounter("dynsampler_num_dropped")
+ d.Metrics.Increment("dynsampler_num_dropped")
}
d.Metrics.Histogram("dynsampler_sample_rate", float64(rate))
- return uint(rate), shouldKeep
+ return uint(rate), shouldKeep, "emadynamic/" + key
}
diff --git a/sample/dynamic_ema_test.go b/sample/dynamic_ema_test.go
index c295938038..f1c5bae133 100644
--- a/sample/dynamic_ema_test.go
+++ b/sample/dynamic_ema_test.go
@@ -1,14 +1,12 @@
-// +build all race
-
package sample
import (
"testing"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/types"
"github.com/stretchr/testify/assert"
)
diff --git a/sample/dynamic_test.go b/sample/dynamic_test.go
index f472d234d3..06c59d95a7 100644
--- a/sample/dynamic_test.go
+++ b/sample/dynamic_test.go
@@ -1,14 +1,12 @@
-// +build all race
-
package sample
import (
"testing"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/types"
"github.com/stretchr/testify/assert"
)
diff --git a/sample/rules.go b/sample/rules.go
index 01fa8b4172..535391cb58 100644
--- a/sample/rules.go
+++ b/sample/rules.go
@@ -1,19 +1,22 @@
package sample
import (
+ "encoding/json"
"math/rand"
"strings"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/types"
+ "github.com/tidwall/gjson"
)
type RulesBasedSampler struct {
- Config *config.RulesBasedSamplerConfig
- Logger logger.Logger
- Metrics metrics.Metrics
+ Config *config.RulesBasedSamplerConfig
+ Logger logger.Logger
+ Metrics metrics.Metrics
+ samplers map[string]Sampler
}
func (s *RulesBasedSampler) Start() error {
@@ -24,119 +27,243 @@ func (s *RulesBasedSampler) Start() error {
s.Metrics.Register("rulessampler_num_kept", "counter")
s.Metrics.Register("rulessampler_sample_rate", "histogram")
+ s.samplers = make(map[string]Sampler)
+
+ // Check if any rule has a downstream sampler and create it
+ for _, rule := range s.Config.Rule {
+ for _, cond := range rule.Condition {
+ if err := cond.Init(); err != nil {
+ s.Logger.Debug().WithFields(map[string]interface{}{
+ "rule_name": rule.Name,
+ "condition": cond.String(),
+ }).Logf("error creating rule evaluation function: %s", err)
+ continue
+ }
+ }
+ if rule.Sampler != nil {
+ var sampler Sampler
+ if rule.Sampler.DynamicSampler != nil {
+ sampler = &DynamicSampler{Config: rule.Sampler.DynamicSampler, Logger: s.Logger, Metrics: s.Metrics}
+ } else if rule.Sampler.EMADynamicSampler != nil {
+ sampler = &EMADynamicSampler{Config: rule.Sampler.EMADynamicSampler, Logger: s.Logger, Metrics: s.Metrics}
+ } else if rule.Sampler.TotalThroughputSampler != nil {
+ sampler = &TotalThroughputSampler{Config: rule.Sampler.TotalThroughputSampler, Logger: s.Logger, Metrics: s.Metrics}
+ } else {
+ s.Logger.Debug().WithFields(map[string]interface{}{
+ "rule_name": rule.Name,
+ }).Logf("invalid or missing downstream sampler")
+ continue
+ }
+
+ err := sampler.Start()
+ if err != nil {
+ s.Logger.Debug().WithFields(map[string]interface{}{
+ "rule_name": rule.Name,
+ }).Logf("error creating downstream sampler: %s", err)
+ continue
+ }
+ s.samplers[rule.String()] = sampler
+ }
+ }
return nil
}
-func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep bool) {
+func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep bool, reason string) {
logger := s.Logger.Debug().WithFields(map[string]interface{}{
"trace_id": trace.TraceID,
})
for _, rule := range s.Config.Rule {
- var matched int
- rate := uint(rule.SampleRate)
- keep := !rule.Drop && rule.SampleRate > 0 && rand.Intn(rule.SampleRate) == 0
+ var matched bool
+ var reason string
+
+ switch rule.Scope {
+ case "span":
+ matched = ruleMatchesSpanInTrace(trace, rule, s.Config.CheckNestedFields)
+ reason = "rules/span/"
+ case "trace", "":
+ matched = ruleMatchesTrace(trace, rule, s.Config.CheckNestedFields)
+ reason = "rules/trace/"
+ default:
+ logger.WithFields(map[string]interface{}{
+ "rule_name": rule.Name,
+ "scope": rule.Scope,
+ }).Logf("invalid scope %s given for rule: %s", rule.Scope, rule.Name)
+ matched = true
+ reason = "rules/invalid scope/"
+ }
+
+ if matched {
+ var rate uint
+ var keep bool
+ var samplerReason string
+
+ if rule.Sampler != nil {
+ var sampler Sampler
+ var found bool
+ if sampler, found = s.samplers[rule.String()]; !found {
+ logger.WithFields(map[string]interface{}{
+ "rule_name": rule.Name,
+ }).Logf("could not find downstream sampler for rule: %s", rule.Name)
+ return 1, true, reason + "bad_rule:" + rule.Name
+ }
+ rate, keep, samplerReason = sampler.GetSampleRate(trace)
+ reason += rule.Name + ":" + samplerReason
+ } else {
+ rate = uint(rule.SampleRate)
+ keep = !rule.Drop && rule.SampleRate > 0 && rand.Intn(rule.SampleRate) == 0
+ reason += rule.Name
+ }
- // no condition signifies the default
- if rule.Condition == nil {
s.Metrics.Histogram("rulessampler_sample_rate", float64(rule.SampleRate))
if keep {
- s.Metrics.IncrementCounter("rulessampler_num_kept")
+ s.Metrics.Increment("rulessampler_num_kept")
} else {
- s.Metrics.IncrementCounter("dynsampler_num_dropped")
+ s.Metrics.Increment("rulessampler_num_dropped")
}
logger.WithFields(map[string]interface{}{
"rate": rate,
"keep": keep,
"drop_rule": rule.Drop,
}).Logf("got sample rate and decision")
- return rate, keep
+ return rate, keep, reason
}
+ }
+ return 1, true, "no rule matched"
+}
+
+func ruleMatchesTrace(t *types.Trace, rule *config.RulesBasedSamplerRule, checkNestedFields bool) bool {
+ // We treat a rule with no conditions as a match.
+ if rule.Condition == nil {
+ return true
+ }
+
+ var matched int
+
+ for _, condition := range rule.Condition {
+ span:
+ for _, span := range t.GetSpans() {
+ value, exists := extractValueFromSpan(span, condition, checkNestedFields)
+
+ if conditionMatchesValue(condition, value, exists) {
+ matched++
+ break span
+ }
+ }
+ }
+
+ return matched == len(rule.Condition)
+}
+
+func ruleMatchesSpanInTrace(trace *types.Trace, rule *config.RulesBasedSamplerRule, checkNestedFields bool) bool {
+ // We treat a rule with no conditions as a match.
+ if rule.Condition == nil {
+ return true
+ }
+
+ for _, span := range trace.GetSpans() {
+ ruleMatched := true
for _, condition := range rule.Condition {
- span:
- for _, span := range trace.GetSpans() {
- var match bool
- value, exists := span.Data[condition.Field]
-
- switch exists {
- case true:
- switch condition.Operator {
- case "exists":
- match = exists
- case "!=":
- if comparison, ok := compare(value, condition.Value); ok {
- match = comparison != equal
- }
- case "=":
- if comparison, ok := compare(value, condition.Value); ok {
- match = comparison == equal
- }
- case ">":
- if comparison, ok := compare(value, condition.Value); ok {
- match = comparison == more
- }
- case ">=":
- if comparison, ok := compare(value, condition.Value); ok {
- match = comparison == more || comparison == equal
- }
- case "<":
- if comparison, ok := compare(value, condition.Value); ok {
- match = comparison == less
- }
- case "<=":
- if comparison, ok := compare(value, condition.Value); ok {
- match = comparison == less || comparison == equal
- }
- case "starts-with":
- switch a := value.(type) {
- case string:
- switch b := condition.Value.(type) {
- case string:
- match = strings.HasPrefix(a, b)
- }
- }
- case "contains":
- switch a := value.(type) {
- case string:
- switch b := condition.Value.(type) {
- case string:
- match = strings.Contains(a, b)
- }
- }
- }
- case false:
- switch condition.Operator {
- case "not-exists":
- match = !exists
- }
- }
+ // whether this condition is matched by this span.
+ value, exists := extractValueFromSpan(span, condition, checkNestedFields)
- if match {
- matched++
- break span
- }
+ if !conditionMatchesValue(condition, value, exists) {
+ ruleMatched = false
+ break // if any condition fails, we can't possibly succeed, so exit inner loop early
}
}
+ // If this span was matched by every condition, then the rule as a whole
+ // matches (and we can return)
+ if ruleMatched {
+ return true
+ }
+ }
- if matched == len(rule.Condition) {
- s.Metrics.Histogram("rulessampler_sample_rate", float64(rule.SampleRate))
- if keep {
- s.Metrics.IncrementCounter("rulessampler_num_kept")
- } else {
- s.Metrics.IncrementCounter("dynsampler_num_dropped")
+ // if the rule didn't match above, then it doesn't match the trace.
+ return false
+}
+
+func extractValueFromSpan(span *types.Span, condition *config.RulesBasedSamplerCondition, checkNestedFields bool) (interface{}, bool) {
+ // whether this condition is matched by this span.
+ value, exists := span.Data[condition.Field]
+ if !exists && checkNestedFields {
+ jsonStr, err := json.Marshal(span.Data)
+ if err == nil {
+ result := gjson.Get(string(jsonStr), condition.Field)
+ if result.Exists() {
+ value = result.String()
+ exists = true
}
- logger.WithFields(map[string]interface{}{
- "rate": rate,
- "keep": keep,
- "drop_rule": rule.Drop,
- "rule_name": rule.Name,
- }).Logf("got sample rate and decision")
- return rate, keep
}
}
- return 1, true
+ return value, exists
+}
+
+func conditionMatchesValue(condition *config.RulesBasedSamplerCondition, value interface{}, exists bool) bool {
+ var match bool
+ switch exists {
+ case true:
+ switch condition.Operator {
+ case "exists":
+ match = exists
+ case "!=":
+ if comparison, ok := compare(value, condition.Value); ok {
+ match = comparison != equal
+ }
+ case "=":
+ if comparison, ok := compare(value, condition.Value); ok {
+ match = comparison == equal
+ }
+ case ">":
+ if comparison, ok := compare(value, condition.Value); ok {
+ match = comparison == more
+ }
+ case ">=":
+ if comparison, ok := compare(value, condition.Value); ok {
+ match = comparison == more || comparison == equal
+ }
+ case "<":
+ if comparison, ok := compare(value, condition.Value); ok {
+ match = comparison == less
+ }
+ case "<=":
+ if comparison, ok := compare(value, condition.Value); ok {
+ match = comparison == less || comparison == equal
+ }
+ case "starts-with":
+ switch a := value.(type) {
+ case string:
+ switch b := condition.Value.(type) {
+ case string:
+ match = strings.HasPrefix(a, b)
+ }
+ }
+ case "contains":
+ switch a := value.(type) {
+ case string:
+ switch b := condition.Value.(type) {
+ case string:
+ match = strings.Contains(a, b)
+ }
+ }
+ case "does-not-contain":
+ switch a := value.(type) {
+ case string:
+ switch b := condition.Value.(type) {
+ case string:
+ match = !strings.Contains(a, b)
+ }
+ }
+ }
+ case false:
+ switch condition.Operator {
+ case "not-exists":
+ match = !exists
+ }
+ }
+ return match
}
const (
@@ -146,6 +273,11 @@ const (
)
func compare(a, b interface{}) (int, bool) {
+ // a is the tracing data field value. This can be: float64, int64, bool, or string
+ // b is the Rule condition value. This can be: float64, int64, int, bool, or string
+ // Note: in YAML config parsing, the Value may be returned as int
+ // When comparing numeric values, we need to check across the 3 types: float64, int64, and int
+
if a == nil {
if b == nil {
return equal, true
@@ -161,6 +293,16 @@ func compare(a, b interface{}) (int, bool) {
switch at := a.(type) {
case int64:
switch bt := b.(type) {
+ case int:
+ i := int(at)
+ switch {
+ case i < bt:
+ return less, true
+ case i > bt:
+ return more, true
+ default:
+ return equal, true
+ }
case int64:
switch {
case at < bt:
@@ -183,6 +325,16 @@ func compare(a, b interface{}) (int, bool) {
}
case float64:
switch bt := b.(type) {
+ case int:
+ f := float64(bt)
+ switch {
+ case at < f:
+ return less, true
+ case at > f:
+ return more, true
+ default:
+ return equal, true
+ }
case int64:
f := float64(bt)
switch {
diff --git a/sample/rules_test.go b/sample/rules_test.go
deleted file mode 100644
index 4531f2a69a..0000000000
--- a/sample/rules_test.go
+++ /dev/null
@@ -1,466 +0,0 @@
-// +build all race
-
-package sample
-
-import (
- "testing"
-
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/types"
- "github.com/stretchr/testify/assert"
-)
-
-type TestRulesData struct {
- Rules *config.RulesBasedSamplerConfig
- Spans []*types.Span
- ExpectedRate uint
- ExpectedKeep bool
-}
-
-func TestRules(t *testing.T) {
- data := []TestRulesData{
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "int64equals",
- SampleRate: 10,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "test",
- Operator: "=",
- Value: int64(1),
- },
- },
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "test": int64(1),
- },
- },
- },
- },
- ExpectedKeep: true,
- ExpectedRate: 10,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "int64greaterthan",
- SampleRate: 10,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "test",
- Operator: ">",
- Value: int64(1),
- },
- },
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "test": int64(2),
- },
- },
- },
- },
- ExpectedKeep: true,
- ExpectedRate: 10,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "int64lessthan",
- SampleRate: 10,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "test",
- Operator: "<",
- Value: int64(2),
- },
- },
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "test": int64(1),
- },
- },
- },
- },
- ExpectedKeep: true,
- ExpectedRate: 10,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "int64float64lessthan",
- SampleRate: 10,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "test",
- Operator: "<",
- Value: 2.2,
- },
- },
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "test": int64(1),
- },
- },
- },
- },
- ExpectedKeep: true,
- ExpectedRate: 10,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "rule that wont be hit",
- SampleRate: 0,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "test",
- Operator: ">",
- Value: 2.2,
- },
- },
- },
- {
- Name: "fallback",
- SampleRate: 10,
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "test": int64(1),
- },
- },
- },
- },
- ExpectedKeep: true,
- ExpectedRate: 10,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "multiple matches",
- SampleRate: 10,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "test",
- Operator: "<=",
- Value: 2.2,
- },
- {
- Field: "test",
- Operator: ">=",
- Value: 2.2,
- },
- {
- Field: "test_two",
- Operator: "=",
- Value: true,
- },
- },
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "test": 2.2,
- "test_two": false,
- },
- },
- },
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "test_two": true,
- },
- },
- },
- },
- ExpectedKeep: true,
- ExpectedRate: 10,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "drop",
- Drop: true,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "test",
- Operator: ">",
- Value: int64(2),
- },
- },
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "test": float64(3),
- },
- },
- },
- },
- ExpectedKeep: false,
- ExpectedRate: 0,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "drop everything",
- Drop: true,
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "test": int64(1),
- },
- },
- },
- },
- ExpectedKeep: false,
- ExpectedRate: 0,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "test multiple rules must all be matched",
- SampleRate: 4,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "first",
- Operator: "=",
- Value: int64(1),
- },
- {
- Field: "second",
- Operator: "=",
- Value: int64(2),
- },
- },
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "first": int64(1),
- },
- },
- },
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "first": int64(1),
- },
- },
- },
- },
- ExpectedKeep: true,
- // the trace does not match all the rules so we expect the default sample rate
- ExpectedRate: 1,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "not equal test",
- SampleRate: 4,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "first",
- Operator: "!=",
- Value: int64(10),
- },
- },
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "first": int64(9),
- },
- },
- },
- },
- ExpectedKeep: true,
- ExpectedRate: 4,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "exists test",
- SampleRate: 4,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "first",
- Operator: "exists",
- },
- },
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "first": int64(9),
- },
- },
- },
- },
- ExpectedKeep: true,
- ExpectedRate: 4,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "not exists test",
- SampleRate: 4,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "first",
- Operator: "not-exists",
- },
- },
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "second": int64(9),
- },
- },
- },
- },
- ExpectedKeep: true,
- ExpectedRate: 4,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "starts with test",
- SampleRate: 4,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "first",
- Operator: "starts-with",
- Value: "honey",
- },
- },
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "first": "honeycomb",
- },
- },
- },
- },
- ExpectedKeep: true,
- ExpectedRate: 4,
- },
- {
- Rules: &config.RulesBasedSamplerConfig{
- Rule: []*config.RulesBasedSamplerRule{
- {
- Name: "contains test",
- SampleRate: 4,
- Condition: []*config.RulesBasedSamplerCondition{
- {
- Field: "first",
- Operator: "contains",
- Value: "eyco",
- },
- },
- },
- },
- },
- Spans: []*types.Span{
- {
- Event: types.Event{
- Data: map[string]interface{}{
- "first": "honeycomb",
- },
- },
- },
- },
- ExpectedKeep: true,
- ExpectedRate: 4,
- },
- }
-
- for _, d := range data {
- sampler := &RulesBasedSampler{
- Config: d.Rules,
- Logger: &logger.NullLogger{},
- Metrics: &metrics.NullMetrics{},
- }
-
- trace := &types.Trace{}
-
- for _, span := range d.Spans {
- trace.AddSpan(span)
- }
-
- rate, keep := sampler.GetSampleRate(trace)
-
- assert.Equal(t, d.ExpectedRate, rate, d.Rules)
-
- // we can only test when we don't expect to keep the trace
- if !d.ExpectedKeep {
- assert.Equal(t, d.ExpectedKeep, keep, d.Rules)
- }
- }
-}
diff --git a/sample/sample.go b/sample/sample.go
index f312162f4c..201d8b625b 100644
--- a/sample/sample.go
+++ b/sample/sample.go
@@ -1,29 +1,37 @@
package sample
import (
+ "fmt"
"os"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/types"
)
type Sampler interface {
- GetSampleRate(trace *types.Trace) (rate uint, keep bool)
+ GetSampleRate(trace *types.Trace) (rate uint, keep bool, reason string)
+ Start() error
}
// SamplerFactory is used to create new samplers with common (injected) resources
type SamplerFactory struct {
Config config.Config `inject:""`
Logger logger.Logger `inject:""`
- Metrics metrics.Metrics `inject:""`
+ Metrics metrics.Metrics `inject:"metrics"`
}
-// GetSamplerImplementationForDataset returns the sampler implementation for the dataset,
-// or nil if it is not defined
-func (s *SamplerFactory) GetSamplerImplementationForDataset(dataset string) Sampler {
- c, err := s.Config.GetSamplerConfigForDataset(dataset)
+// GetSamplerImplementationForKey returns the sampler implementation for the given
+// samplerKey (dataset for legacy keys, environment otherwise), or nil if it is not defined
+func (s *SamplerFactory) GetSamplerImplementationForKey(samplerKey string, isLegacyKey bool) Sampler {
+ if isLegacyKey {
+ if prefix := s.Config.GetDatasetPrefix(); prefix != "" {
+ samplerKey = fmt.Sprintf("%s.%s", prefix, samplerKey)
+ }
+ }
+
+ c, _, err := s.Config.GetSamplerConfigForDataset(samplerKey)
if err != nil {
return nil
}
@@ -32,31 +40,27 @@ func (s *SamplerFactory) GetSamplerImplementationForDataset(dataset string) Samp
switch c := c.(type) {
case *config.DeterministicSamplerConfig:
- ds := &DeterministicSampler{Config: c, Logger: s.Logger}
- ds.Start()
- sampler = ds
+ sampler = &DeterministicSampler{Config: c, Logger: s.Logger}
case *config.DynamicSamplerConfig:
- ds := &DynamicSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics}
- ds.Start()
- sampler = ds
+ sampler = &DynamicSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics}
case *config.EMADynamicSamplerConfig:
- ds := &EMADynamicSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics}
- ds.Start()
- sampler = ds
+ sampler = &EMADynamicSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics}
case *config.RulesBasedSamplerConfig:
- ds := &RulesBasedSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics}
- ds.Start()
- sampler = ds
+ sampler = &RulesBasedSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics}
case *config.TotalThroughputSamplerConfig:
- ds := &TotalThroughputSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics}
- ds.Start()
- sampler = ds
+ sampler = &TotalThroughputSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics}
default:
s.Logger.Error().Logf("unknown sampler type %T. Exiting.", c)
os.Exit(1)
}
- s.Logger.Debug().WithField("dataset", dataset).Logf("created implementation for sampler type %T", c)
+ err = sampler.Start()
+ if err != nil {
+ s.Logger.Debug().WithField("dataset", samplerKey).Logf("failed to start sampler")
+ return nil
+ }
+
+ s.Logger.Debug().WithField("dataset", samplerKey).Logf("created implementation for sampler type %T", c)
return sampler
}
diff --git a/sample/sample_test.go b/sample/sample_test.go
index 184c9dc239..ac411b2b8e 100644
--- a/sample/sample_test.go
+++ b/sample/sample_test.go
@@ -1,3 +1,106 @@
-// +build all race
-
package sample
+
+import (
+ "os"
+ "testing"
+
+ "github.com/facebookgo/inject"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDependencyInjection(t *testing.T) {
+ var g inject.Graph
+ err := g.Provide(
+ &inject.Object{Value: &SamplerFactory{}},
+
+ &inject.Object{Value: &config.MockConfig{}},
+ &inject.Object{Value: &logger.NullLogger{}},
+ &inject.Object{Value: &metrics.NullMetrics{}, Name: "metrics"},
+ )
+ if err != nil {
+ t.Error(err)
+ }
+ if err := g.Populate(); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestDatasetPrefix(t *testing.T) {
+ tmpDir, err := os.MkdirTemp("", "")
+ assert.NoError(t, err)
+ defer os.RemoveAll(tmpDir)
+
+ configFile, err := os.CreateTemp(tmpDir, "*.toml")
+ assert.NoError(t, err)
+
+ _, err = configFile.Write([]byte(`
+ DatasetPrefix = "dataset"
+
+ [InMemCollector]
+ CacheCapacity=1000
+
+ [HoneycombMetrics]
+ MetricsHoneycombAPI="http://honeycomb.io"
+ MetricsAPIKey="1234"
+ MetricsDataset="testDatasetName"
+ MetricsReportingInterval=3
+
+ [HoneycombLogger]
+ LoggerHoneycombAPI="http://honeycomb.io"
+ LoggerAPIKey="1234"
+ LoggerDataset="loggerDataset"
+ `))
+ assert.NoError(t, err)
+ configFile.Close()
+
+ rulesFile, err := os.CreateTemp(tmpDir, "*.toml")
+ assert.NoError(t, err)
+
+ _, err = rulesFile.Write([]byte(`
+ Sampler = "DeterministicSampler"
+ SampleRate = 1
+
+ [production]
+ Sampler = "DeterministicSampler"
+ SampleRate = 10
+
+ [dataset.production]
+ Sampler = "DeterministicSampler"
+ SampleRate = 20
+ `))
+ assert.NoError(t, err)
+ rulesFile.Close()
+
+ c, err := config.NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {})
+ assert.NoError(t, err)
+
+ assert.Equal(t, "dataset", c.GetDatasetPrefix())
+
+ factory := SamplerFactory{Config: c, Logger: &logger.NullLogger{}, Metrics: &metrics.NullMetrics{}}
+
+ defaultSampler := &DeterministicSampler{
+ Config: &config.DeterministicSamplerConfig{SampleRate: 1},
+ Logger: &logger.NullLogger{},
+ }
+ defaultSampler.Start()
+
+ envSampler := &DeterministicSampler{
+ Config: &config.DeterministicSamplerConfig{SampleRate: 10},
+ Logger: &logger.NullLogger{},
+ }
+ envSampler.Start()
+
+ datasetSampler := &DeterministicSampler{
+ Config: &config.DeterministicSamplerConfig{SampleRate: 20},
+ Logger: &logger.NullLogger{},
+ }
+ datasetSampler.Start()
+
+ assert.Equal(t, defaultSampler, factory.GetSamplerImplementationForKey("unknown", false))
+ assert.Equal(t, defaultSampler, factory.GetSamplerImplementationForKey("unknown", true))
+ assert.Equal(t, envSampler, factory.GetSamplerImplementationForKey("production", false))
+ assert.Equal(t, datasetSampler, factory.GetSamplerImplementationForKey("production", true))
+}
diff --git a/sample/totalthroughput.go b/sample/totalthroughput.go
index 3752f34d25..e9c5818a9e 100644
--- a/sample/totalthroughput.go
+++ b/sample/totalthroughput.go
@@ -5,10 +5,10 @@ import (
dynsampler "github.com/honeycombio/dynsampler-go"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/types"
)
type TotalThroughputSampler struct {
@@ -45,7 +45,7 @@ func (d *TotalThroughputSampler) Start() error {
}
d.dynsampler.Start()
- // Register stastics this package will produce
+ // Register statistics this package will produce
d.Metrics.Register("dynsampler_num_dropped", "counter")
d.Metrics.Register("dynsampler_num_kept", "counter")
d.Metrics.Register("dynsampler_sample_rate", "histogram")
@@ -53,7 +53,7 @@ func (d *TotalThroughputSampler) Start() error {
return nil
}
-func (d *TotalThroughputSampler) GetSampleRate(trace *types.Trace) (uint, bool) {
+func (d *TotalThroughputSampler) GetSampleRate(trace *types.Trace) (uint, bool, string) {
key := d.key.buildAndAdd(trace)
rate := d.dynsampler.GetSampleRate(key)
if rate < 1 { // protect against dynsampler being broken even though it shouldn't be
@@ -67,10 +67,10 @@ func (d *TotalThroughputSampler) GetSampleRate(trace *types.Trace) (uint, bool)
"trace_id": trace.TraceID,
}).Logf("got sample rate and decision")
if shouldKeep {
- d.Metrics.IncrementCounter("dynsampler_num_kept")
+ d.Metrics.Increment("dynsampler_num_kept")
} else {
- d.Metrics.IncrementCounter("dynsampler_num_dropped")
+ d.Metrics.Increment("dynsampler_num_dropped")
}
d.Metrics.Histogram("dynsampler_sample_rate", float64(rate))
- return uint(rate), shouldKeep
+ return uint(rate), shouldKeep, "totalthroughput/" + key
}
diff --git a/sample/totalthroughput_test.go b/sample/totalthroughput_test.go
index edef7d42a0..02f6bf16dd 100644
--- a/sample/totalthroughput_test.go
+++ b/sample/totalthroughput_test.go
@@ -1,14 +1,12 @@
-// +build all race
-
package sample
import (
"testing"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/types"
"github.com/stretchr/testify/assert"
)
diff --git a/sample/trace_key.go b/sample/trace_key.go
index 80d0c6f1e5..bef5309f13 100644
--- a/sample/trace_key.go
+++ b/sample/trace_key.go
@@ -5,7 +5,7 @@ import (
"sort"
"strconv"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/types"
)
type traceKey struct {
diff --git a/sample/trace_key_test.go b/sample/trace_key_test.go
index e074b8fce8..8f8bffd933 100644
--- a/sample/trace_key_test.go
+++ b/sample/trace_key_test.go
@@ -1,11 +1,9 @@
-// +build all race
-
package sample
import (
"testing"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/types"
"github.com/stretchr/testify/assert"
)
diff --git a/service/debug/debug_service.go b/service/debug/debug_service.go
index c9f743bd38..c5d1cb1f57 100644
--- a/service/debug/debug_service.go
+++ b/service/debug/debug_service.go
@@ -14,7 +14,7 @@ import (
"sync"
"syscall"
- "github.com/honeycombio/refinery/config"
+ "github.com/opsramp/tracing-proxy/config"
metrics "github.com/rcrowley/go-metrics"
"github.com/rcrowley/go-metrics/exp"
"github.com/sirupsen/logrus"
diff --git a/sharder/deterministic.go b/sharder/deterministic.go
index 8b77ecb135..ad8e00d4c2 100644
--- a/sharder/deterministic.go
+++ b/sharder/deterministic.go
@@ -2,6 +2,7 @@ package sharder
import (
"crypto/sha1"
+ "encoding/binary"
"fmt"
"math"
"net"
@@ -10,15 +11,20 @@ import (
"sync"
"time"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/internal/peer"
- "github.com/honeycombio/refinery/logger"
+ "github.com/dgryski/go-wyhash"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/internal/peer"
+ "github.com/opsramp/tracing-proxy/logger"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
-// shardingSalt is a random bit to make sure we don't shard the same as any
-// other sharding that uses the trace ID (eg deterministic sampling)
-const shardingSalt = "gf4LqTwcJ6PEj2vO"
+// These are random bits to make sure we differentiate between different
+// hash cases even if we use the same value (traceID).
+const (
+ shardingSalt = "gf4LqTwcJ6PEj2vO"
+ peerSeed uint64 = 6789531204236
+)
// DetShard implements Shard
type DetShard struct {
@@ -27,6 +33,11 @@ type DetShard struct {
port string
}
+type hashShard struct {
+ uhash uint64
+ shardIndex int
+}
+
func (d *DetShard) Equals(other Shard) bool {
otherDetshard, ok := other.(*DetShard)
if !ok {
@@ -73,13 +84,32 @@ func (d *DetShard) String() string {
return d.GetAddress()
}
+// GetHashesFor generates a number of hashShards for a given DetShard by hashing the shard's
+// address with a seed that is itself repeatedly re-hashed, yielding a repeatable pseudo-random sequence.
+func (d *DetShard) GetHashesFor(index int, n int, seed uint64) []hashShard {
+ hashes := make([]hashShard, 0)
+ addr := d.GetAddress()
+ for i := 0; i < n; i++ {
+ hashes = append(hashes, hashShard{
+ uhash: wyhash.Hash([]byte(addr), seed),
+ shardIndex: index,
+ })
+ // generate another seed from the previous seed; we want this to be the same
+ // sequence for everything.
+ seed = wyhash.Hash([]byte("anything"), seed)
+ }
+ return hashes
+}
+
type DeterministicSharder struct {
Config config.Config `inject:""`
Logger logger.Logger `inject:""`
Peers peer.Peers `inject:""`
- myShard *DetShard
- peers []*DetShard
+ myShard *DetShard
+ peers []*DetShard
+ hashes []hashShard
+ shardFunc func(traceID string) Shard
peerLock sync.RWMutex
}
@@ -96,6 +126,21 @@ func (d *DeterministicSharder) Start() error {
}
})
+ // this isn't runtime-reloadable because it would
+ // reassign nearly every trace to a new shard.
+ strat, err := d.Config.GetPeerManagementStrategy()
+ if err != nil {
+ return errors.Wrap(err, "failed to get peer management strategy")
+ }
+ switch strat {
+ case "legacy", "":
+ d.shardFunc = d.WhichShardLegacy
+ case "hash":
+ d.shardFunc = d.WhichShardHashed
+ default:
+ return fmt.Errorf("unknown PeerManagementStrategy '%s'", strat)
+ }
+
// Try up to 5 times to find myself in the peer list before giving up
var found bool
var selfIndexIntoPeerList int
@@ -106,7 +151,9 @@ func (d *DeterministicSharder) Start() error {
}
// get my listen address for peer traffic for the Port number
- listenAddr, err := d.Config.GetPeerListenAddr()
+ //listenAddr, err := d.Config.GetPeerListenAddr() //Temporarily removed http peer listen addr, only grpc listener
+ listenAddr, err := d.Config.GetGRPCPeerListenAddr()
+
if err != nil {
return errors.Wrap(err, "failed to get listen addr config")
}
@@ -116,33 +163,58 @@ func (d *DeterministicSharder) Start() error {
}
d.Logger.Debug().Logf("picked up local peer port of %s", localPort)
- // get my local interfaces
- localAddrs, err := net.InterfaceAddrs()
- if err != nil {
- return errors.Wrap(err, "failed to get local interface list to initialize sharder")
+ var localIPs []string
+
+ // If RedisIdentifier is an IP, use as localIPs value.
+ if redisIdentifier, err := d.Config.GetRedisIdentifier(); err == nil && redisIdentifier != "" {
+ if ip := net.ParseIP(redisIdentifier); ip != nil {
+ d.Logger.Debug().Logf("Using RedisIdentifier as public IP: %s", redisIdentifier)
+ localIPs = []string{redisIdentifier}
+ }
+ }
+
+ // Otherwise, get my local interfaces' IPs.
+ if len(localIPs) == 0 {
+ localAddrs, err := net.InterfaceAddrs()
+ if err != nil {
+ return errors.Wrap(err, "failed to get local interface list to initialize sharder")
+ }
+ localIPs = make([]string, len(localAddrs))
+ for i, addr := range localAddrs {
+ addrStr := addr.String()
+ ip, _, err := net.ParseCIDR(addrStr)
+ if err != nil {
+ return errors.Wrap(err, fmt.Sprintf("failed to parse CIDR for local IP %s", addrStr))
+ }
+ localIPs[i] = ip.String()
+ }
}
// go through peer list, resolve each address, see if any of them match any
- // local interface. Note that this assumes only one instance of Refinery per
+ // local interface. Note that this assumes only one instance of tracing-proxy per
// host can run.
for i, peerShard := range d.peers {
- d.Logger.Debug().WithField("peer", peerShard).WithField("self", localAddrs).Logf("Considering peer looking for self")
+ d.Logger.Debug().WithFields(logrus.Fields{
+ "peer": peerShard,
+ "self": localIPs,
+ }).Logf("Considering peer looking for self")
peerIPList, err := net.LookupHost(peerShard.ipOrHost)
if err != nil {
// TODO something better than fail to start if peer is missing
return errors.Wrap(err, fmt.Sprintf("couldn't resolve peer hostname %s", peerShard.ipOrHost))
}
for _, peerIP := range peerIPList {
- for _, localIP := range localAddrs {
- ipAddr, _, err := net.ParseCIDR(localIP.String())
- if err != nil {
- return errors.Wrap(err, fmt.Sprintf("failed to parse CIDR for local IP %s", localIP.String()))
- }
- if peerIP == ipAddr.String() {
+ for _, ipAddr := range localIPs {
+ if peerIP == ipAddr {
if peerShard.port == localPort {
d.Logger.Debug().WithField("peer", peerShard).Logf("Found myself in peer list")
found = true
selfIndexIntoPeerList = i
+ } else {
+ d.Logger.Debug().WithFields(logrus.Fields{
+ "peer": peerShard,
+ "expectedPort": localPort,
+ }).Logf("Peer port mismatch")
}
}
}
@@ -178,9 +250,10 @@ func (d *DeterministicSharder) loadPeerList() error {
return errors.New("refusing to load empty peer list")
}
- // turn my peer list into a list of shards
- newPeers := make([]*DetShard, 0, len(peerList))
- for _, peer := range peerList {
+ // turn the peer list into a list of shards
+ // and a list of hashes
+ newPeers := make([]*DetShard, len(peerList))
+ for ix, peer := range peerList {
peerURL, err := url.Parse(peer)
if err != nil {
return errors.Wrap(err, "couldn't parse peer as a URL")
@@ -190,13 +263,43 @@ func (d *DeterministicSharder) loadPeerList() error {
ipOrHost: peerURL.Hostname(),
port: peerURL.Port(),
}
- newPeers = append(newPeers, peerShard)
+ newPeers[ix] = peerShard
}
- // the redis peer discovery already sorts its content. Does every backend?
- // well, it's not too much work, let's sort it one more time.
+ // make sure the list is in a stable, comparable order
sort.Sort(SortableShardList(newPeers))
+ // In general, the variation in the traffic assigned to a randomly partitioned space is
+ // controlled by the number of partitions. PartitionCount controls the minimum number
+ // of partitions used to control node assignment when we use the "hash" strategy.
+ // When there's a small number of partitions, the two-layer hash strategy can end up giving
+ // one partition a disproportionate fraction of the traffic. So we create a large number of
+ // random partitions and then assign (potentially) multiple partitions to individual nodes.
+ // We're asserting that if we randomly divide the space among this many partitions, the variation
+ // between them is likely to be acceptable. (As this is random, there might be exceptions.)
+ // The reason not to make this value much larger, say 1000, is that finding the right partition
+ // is linear -- O(number of partitions) and so we want it to be as small as possible
+ // while still being big enough.
+ // PartitionCount, therefore, is the smallest value that we believe will yield reasonable
+ // distribution between nodes. We divide it by the number of nodes using integer division
+ // and add 1 to get partitionsPerPeer. We then actually create (nNodes*partitionsPerPeer)
+ // partitions, which will always be greater than or equal to partitionCount.
+ // Examples: if we have 6 nodes, then partitionsPerPeer will be 9, and we will create
+ // 54 partitions. If we have 85 nodes, then partitionsPerPeer will be 1, and we will create
+ // 85 partitions.
+ const partitionCount = 50
+ // now build the hash list;
+ // We make a list of hash value and an index to a peer.
+ hashes := make([]hashShard, 0)
+ partitionsPerPeer := partitionCount/len(peerList) + 1
+ for ix := range newPeers {
+ hashes = append(hashes, newPeers[ix].GetHashesFor(ix, partitionsPerPeer, peerSeed)...)
+ }
+ // now sort the hash list by hash value so we can search it efficiently
+ sort.Slice(hashes, func(i, j int) bool {
+ return hashes[i].uhash < hashes[j].uhash
+ })
+
// if the peer list changed, load the new list
d.peerLock.RLock()
if !SortableShardList(d.peers).Equals(newPeers) {
@@ -204,6 +307,7 @@ func (d *DeterministicSharder) loadPeerList() error {
d.peerLock.RUnlock()
d.peerLock.Lock()
d.peers = newPeers
+ d.hashes = hashes
d.peerLock.Unlock()
} else {
d.peerLock.RUnlock()
@@ -216,22 +320,58 @@ func (d *DeterministicSharder) MyShard() Shard {
}
func (d *DeterministicSharder) WhichShard(traceID string) Shard {
+ return d.shardFunc(traceID)
+}
+
+// WhichShardLegacy is the original sharding decider. It uses sha1, which is
+// slow and not well-distributed, and also simply partitions the sharding
+// space into N evenly-divided buckets, which means that on every change in
+// shard count, half of the traces get reassigned (which leads to broken traces).
+// We leave it here to avoid disrupting things and provide a fallback if needed,
+// but the intent is eventually to delete this.
+func (d *DeterministicSharder) WhichShardLegacy(traceID string) Shard {
d.peerLock.RLock()
defer d.peerLock.RUnlock()
// add in the sharding salt to ensure the sh1sum is spread differently from
// others that use the same algorithm
sum := sha1.Sum([]byte(traceID + shardingSalt))
- v := bytesToUint32be(sum[:4])
+ v := binary.BigEndian.Uint32(sum[:4])
portion := math.MaxUint32 / len(d.peers)
index := v / uint32(portion)
+ // #454 -- index can get out of range if v is close to 0xFFFFFFFF and portion would be non-integral.
+ // Consider revisiting this with a different sharding mechanism if we rework our scaling behavior.
+ if index >= uint32(len(d.peers)) {
+ index = 0
+ }
+
return d.peers[index]
}
-// bytesToUint32 takes a slice of 4 bytes representing a big endian 32 bit
-// unsigned value and returns the equivalent uint32.
-func bytesToUint32be(b []byte) uint32 {
- return uint32(b[3]) | (uint32(b[2]) << 8) | (uint32(b[1]) << 16) | (uint32(b[0]) << 24)
+// WhichShardHashed calculates which shard we want by keeping a list of partitions. Each
+// partition has a different hash value and a map from partition to a given shard.
+// We take the traceID and calculate a hash for each partition, using the partition
+// hash as the seed for the trace hash. Whichever one has the highest value is the
+// partition we use, which determines the shard we use.
+// This is O(N) where N is the number of partitions, but because we use an efficient hash,
+// (as opposed to SHA1) it executes in 1 uSec for 50 partitions, so it works out to about
+// the same cost as the legacy sharder.
+func (d *DeterministicSharder) WhichShardHashed(traceID string) Shard {
+ d.peerLock.RLock()
+ defer d.peerLock.RUnlock()
+
+ tid := []byte(traceID)
+
+ bestix := 0
+ var maxHash uint64
+ for _, hash := range d.hashes {
+ h := wyhash.Hash(tid, hash.uhash)
+ if h > maxHash {
+ maxHash = h
+ bestix = hash.shardIndex
+ }
+ }
+ return d.peers[bestix]
}
diff --git a/sharder/deterministic_test.go b/sharder/deterministic_test.go
index 49336a8f5e..da44379dd9 100644
--- a/sharder/deterministic_test.go
+++ b/sharder/deterministic_test.go
@@ -1,13 +1,14 @@
-// +build all race
-
package sharder
import (
+ "context"
+ "fmt"
+ "math/rand"
"testing"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/internal/peer"
- "github.com/honeycombio/refinery/logger"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/internal/peer"
+ "github.com/opsramp/tracing-proxy/logger"
"github.com/stretchr/testify/assert"
)
@@ -27,7 +28,52 @@ func TestWhichShard(t *testing.T) {
GetPeersVal: peers,
PeerManagementType: "file",
}
- filePeers, err := peer.NewPeers(config)
+ done := make(chan struct{})
+ defer close(done)
+ filePeers, err := peer.NewPeers(context.Background(), config, done)
+ assert.Equal(t, nil, err)
+ sharder := DeterministicSharder{
+ Config: config,
+ Logger: &logger.NullLogger{},
+ Peers: filePeers,
+ }
+
+ assert.NoError(t, sharder.Start(),
+ "starting deterministic sharder should not error")
+
+ shard := sharder.WhichShard(traceID)
+ assert.Contains(t, peers, shard.GetAddress(),
+ "should select a peer for a trace")
+
+ config.GetPeersVal = []string{}
+ config.ReloadConfig()
+ assert.Equal(t, shard.GetAddress(), sharder.WhichShard(traceID).GetAddress(),
+ "should select the same peer if peer list becomes empty")
+}
+
+func TestWhichShardAtEdge(t *testing.T) {
+ const (
+ selfAddr = "127.0.0.1:8081"
+ traceID = "RCIVNUNA" // carefully chosen (by trying over a billion times) to hash in WhichShard to 0xFFFFFFFF
+ )
+
+ // The algorithm in WhichShard works correctly for divisors of 2^32-1. The prime factorization of that includes
+ // 1, 3, 5, 17, so we need something other than 3 to be sure that this test would fail.
+ // It was tested (and failed) without the additional conditional.
+ peers := []string{
+ "http://" + selfAddr,
+ "http://2.2.2.2:8081",
+ "http://3.3.3.3:8081",
+ "http://4.4.4.4:8081",
+ }
+ config := &config.MockConfig{
+ GetPeerListenAddrVal: selfAddr,
+ GetPeersVal: peers,
+ PeerManagementType: "file",
+ }
+ done := make(chan struct{})
+ defer close(done)
+ filePeers, err := peer.NewPeers(context.Background(), config, done)
assert.Equal(t, nil, err)
sharder := DeterministicSharder{
Config: config,
@@ -47,3 +93,313 @@ func TestWhichShard(t *testing.T) {
assert.Equal(t, shard.GetAddress(), sharder.WhichShard(traceID).GetAddress(),
"should select the same peer if peer list becomes empty")
}
+
+// GenID returns a random hex string of length numChars
+func GenID(numChars int) string {
+ const charset = "abcdef0123456789"
+
+ id := make([]byte, numChars)
+ for i := 0; i < numChars; i++ {
+ id[i] = charset[rand.Intn(len(charset))]
+ }
+ return string(id)
+}
+
+func BenchmarkShardBulk(b *testing.B) {
+ const (
+ selfAddr = "127.0.0.1:8081"
+ traceID = "test"
+ )
+
+ const npeers = 11
+ peers := []string{
+ "http://" + selfAddr,
+ }
+ for i := 1; i < npeers; i++ {
+ peers = append(peers, fmt.Sprintf("http://2.2.2.%d/:8081", i))
+ }
+ config := &config.MockConfig{
+ GetPeerListenAddrVal: selfAddr,
+ GetPeersVal: peers,
+ PeerManagementType: "file",
+ PeerManagementStrategy: "legacy",
+ }
+ done := make(chan struct{})
+ defer close(done)
+ filePeers, err := peer.NewPeers(context.Background(), config, done)
+ assert.Equal(b, nil, err)
+ sharder := DeterministicSharder{
+ Config: config,
+ Logger: &logger.NullLogger{},
+ Peers: filePeers,
+ }
+
+ assert.NoError(b, sharder.Start(), "starting deterministic sharder should not error")
+
+ const ntraces = 10
+ ids := make([]string, ntraces)
+ for i := 0; i < ntraces; i++ {
+ ids[i] = GenID(32)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ sharder.WhichShard(ids[i%ntraces])
+ }
+}
+
+func TestShardBulk(t *testing.T) {
+ const (
+ selfAddr = "127.0.0.1:8081"
+ traceID = "test"
+ )
+
+ // this test should work for all strategies and a wide range of peer counts
+ for _, strategy := range []string{"legacy", "hash"} {
+ for i := 0; i < 5; i++ {
+ npeers := i*10 + 5
+ t.Run(fmt.Sprintf("bulk npeers=%d", npeers), func(t *testing.T) {
+ peers := []string{
+ "http://" + selfAddr,
+ }
+ for i := 1; i < npeers; i++ {
+ peers = append(peers, fmt.Sprintf("http://2.2.2.%d/:8081", i))
+ }
+
+ config := &config.MockConfig{
+ GetPeerListenAddrVal: selfAddr,
+ GetPeersVal: peers,
+ PeerManagementType: "file",
+ PeerManagementStrategy: strategy,
+ }
+ done := make(chan struct{})
+ defer close(done)
+ filePeers, err := peer.NewPeers(context.Background(), config, done)
+ assert.Equal(t, nil, err)
+ sharder := DeterministicSharder{
+ Config: config,
+ Logger: &logger.NullLogger{},
+ Peers: filePeers,
+ }
+
+ assert.NoError(t, sharder.Start(), "starting sharder should not error")
+
+ const ntraces = 1000
+ ids := make([]string, ntraces)
+ for i := 0; i < ntraces; i++ {
+ ids[i] = GenID(32)
+ }
+
+ results := make(map[string]int)
+ for i := 0; i < ntraces; i++ {
+ s := sharder.WhichShardHashed(ids[i])
+ results[s.GetAddress()]++
+ }
+ min := ntraces
+ max := 0
+ for _, r := range results {
+ if r < min {
+ min = r
+ }
+ if r > max {
+ max = r
+ }
+ }
+
+ // This is probabilistic, so could fail, but shouldn't be flaky as long as
+ // expectedResult is at least 20 or so.
+ expectedResult := ntraces / npeers
+ assert.Greater(t, expectedResult*2, max, "expected smaller max, got %d: %v", max, results)
+ assert.NotEqual(t, 0, min, "expected larger min, got %d: %v", min, results)
+ })
+ }
+ }
+}
+
+func TestShardDrop(t *testing.T) {
+ const (
+ selfAddr = "127.0.0.1:8081"
+ traceID = "test"
+ )
+
+ for i := 0; i < 5; i++ {
+ npeers := i*10 + 5
+ t.Run(fmt.Sprintf("drop npeers=%d", npeers), func(t *testing.T) {
+ peers := []string{
+ "http://" + selfAddr,
+ }
+ for i := 1; i < npeers; i++ {
+ peers = append(peers, fmt.Sprintf("http://2.2.2.%d/:8081", i))
+ }
+
+ config := &config.MockConfig{
+ GetPeerListenAddrVal: selfAddr,
+ GetPeersVal: peers,
+ PeerManagementType: "file",
+ PeerManagementStrategy: "hash",
+ }
+ done := make(chan struct{})
+ defer close(done)
+ filePeers, err := peer.NewPeers(context.Background(), config, done)
+ assert.Equal(t, nil, err)
+ sharder := DeterministicSharder{
+ Config: config,
+ Logger: &logger.NullLogger{},
+ Peers: filePeers,
+ }
+
+ assert.NoError(t, sharder.Start(), "starting sharder should not error")
+
+ type placement struct {
+ id string
+ shard string
+ }
+
+ const ntraces = 1000
+ placements := make([]placement, ntraces)
+ for i := 0; i < ntraces; i++ {
+ placements[i].id = GenID(32)
+ }
+
+ results := make(map[string]int)
+ for i := 0; i < ntraces; i++ {
+ s := sharder.WhichShard(placements[i].id)
+ results[s.GetAddress()]++
+ placements[i].shard = s.GetAddress()
+ }
+
+ // reach in and delete one of the peers, then reshard
+ config.GetPeersVal = config.GetPeersVal[1:]
+ sharder.loadPeerList()
+
+ results = make(map[string]int)
+ nDiff := 0
+ for i := 0; i < ntraces; i++ {
+ s := sharder.WhichShardHashed(placements[i].id)
+ results[s.GetAddress()]++
+ if s.GetAddress() != placements[i].shard {
+ nDiff++
+ }
+ }
+
+ expected := ntraces / (npeers - 1)
+ assert.Greater(t, expected*2, nDiff)
+ assert.Less(t, expected/2, nDiff)
+ })
+ }
+}
+
+func TestShardAddHash(t *testing.T) {
+ const (
+ selfAddr = "127.0.0.1:8081"
+ traceID = "test"
+ )
+
+ for i := 0; i < 5; i++ {
+ npeers := i*10 + 7
+ t.Run(fmt.Sprintf("add npeers=%d", npeers), func(t *testing.T) {
+ peers := []string{
+ "http://" + selfAddr,
+ }
+ for i := 1; i < npeers; i++ {
+ peers = append(peers, fmt.Sprintf("http://2.2.2.%d/:8081", i))
+ }
+
+ config := &config.MockConfig{
+ GetPeerListenAddrVal: selfAddr,
+ GetPeersVal: peers,
+ PeerManagementType: "file",
+ PeerManagementStrategy: "hash",
+ }
+ done := make(chan struct{})
+ defer close(done)
+ filePeers, err := peer.NewPeers(context.Background(), config, done)
+ assert.Equal(t, nil, err)
+ sharder := DeterministicSharder{
+ Config: config,
+ Logger: &logger.NullLogger{},
+ Peers: filePeers,
+ }
+
+ assert.NoError(t, sharder.Start(), "starting sharder should not error")
+
+ type placement struct {
+ id string
+ shard string
+ }
+
+ const ntraces = 1000
+ placements := make([]placement, ntraces)
+ for i := 0; i < ntraces; i++ {
+ placements[i].id = GenID(32)
+ }
+
+ results := make(map[string]int)
+ for i := 0; i < ntraces; i++ {
+ s := sharder.WhichShardHashed(placements[i].id)
+ results[s.GetAddress()]++
+ placements[i].shard = s.GetAddress()
+ }
+
+ // reach in and add a peer, then reshard
+ config.GetPeersVal = append(config.GetPeersVal, "http://2.2.2.255/:8081")
+ sharder.loadPeerList()
+
+ results = make(map[string]int)
+ nDiff := 0
+ for i := 0; i < ntraces; i++ {
+ s := sharder.WhichShard(placements[i].id)
+ results[s.GetAddress()]++
+ if s.GetAddress() != placements[i].shard {
+ nDiff++
+ }
+ }
+ expected := ntraces / (npeers - 1)
+ assert.Greater(t, expected*2, nDiff)
+ assert.Less(t, expected/2, nDiff)
+ })
+ }
+}
+
+func BenchmarkDeterministicShard(b *testing.B) {
+ const (
+ selfAddr = "127.0.0.1:8081"
+ traceID = "test"
+ )
+ for _, strat := range []string{"legacy", "hash"} {
+ for i := 0; i < 5; i++ {
+ npeers := i*10 + 4
+ b.Run(fmt.Sprintf("benchmark_deterministic_%s_%d", strat, npeers), func(b *testing.B) {
+ peers := []string{
+ "http://" + selfAddr,
+ }
+ for i := 1; i < npeers; i++ {
+ peers = append(peers, fmt.Sprintf("http://2.2.2.%d/:8081", i))
+ }
+ config := &config.MockConfig{
+ GetPeerListenAddrVal: selfAddr,
+ GetPeersVal: peers,
+ PeerManagementType: "file",
+ PeerManagementStrategy: strat,
+ }
+ done := make(chan struct{})
+ defer close(done)
+ filePeers, err := peer.NewPeers(context.Background(), config, done)
+ assert.Equal(b, nil, err)
+ sharder := DeterministicSharder{
+ Config: config,
+ Logger: &logger.NullLogger{},
+ Peers: filePeers,
+ }
+
+ assert.NoError(b, sharder.Start(),
+ "starting deterministic sharder should not error")
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ sharder.WhichShard(traceID)
+ }
+ })
+ }
+ }
+}
diff --git a/sharder/sharder.go b/sharder/sharder.go
index 5349ec792b..1df37257bd 100644
--- a/sharder/sharder.go
+++ b/sharder/sharder.go
@@ -4,14 +4,14 @@ import (
"fmt"
"os"
- "github.com/honeycombio/refinery/config"
+ "github.com/opsramp/tracing-proxy/config"
)
-// Shard repreesents a single instance of Refinery.
+// Shard represents a single instance of tracing-proxy.
type Shard interface {
Equals(Shard) bool
// GetAddress returns a string suitable for use in building a URL, eg
- // http://refinery-1234:8080 or https://10.2.3.4
+ // http://tracing-proxy-1234:8080 or https://10.2.3.4
GetAddress() string
}
diff --git a/sharder/sharder_test.go b/sharder/sharder_test.go
index f2a5fde6ee..d286617db2 100644
--- a/sharder/sharder_test.go
+++ b/sharder/sharder_test.go
@@ -1,3 +1 @@
-// +build all race
-
package sharder
diff --git a/sharder/single.go b/sharder/single.go
index e2003a29df..8dd1072625 100644
--- a/sharder/single.go
+++ b/sharder/single.go
@@ -1,7 +1,7 @@
package sharder
import (
- "github.com/honeycombio/refinery/logger"
+ "github.com/opsramp/tracing-proxy/logger"
)
// SingleShard implements the Shard interface
diff --git a/start.sh b/start.sh
new file mode 100755
index 0000000000..9dc43a9b7b
--- /dev/null
+++ b/start.sh
@@ -0,0 +1,127 @@
+#!/bin/bash
+
+CLUSTERINFO_PATH='/config/data/infra_clusterinfo.json'
+ELASTICACHE_PATH='/config/data/infra_elasticache.json'
+
+# Sample Format for ${ELASTICACHE_PATH}
+# {
+# "elasticache": {
+# "host": "some_url",
+# "host_ro": "some_url",
+# "port": 6379,
+# "username": "test_user",
+# "password": "xxxxxx",
+# "tls_mode": true,
+# "cluster_mode": "false"
+# }
+# }
+# Sample Format for ${ELASTICACHE_PATH} in case of multi region
+# {
+# "elasticache": [{
+# "us-west-2": {
+# "host": "some_url",
+# "host_ro": "some_url",
+# "port": 6379,
+# "username": "",
+# "password": "xxxxxx",
+# "tls_mode": true,
+# "cluster_mode": "false"
+# },
+# "us-east-2": {
+# "host": "some_url",
+# "host_ro": "some_url",
+# "port": 6379,
+# "username": "",
+# "password": "xxxxxx",
+# "tls_mode": true,
+# "cluster_mode": "false"
+# }
+# }]
+# }
+
+OPSRAMP_CREDS_PATH='/config/data/config_tracing-proxy.json'
+
+# Sample Format for ${OPSRAMP_CREDS_PATH}
+# {
+# "tracing-proxy": {
+# "traces_api": "test.opsramp.net",
+# "metrics_api": "test.opsramp.net",
+# "auth_api": "test.opsramp.net",
+# "key": "sdjfnsakdflasdflksjdkfjsdklfjals",
+# "secret": "***REMOVED***",
+# "tenant_id": "123e-fsdf-4r234r-dfbfsdbg"
+# }
+# }
+
+
+TRACE_PROXY_CONFIG='/etc/tracing-proxy/final_config.yaml'
+TRACE_PROXY_RULES='/etc/tracing-proxy/final_rules.yaml'
+
+# make copy of the config.yaml & rules.yaml to make sure it works if config maps are mounted
+cp /etc/tracing-proxy/config.yaml ${TRACE_PROXY_CONFIG}
+cp /etc/tracing-proxy/rules.yaml ${TRACE_PROXY_RULES}
+
+if [ -r ${CLUSTERINFO_PATH} ]; then
+
+ CURRENT_REGION=$(jq <${CLUSTERINFO_PATH} -r .clusterinfo.CURRENT_REGION)
+ READ_WRITE_REGION=$(jq <${CLUSTERINFO_PATH} -r .clusterinfo.READ_WRITE_REGION)
+
+ while [ "${CURRENT_REGION}" != "${READ_WRITE_REGION}" ]; do sleep 30; done
+fi
+
+if [ -r ${ELASTICACHE_PATH} ]; then
+ # check if the configuration is a object or array
+ TYPE=$(jq <${ELASTICACHE_PATH} -r .elasticache | jq 'if type=="array" then true else false end')
+ if [ "${TYPE}" = true ]; then
+
+ if [ -r ${CLUSTERINFO_PATH} ]; then
+
+ CURRENT_REGION=$(jq <${CLUSTERINFO_PATH} -r .clusterinfo.CURRENT_REGION)
+
+ CREDS=$(jq <${ELASTICACHE_PATH} -r .elasticache[0].\""${CURRENT_REGION}"\")
+
+ REDIS_HOST=$(echo "${CREDS}" | jq -r '(.host)+":"+(.port|tostring)')
+ REDIS_USERNAME=$(echo "${CREDS}" | jq -r .username)
+ REDIS_PASSWORD=$(echo "${CREDS}" | jq -r .password)
+ REDIS_TLS_MODE=$(echo "${CREDS}" | jq -r .tls_mode | tr '[:upper:]' '[:lower:]')
+
+ sed -i "s//${REDIS_HOST}/g" ${TRACE_PROXY_CONFIG}
+ sed -i "s//${REDIS_USERNAME}/g" ${TRACE_PROXY_CONFIG}
+ sed -i "s//${REDIS_PASSWORD}/g" ${TRACE_PROXY_CONFIG}
+ sed -i "s//${REDIS_TLS_MODE}/g" ${TRACE_PROXY_CONFIG}
+
+ fi
+
+
+ else
+ REDIS_HOST=$(jq <${ELASTICACHE_PATH} -r '(.elasticache.host)+":"+(.elasticache.port|tostring)')
+ REDIS_USERNAME=$(jq <${ELASTICACHE_PATH} -r .elasticache.username)
+ REDIS_PASSWORD=$(jq <${ELASTICACHE_PATH} -r .elasticache.password)
+ REDIS_TLS_MODE=$(jq <${ELASTICACHE_PATH} -r .elasticache.tls_mode | tr '[:upper:]' '[:lower:]')
+
+ sed -i "s//${REDIS_HOST}/g" ${TRACE_PROXY_CONFIG}
+ sed -i "s//${REDIS_USERNAME}/g" ${TRACE_PROXY_CONFIG}
+ sed -i "s//${REDIS_PASSWORD}/g" ${TRACE_PROXY_CONFIG}
+ sed -i "s//${REDIS_TLS_MODE}/g" ${TRACE_PROXY_CONFIG}
+ fi
+fi
+
+if [ -r ${OPSRAMP_CREDS_PATH} ]; then
+
+ TRACES_API=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".traces_api')
+ METRICS_API=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".metrics_api')
+ AUTH_API=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".auth_api')
+ KEY=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".key')
+ SECRET=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".secret')
+ TENANT_ID=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".tenant_id')
+
+ sed -i "s**${TRACES_API}*g" ${TRACE_PROXY_CONFIG}
+ sed -i "s**${METRICS_API}*g" ${TRACE_PROXY_CONFIG}
+ sed -i "s**${AUTH_API}*g" ${TRACE_PROXY_CONFIG}
+ sed -i "s//${KEY}/g" ${TRACE_PROXY_CONFIG}
+ sed -i "s//${SECRET}/g" ${TRACE_PROXY_CONFIG}
+ sed -i "s//${TENANT_ID}/g" ${TRACE_PROXY_CONFIG}
+fi
+
+# start the application
+exec /usr/bin/tracing-proxy -c /etc/tracing-proxy/final_config.yaml -r /etc/tracing-proxy/final_rules.yaml
diff --git a/tools/loadtest/.gitignore b/tools/loadtest/.gitignore
new file mode 100644
index 0000000000..af7a074c2b
--- /dev/null
+++ b/tools/loadtest/.gitignore
@@ -0,0 +1,4 @@
+.direnv
+.tool-versions
+__*
+.DS_Store
\ No newline at end of file
diff --git a/transmit/mock.go b/transmit/mock.go
index c018131a96..5a3fe6bcc0 100644
--- a/transmit/mock.go
+++ b/transmit/mock.go
@@ -3,7 +3,7 @@ package transmit
import (
"sync"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/types"
)
type MockTransmission struct {
diff --git a/transmit/transmit.go b/transmit/transmit.go
index 280009656d..e8fd7476ac 100644
--- a/transmit/transmit.go
+++ b/transmit/transmit.go
@@ -2,19 +2,21 @@ package transmit
import (
"context"
+ "fmt"
+ "os"
"sync"
- libhoney "github.com/honeycombio/libhoney-go"
- "github.com/honeycombio/libhoney-go/transmission"
+ "github.com/opsramp/libtrace-go"
+ "github.com/opsramp/libtrace-go/transmission"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
- "github.com/honeycombio/refinery/types"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
+ "github.com/opsramp/tracing-proxy/types"
)
type Transmission interface {
- // Enqueue accepts a single event and schedules it for transmission to Honeycomb
+ // Enqueue accepts a single event and schedules it for transmission to Opsramp
EnqueueEvent(ev *types.Event)
EnqueueSpan(ev *types.Span)
// Flush flushes the in-flight queue of all events and spans
@@ -22,23 +24,22 @@ type Transmission interface {
}
const (
- counterEnqueueErrors = "enqueue_errors"
- counterResponse20x = "response_20x"
- counterResponseErrorsAPI = "response_errors_api"
- counterResponseErrorsPeer = "response_errors_peer"
+ counterEnqueueErrors = "enqueue_errors"
+ counterResponse20x = "response_20x"
+ counterResponseErrors = "response_errors"
)
type DefaultTransmission struct {
Config config.Config `inject:""`
Logger logger.Logger `inject:""`
- Metrics metrics.Metrics `inject:""`
+ Metrics metrics.Metrics `inject:"metrics"`
Version string `inject:"version"`
- LibhClient *libhoney.Client
+ LibhClient *libtrace.Client
// Type is peer or upstream, and used only for naming metrics
Name string
- builder *libhoney.Builder
+ builder *libtrace.Builder
responseCanceler context.CancelFunc
}
@@ -50,26 +51,52 @@ func (d *DefaultTransmission) Start() error {
// upstreamAPI doesn't get set when the client is initialized, because
// it can be reloaded from the config file while live
- upstreamAPI, err := d.Config.GetHoneycombAPI()
+ upstreamAPI, err := d.Config.GetOpsrampAPI()
if err != nil {
return err
}
+
+ if d.Config.GetAddHostMetadataToTrace() {
+ if hostname, err := os.Hostname(); err == nil && hostname != "" {
+ // add hostname to spans
+ d.LibhClient.AddResourceField("meta.local_hostname", hostname)
+ }
+ }
+ for key, value := range d.Config.GetAddAdditionalMetadata() {
+ if !d.LibhClient.CheckResourceField(key) {
+ d.LibhClient.AddResourceField(key, value)
+ }
+ }
+
d.builder = d.LibhClient.NewBuilder()
d.builder.APIHost = upstreamAPI
once.Do(func() {
- libhoney.UserAgentAddition = "refinery/" + d.Version
+ libtrace.UserAgentAddition = "tracing-proxy/" + d.Version
})
d.Metrics.Register(d.Name+counterEnqueueErrors, "counter")
d.Metrics.Register(d.Name+counterResponse20x, "counter")
- d.Metrics.Register(d.Name+counterResponseErrorsAPI, "counter")
- d.Metrics.Register(d.Name+counterResponseErrorsPeer, "counter")
+ d.Metrics.Register(d.Name+counterResponseErrors, "counter")
processCtx, canceler := context.WithCancel(context.Background())
d.responseCanceler = canceler
go d.processResponses(processCtx, d.LibhClient.TxResponses())
+ // get proxy details
+ proxyConfig := d.Config.GetProxyConfig()
+
+ proxyUrl := ""
+ if proxyConfig.Host != "" && proxyConfig.Protocol != "" {
+ proxyUrl = fmt.Sprintf("%s://%s:%d/", proxyConfig.Protocol, proxyConfig.Host, proxyConfig.Port)
+ if proxyConfig.Username != "" && proxyConfig.Password != "" {
+ proxyUrl = fmt.Sprintf("%s://%s:%s@%s:%d", proxyConfig.Protocol, proxyConfig.Username, proxyConfig.Password, proxyConfig.Host, proxyConfig.Port)
+ d.Logger.Debug().Logf("Using Authentication for ProxyConfiguration Communication for Traces")
+ }
+ os.Setenv("HTTPS_PROXY", proxyUrl)
+ os.Setenv("HTTP_PROXY", proxyUrl)
+ }
+
// listen for config reloads
d.Config.RegisterReloadCallback(d.reloadTransmissionBuilder)
return nil
@@ -77,10 +104,10 @@ func (d *DefaultTransmission) Start() error {
func (d *DefaultTransmission) reloadTransmissionBuilder() {
d.Logger.Debug().Logf("reloading transmission config")
- upstreamAPI, err := d.Config.GetHoneycombAPI()
+ upstreamAPI, err := d.Config.GetOpsrampAPI()
if err != nil {
// log and skip reload
- d.Logger.Error().Logf("Failed to reload Honeycomb API when reloading configs:", err)
+ d.Logger.Error().Logf("Failed to reload Opsramp API when reloading configs:", err)
}
builder := d.LibhClient.NewBuilder()
builder.APIHost = upstreamAPI
@@ -94,10 +121,25 @@ func (d *DefaultTransmission) EnqueueEvent(ev *types.Event) {
Logf("transmit sending event")
libhEv := d.builder.NewEvent()
libhEv.APIHost = ev.APIHost
- libhEv.WriteKey = ev.APIKey
+ //libhEv.WriteKey = ev.APIKey
libhEv.Dataset = ev.Dataset
libhEv.SampleRate = ev.SampleRate
libhEv.Timestamp = ev.Timestamp
+ libhEv.APIToken = ev.APIToken
+ libhEv.APITenantId = ev.APITenantId
+ // metadata is used to make error logs more helpful when processing libhoney responses
+ metadata := map[string]any{
+ "api_host": ev.APIHost,
+ "dataset": ev.Dataset,
+ "environment": ev.Environment,
+ }
+
+ for _, k := range d.Config.GetAdditionalErrorFields() {
+ if v, ok := ev.Data[k]; ok {
+ metadata[k] = v
+ }
+ }
+ libhEv.Metadata = metadata
for k, v := range ev.Data {
libhEv.AddField(k, v)
@@ -105,12 +147,13 @@ func (d *DefaultTransmission) EnqueueEvent(ev *types.Event) {
err := libhEv.SendPresampled()
if err != nil {
- d.Metrics.IncrementCounter(d.Name + counterEnqueueErrors)
+ d.Metrics.Increment(d.Name + counterEnqueueErrors)
d.Logger.Error().
WithString("error", err.Error()).
WithField("request_id", ev.Context.Value(types.RequestIDContextKey{})).
WithString("dataset", ev.Dataset).
WithString("api_host", ev.APIHost).
+ WithString("environment", ev.Environment).
Logf("failed to enqueue event")
}
}
@@ -138,39 +181,34 @@ func (d *DefaultTransmission) processResponses(
ctx context.Context,
responses chan transmission.Response,
) {
- honeycombAPI, _ := d.Config.GetHoneycombAPI()
for {
select {
case r := <-responses:
if r.Err != nil || r.StatusCode > 202 {
- var apiHost, dataset, evType, target string
- if metadata, ok := r.Metadata.(map[string]string); ok {
- apiHost = metadata["api_host"]
- dataset = metadata["dataset"]
- evType = metadata["type"]
- target = metadata["target"]
+ var apiHost, dataset, environment string
+ if metadata, ok := r.Metadata.(map[string]any); ok {
+ apiHost = metadata["api_host"].(string)
+ dataset = metadata["dataset"].(string)
+ environment = metadata["environment"].(string)
}
log := d.Logger.Error().WithFields(map[string]interface{}{
"status_code": r.StatusCode,
"api_host": apiHost,
"dataset": dataset,
- "event_type": evType,
- "target": target,
+ "environment": environment,
})
+ for _, k := range d.Config.GetAdditionalErrorFields() {
+ if v, ok := r.Metadata.(map[string]any)[k]; ok {
+ log = log.WithField(k, v)
+ }
+ }
if r.Err != nil {
log = log.WithField("error", r.Err.Error())
}
- log.Logf("non-20x response when sending event")
- if honeycombAPI == apiHost {
- // if the API host matches the configured honeycomb API,
- // count it as an API error
- d.Metrics.IncrementCounter(d.Name + counterResponseErrorsAPI)
- } else {
- // otherwise, it's probably a peer error
- d.Metrics.IncrementCounter(d.Name + counterResponseErrorsPeer)
- }
+ log.Logf("error when sending event")
+ d.Metrics.Increment(d.Name + counterResponseErrors)
} else {
- d.Metrics.IncrementCounter(d.Name + counterResponse20x)
+ d.Metrics.Increment(d.Name + counterResponse20x)
}
case <-ctx.Done():
return
diff --git a/transmit/transmit_test.go b/transmit/transmit_test.go
index 3d35fc2303..a662828163 100644
--- a/transmit/transmit_test.go
+++ b/transmit/transmit_test.go
@@ -1,15 +1,14 @@
-// +build all race
-
package transmit
import (
"testing"
- "github.com/honeycombio/refinery/config"
- "github.com/honeycombio/refinery/logger"
- "github.com/honeycombio/refinery/metrics"
+ "github.com/facebookgo/inject"
+ "github.com/opsramp/tracing-proxy/config"
+ "github.com/opsramp/tracing-proxy/logger"
+ "github.com/opsramp/tracing-proxy/metrics"
- libhoney "github.com/honeycombio/libhoney-go"
+ "github.com/opsramp/libtrace-go"
"github.com/stretchr/testify/assert"
)
@@ -18,12 +17,30 @@ func TestDefaultTransmissionUpdatesUserAgentAdditionAfterStart(t *testing.T) {
Config: &config.MockConfig{},
Logger: &logger.NullLogger{},
Metrics: &metrics.NullMetrics{},
- LibhClient: &libhoney.Client{},
+ LibhClient: &libtrace.Client{},
Version: "test",
}
- assert.Equal(t, libhoney.UserAgentAddition, "")
+ assert.Equal(t, libtrace.UserAgentAddition, "")
err := transmission.Start()
assert.Nil(t, err)
- assert.Equal(t, libhoney.UserAgentAddition, "refinery/test")
+ assert.Equal(t, libtrace.UserAgentAddition, "tracing-proxy/test")
+}
+
+func TestDependencyInjection(t *testing.T) {
+ var g inject.Graph
+ err := g.Provide(
+ &inject.Object{Value: &DefaultTransmission{}},
+
+ &inject.Object{Value: &config.MockConfig{}},
+ &inject.Object{Value: &logger.NullLogger{}},
+ &inject.Object{Value: &metrics.NullMetrics{}, Name: "metrics"},
+ &inject.Object{Value: "test", Name: "version"},
+ )
+ if err != nil {
+ t.Error(err)
+ }
+ if err := g.Populate(); err != nil {
+ t.Error(err)
+ }
}
diff --git a/types/event.go b/types/event.go
index 0ef2157889..a2f1b92648 100644
--- a/types/event.go
+++ b/types/event.go
@@ -6,11 +6,11 @@ import (
)
const (
- APIKeyHeader = "X-Honeycomb-Team"
- // libhoney-js uses this
- APIKeyHeaderShort = "X-Hny-Team"
- SampleRateHeader = "X-Honeycomb-Samplerate"
- TimestampHeader = "X-Honeycomb-Event-Time"
+ APIKeyHeader = "X-OpsRamp-Team"
+
+ SampleRateHeader = "X-OpsRamp-Samplerate"
+ TimestampHeader = "X-OpsRamp-Event-Time"
+ QueryTokenHeader = "X-OpsRamp-Refinery-Query"
)
// used to put a request ID into the request context for logging
@@ -18,17 +18,20 @@ type RequestIDContextKey struct{}
// event is not part of a trace - it's an event that showed up with no trace ID
type Event struct {
- Context context.Context
- APIHost string
- APIKey string
- Dataset string
- SampleRate uint
- Timestamp time.Time
- Data map[string]interface{}
+ Context context.Context
+ APIHost string
+ APIKey string
+ APIToken string
+ APITenantId string
+ Dataset string
+ Environment string
+ SampleRate uint
+ Timestamp time.Time
+ Data map[string]interface{}
}
// Trace isn't something that shows up on the wire; it gets created within
-// Refinery. Traces are not thread-safe; only one goroutine should be working
+// tracing-proxy. Traces are not thread-safe; only one goroutine should be working
// with a trace object at a time.
type Trace struct {
APIHost string
@@ -46,28 +49,127 @@ type Trace struct {
SendBy time.Time
// StartTime is the server time when the first span arrived for this trace.
- // Used to calculate how long traces spend sitting in Refinery
+ // Used to calculate how long traces spend sitting in tracing-proxy
StartTime time.Time
- HasRootSpan bool
+ // ArrivalTime is the server time when the first span arrived for this trace.
+ // Used to calculate how long traces spend sitting in Refinery
+ ArrivalTime time.Time
+
+ RootSpan *Span
+
+ // DataSize is the sum of the DataSize of spans that are added.
+ // It's used to help expire the most expensive traces.
+ DataSize int
// spans is the list of spans in this trace
spans []*Span
+
+ // totalImpact is the sum of the trace's cacheImpact; if this value is 0
+ // it is recalculated during CacheImpact(), otherwise this value is
+ // returned. We reset it to 0 when adding spans so it gets recalculated.
+ // This is used to memoize the impact calculation so that it doesn't get
+ // calculated over and over during a sort.
+ totalImpact int
}
// AddSpan adds a span to this trace
func (t *Trace) AddSpan(sp *Span) {
+ // We've done all the work to know this is a trace we are putting in our cache, so
+ // now is when we can calculate the size of it so that our cache size management
+ // code works properly.
+ sp.ArrivalTime = time.Now()
+ sp.DataSize = sp.GetDataSize()
+ t.DataSize += sp.DataSize
t.spans = append(t.spans, sp)
+ t.totalImpact = 0
+}
+
+// CacheImpact calculates an abstract value for something we're calling cache impact, which is
+// the sum of the CacheImpact of all of the spans in a trace. We use it to order traces
+// so we can eject the ones that having the most impact on the cache size, but balancing that
+// against preferring to keep newer spans.
+func (t *Trace) CacheImpact(traceTimeout time.Duration) int {
+ if t.totalImpact == 0 {
+ for _, sp := range t.GetSpans() {
+ t.totalImpact += sp.CacheImpact(traceTimeout)
+ }
+ }
+ return t.totalImpact
}
-// GetSpans returns the list of spans in this trace
+// GetSpans returns the list of descendants in this trace
func (t *Trace) GetSpans() []*Span {
return t.spans
+}
+
+// DescendantCount gets the number of descendants of all kinds currently in this trace
+func (t *Trace) DescendantCount() uint {
+ return uint(len(t.spans))
+}
+
+func (t *Trace) GetSamplerKey() (string, bool) {
+ if IsLegacyAPIKey(t.APIKey) {
+ return t.Dataset, true
+ }
+
+ env := ""
+ for _, sp := range t.GetSpans() {
+ if sp.Event.Environment != "" {
+ env = sp.Event.Environment
+ break
+ }
+ }
+ return env, false
}
// Span is an event that shows up with a trace ID, so will be part of a Trace
type Span struct {
Event
- TraceID string
+ TraceID string
+ DataSize int
+ ArrivalTime time.Time
+}
+
+// GetDataSize computes the size of the Data element of the Span.
+// Note that it's not the full size of the span, but we're mainly using this for
+// relative ordering, not absolute calculations.
+func (sp *Span) GetDataSize() int {
+ total := 0
+ // the data types we should be getting from JSON are:
+ // float64, int64, bool, string
+ for _, v := range sp.Data {
+ switch v.(type) {
+ case bool:
+ total += 1
+ case float64, int64, int:
+ total += 8
+ case string, []byte:
+ total += len(v.(string))
+ default:
+ total += 8 // catchall
+ }
+ }
+ return total
+}
+
+// cacheImpactFactor controls how much more we weigh older spans compared to newer ones;
+// setting this to 1 means they're not weighted by duration
+const cacheImpactFactor = 4
+
+// CacheImpact calculates an abstract value for something we're calling cache impact, which is
+// the product of the size of the span and a factor related to the amount of time the span
+// has been stored in the cache, based on the TraceTimeout value.
+func (sp *Span) CacheImpact(traceTimeout time.Duration) int {
+ // multiplier will be a value from 1-cacheImpactFactor, depending on how long the
+ // span has been in the cache compared to the traceTimeout. It might go higher
+ // during the brief period between traceTimeout and the time when the span is sent.
+ multiplier := int(cacheImpactFactor*time.Since(sp.ArrivalTime)/traceTimeout) + 1
+ // We can assume DataSize was set when the span was added.
+ return multiplier * sp.DataSize
+}
+
+func IsLegacyAPIKey(apiKey string) bool {
+ return len(apiKey) == 32
}
diff --git a/types/event_test.go b/types/event_test.go
new file mode 100644
index 0000000000..28dbe8b661
--- /dev/null
+++ b/types/event_test.go
@@ -0,0 +1,83 @@
+package types
+
+import (
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func TestSpan_GetDataSize(t *testing.T) {
+ tests := []struct {
+ name string
+ numInts int
+ numStrings int
+ want int
+ }{
+ {"all ints small", 10, 0, 80},
+ {"all ints large", 100, 0, 800},
+ {"all strings small", 0, 10, 45},
+ {"all strings large", 0, 100, 4950},
+ {"mixed small", 10, 10, 125},
+ {"mixed large", 100, 100, 5750},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ sp := &Span{
+ TraceID: tt.name,
+ Event: Event{
+ Data: make(map[string]any),
+ },
+ }
+ for i := 0; i < tt.numInts; i++ {
+ sp.Data[tt.name+"int"+strconv.Itoa(i)] = i
+ }
+ for i := 0; i < tt.numStrings; i++ {
+ sp.Data[tt.name+"str"+strconv.Itoa(i)] = strings.Repeat("x", i)
+ }
+ if got := sp.GetDataSize(); got != tt.want {
+ t.Errorf("Span.CalculateSize() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+// These benchmarks were just to verify that the size calculation is acceptable
+// even on big spans. The P99 for normal (20-field) spans shows that it will take ~1
+// microsecond (on an m1 laptop) but a 1000-field span (extremely rare!) will take
+// ~10 microseconds. Since these happen once per span, when adding it to a trace,
+// we don't expect this to be a performance issue.
+func BenchmarkSpan_CalculateSizeSmall(b *testing.B) {
+ sp := &Span{
+ Event: Event{
+ Data: make(map[string]any),
+ },
+ }
+ for i := 0; i < 10; i++ {
+ sp.Data["int"+strconv.Itoa(i)] = i
+ }
+ for i := 0; i < 10; i++ {
+ sp.Data["str"+strconv.Itoa(i)] = strings.Repeat("x", i)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ sp.GetDataSize()
+ }
+}
+
+func BenchmarkSpan_CalculateSizeLarge(b *testing.B) {
+ sp := &Span{
+ Event: Event{
+ Data: make(map[string]any),
+ },
+ }
+ for i := 0; i < 500; i++ {
+ sp.Data["int"+strconv.Itoa(i)] = i
+ }
+ for i := 0; i < 500; i++ {
+ sp.Data["str"+strconv.Itoa(i)] = strings.Repeat("x", i)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ sp.GetDataSize()
+ }
+}