From 91f3f75ba78c78486d882d3fbf96ab902baa54f3 Mon Sep 17 00:00:00 2001 From: leonardo Date: Mon, 28 Dec 2020 23:54:44 +0000 Subject: [PATCH 001/351] Add redis password and TLS basic configuration to peer management. (#201) --- README.md | 6 +++--- config/config.go | 8 ++++++++ config/config_test.go | 18 +++++++++++++++++- config/file_config.go | 18 ++++++++++++++++++ config/mock.go | 16 ++++++++++++++++ config_complete.toml | 10 ++++++++++ internal/peer/redis.go | 35 +++++++++++++++++++++++++++++------ 7 files changed, 101 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index ec797cffcb..7c50297ee7 100644 --- a/README.md +++ b/README.md @@ -56,10 +56,10 @@ To enable the redis-based config: When launched in redis-config mode, Refinery needs a redis host to use for managing the list of peers in the Refinery cluster. This hostname and port can be specified in one of two ways: -- set the `REFINERY_REDIS_HOST` environment variable -- set the `RedisHost` field in the config file +- set the `REFINERY_REDIS_HOST` environment variable (and optionally the `REFINERY_REDIS_PASSWORD` environment variable) +- set the `RedisHost` field in the config file (and optionally the `RedisPassword` field in the config file) -The redis host should be a hostname and a port, for example `redis.mydomain.com:6379`. The example config file has `localhost:6379` which obviously will not work with more than one host. +The redis host should be a hostname and a port, for example `redis.mydomain.com:6379`. The example config file has `localhost:6379` which obviously will not work with more than one host. When TLS is required to connect to the redis instance set the `UseTLS` config to `true`. ## How sampling decisions are made diff --git a/config/config.go b/config/config.go index ff7f114454..85f8780dfe 100644 --- a/config/config.go +++ b/config/config.go @@ -37,6 +37,14 @@ type Config interface { // management. 
GetRedisHost() (string, error) + // GetRedisPassword returns the password of a Redis instance to use for peer + // management. + GetRedisPassword() (string, error) + + // GetUseTLS returns true when TLS must be enabled to dial the Redis instance to + // use for peer management. + GetUseTLS() (bool, error) + + // GetHoneycombAPI returns the base URL (protocol, hostname, and port) of // the upstream Honeycomb API server GetHoneycombAPI() (string, error) diff --git a/config/config_test.go b/config/config_test.go index 8ed47f0193..a4470d3661 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestRedisEnvVar(t *testing.T) { +func TestRedisHostEnvVar(t *testing.T) { host := "redis.magic:1337" os.Setenv("REFINERY_REDIS_HOST", host) defer os.Unsetenv("REFINERY_REDIS_HOST") @@ -28,6 +28,22 @@ func TestRedisEnvVar(t *testing.T) { } } +func TestRedisPasswordEnvVar(t *testing.T) { + password := "test-password" + os.Setenv("REFINERY_REDIS_PASSWORD", password) + defer os.Unsetenv("REFINERY_REDIS_PASSWORD") + + c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {}) + + if err != nil { + t.Error(err) + } + + if d, _ := c.GetRedisPassword(); d != password { + t.Error("received", d, "expected", password) + } +} + func TestReload(t *testing.T) { tmpDir, err := ioutil.TempDir("", "") assert.NoError(t, err) diff --git a/config/file_config.go b/config/file_config.go index f06b85f53e..48e55e0349 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -106,6 +106,8 @@ type PeerManagementConfig struct { Type string `validate:"required,oneof= file redis"` Peers []string `validate:"dive,url"` RedisHost string + RedisPassword string + UseTLS bool IdentifierInterfaceName string UseIPV6Identifier bool RedisIdentifier string @@ -116,11 +118,13 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c := viper.New()
c.BindEnv("PeerManagement.RedisHost", "REFINERY_REDIS_HOST") + c.BindEnv("PeerManagement.RedisPassword", "REFINERY_REDIS_PASSWORD") c.SetDefault("ListenAddr", "0.0.0.0:8080") c.SetDefault("PeerListenAddr", "0.0.0.0:8081") c.SetDefault("APIKeys", []string{"*"}) c.SetDefault("PeerManagement.Peers", []string{"http://127.0.0.1:8081"}) c.SetDefault("PeerManagement.Type", "file") + c.SetDefault("PeerManagement.UseTLS", false) c.SetDefault("PeerManagement.UseIPV6Identifier", false) c.SetDefault("HoneycombAPI", "https://api.honeycomb.io") c.SetDefault("Logger", "logrus") @@ -403,6 +407,20 @@ func (f *fileConfig) GetRedisHost() (string, error) { return f.config.GetString("PeerManagement.RedisHost"), nil } +func (f *fileConfig) GetRedisPassword() (string, error) { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.config.GetString("PeerManagement.RedisPassword"), nil +} + +func (f *fileConfig) GetUseTLS() (bool, error) { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.config.GetBool("PeerManagement.UseTLS"), nil +} + func (f *fileConfig) GetIdentifierInterfaceName() (string, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config/mock.go b/config/mock.go index 6064bffd97..16f2e3b47b 100644 --- a/config/mock.go +++ b/config/mock.go @@ -35,6 +35,10 @@ type MockConfig struct { GetPeersVal []string GetRedisHostErr error GetRedisHostVal string + GetRedisPasswordErr error + GetRedisPasswordVal string + GetUseTLSErr error + GetUseTLSVal bool GetSamplerTypeErr error GetSamplerTypeVal interface{} GetMetricsTypeErr error @@ -150,6 +154,18 @@ func (m *MockConfig) GetRedisHost() (string, error) { return m.GetRedisHostVal, m.GetRedisHostErr } +func (m *MockConfig) GetRedisPassword() (string, error) { + m.Mux.RLock() + defer m.Mux.RUnlock() + + return m.GetRedisPasswordVal, m.GetRedisPasswordErr +} +func (m *MockConfig) GetUseTLS() (bool, error) { + m.Mux.RLock() + defer m.Mux.RUnlock() + + return m.GetUseTLSVal, m.GetUseTLSErr +} func (m *MockConfig) GetMetricsType() 
(string, error) { m.Mux.RLock() defer m.Mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index 8e37a591d5..e965ce9a77 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -126,6 +126,16 @@ Metrics = "honeycomb" # Not eligible for live reload. # RedisHost = "localhost:6379" +# RedisPassword is the password used to connect to redis for peer cluster membership management. +# If the environment variable 'REFINERY_REDIS_PASSWORD' is set it takes +# precedence and this value is ignored. +# Not eligible for live reload. +# RedisPassword = "" + +# UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. +# Not eligible for live reload. +# UseTLS = false + # IdentifierInterfaceName is optional. By default, when using RedisHost, Refinery will use # the local hostname to identify itself to other peers in Redis. If your environment # requires that you use IPs as identifiers (for example, if peers can't resolve eachother diff --git a/internal/peer/redis.go b/internal/peer/redis.go index 1e947f90fa..70ba3c3d3b 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -2,6 +2,7 @@ package peer import ( "context" + "crypto/tls" "errors" "fmt" "net" @@ -51,18 +52,14 @@ func newRedisPeers(c config.Config) (Peers, error) { redisHost = "localhost:6379" } + options := buildOptions(c) pool := &redis.Pool{ MaxIdle: 3, MaxActive: 30, IdleTimeout: 5 * time.Minute, Wait: true, Dial: func() (redis.Conn, error) { - return redis.Dial( - "tcp", redisHost, - redis.DialReadTimeout(1*time.Second), - redis.DialConnectTimeout(1*time.Second), - redis.DialDatabase(0), // TODO enable multiple databases for multiple samproxies - ) + return redis.Dial("tcp", redisHost, options...) 
}, } @@ -167,6 +164,32 @@ func (p *redisPeers) watchPeers() { } } +func buildOptions(c config.Config) []redis.DialOption { + options := []redis.DialOption{ + redis.DialReadTimeout(1 * time.Second), + redis.DialConnectTimeout(1 * time.Second), + redis.DialDatabase(0), // TODO enable multiple databases for multiple samproxies + } + + password, _ := c.GetRedisPassword() + if password != "" { + options = append(options, redis.DialPassword(password)) + } + + useTLS, _ := c.GetUseTLS() + if useTLS { + tlsConfig := &tls.Config{ + MinVersion: tls.VersionTLS12, + } + options = append(options, + redis.DialTLSConfig(tlsConfig), + redis.DialTLSSkipVerify(true), + redis.DialUseTLS(true)) + } + + return options +} + func publicAddr(c config.Config) (string, error) { // compute the public version of my peer listen address listenAddr, _ := c.GetPeerListenAddr() From acf076355aec6331ecb77d8dda179f52ae103252 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Tue, 19 Jan 2021 16:12:43 +0000 Subject: [PATCH 002/351] Add /debug/trace/{traceID} endpoint (#204) Add a diagnostics HTTP GET endpoint that returns the debug trace information, including the assigned node address using the configured sharding algorithm. 
The endpoint can be called like this: http://localhost:9000/debug/trace/123abc The response is a JSON object with two properties and looks like the following: {"traceID":"123abc","node":"http://localhost:12345"} --- route/route.go | 7 +++++++ route/route_test.go | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/route/route.go b/route/route.go index 9f17268c42..3dbdcf93ff 100644 --- a/route/route.go +++ b/route/route.go @@ -132,6 +132,7 @@ func (r *Router) LnS(incomingOrPeer string) { muxxer.HandleFunc("/alive", r.alive).Name("local health") muxxer.HandleFunc("/panic", r.panic).Name("intentional panic") muxxer.HandleFunc("/version", r.version).Name("report version info") + muxxer.HandleFunc("/debug/trace/{traceID}", r.debugTrace).Name("get debug information for given trace ID") // require an auth header for events and batches authedMuxxer := muxxer.PathPrefix("/1/").Methods("POST").Subrouter() @@ -199,6 +200,12 @@ func (r *Router) version(w http.ResponseWriter, req *http.Request) { w.Write([]byte(fmt.Sprintf(`{"source":"refinery","version":"%s"}`, r.versionStr))) } +func (r *Router) debugTrace(w http.ResponseWriter, req *http.Request) { + traceID := mux.Vars(req)["traceID"] + shard := r.Sharder.WhichShard(traceID) + w.Write([]byte(fmt.Sprintf(`{"traceID":"%s","node":"%s"}`, traceID, shard.GetAddress()))) +} + // event is handler for /1/event/ func (r *Router) event(w http.ResponseWriter, req *http.Request) { r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_event") diff --git a/route/route_test.go b/route/route_test.go index ee26527a98..c4e75a94ca 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -14,6 +14,8 @@ import ( "testing" "time" + "github.com/gorilla/mux" + "github.com/honeycombio/refinery/sharder" "github.com/klauspost/compress/zstd" "github.com/vmihailenco/msgpack/v4" ) @@ -234,3 +236,35 @@ func TestUnmarshal(t *testing.T) { t.Error("Expecting",
now, "Received", b) } } + +func TestDebugTrace(t *testing.T) { + req, _ := http.NewRequest("GET", "/debug/trace/123abcdef", nil) + req = mux.SetURLVars(req, map[string]string{"traceID": "123abcdef"}) + + rr := httptest.NewRecorder() + router := &Router{ + Sharder: &TestSharder{}, + } + + router.debugTrace(rr, req) + if body := rr.Body.String(); body != `{"traceID":"123abcdef","node":"http://localhost:12345"}` { + t.Error(body) + } +} + +type TestSharder struct{} + +func (s *TestSharder) MyShard() sharder.Shard { return nil } + +func (s *TestSharder) WhichShard(string) sharder.Shard { + return &TestShard{ + addr: "http://localhost:12345", + } +} + +type TestShard struct { + addr string +} + +func (s *TestShard) Equals(other sharder.Shard) bool { return true } +func (s *TestShard) GetAddress() string { return s.addr } From 5dcb4af9e13785dfd7980c79d36c649899e3efb8 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Mon, 25 Jan 2021 12:33:53 +0000 Subject: [PATCH 003/351] Add OTLP ingest (#202) Adds OTLP ingest support to refinery. Initially this PR adds ingest support but does not add support for sending events to peer refinery processes or to the Honeycomb ingest API over gRPC and continues to use HTTP/JSON for that communication. The primary use-case for adding OTLP here is for ingesting spans directly from OTel SDKs and Collector using OTLP over gRPC. This will allow users of OpenTelemetry to send data to refinery without having to use a custom span exporter and instead use the SDK provided OTLP exporter. 
--- config/config.go | 4 + config/file_config.go | 19 +- config/mock.go | 8 + config_complete.toml | 7 + go.mod | 4 + go.sum | 44 + internal/opentelemetry-proto-gen/README.md | 3 + .../collector/logs/v1/logs_service.pb.go | 213 +++ .../collector/logs/v1/logs_service.pb.gw.go | 163 ++ .../metrics/v1/metrics_service.pb.go | 213 +++ .../metrics/v1/metrics_service.pb.gw.go | 163 ++ .../collector/trace/v1/trace_config.pb.go | 361 ++++ .../collector/trace/v1/trace_service.pb.go | 213 +++ .../collector/trace/v1/trace_service.pb.gw.go | 163 ++ .../common/v1/common.pb.go | 430 +++++ .../logs/v1/logs.pb.go | 448 +++++ .../metrics/experimental/configservice.pb.go | 423 +++++ .../metrics/v1/metrics.pb.go | 1501 +++++++++++++++++ .../resource/v1/resource.pb.go | 100 ++ .../trace/v1/trace.pb.go | 815 +++++++++ route/route.go | 277 ++- route/route_test.go | 123 ++ types/event.go | 1 + 23 files changed, 5690 insertions(+), 6 deletions(-) create mode 100644 internal/opentelemetry-proto-gen/README.md create mode 100644 internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go create mode 100644 internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go create mode 100644 internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go create mode 100644 internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go create mode 100644 internal/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go create mode 100644 internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go create mode 100644 internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go create mode 100644 internal/opentelemetry-proto-gen/common/v1/common.pb.go create mode 100644 internal/opentelemetry-proto-gen/logs/v1/logs.pb.go create mode 100644 internal/opentelemetry-proto-gen/metrics/experimental/configservice.pb.go create mode 100644 internal/opentelemetry-proto-gen/metrics/v1/metrics.pb.go create mode 100644 
internal/opentelemetry-proto-gen/resource/v1/resource.pb.go create mode 100644 internal/opentelemetry-proto-gen/trace/v1/trace.pb.go diff --git a/config/config.go b/config/config.go index 85f8780dfe..09d341c0e4 100644 --- a/config/config.go +++ b/config/config.go @@ -25,6 +25,10 @@ type Config interface { // peer traffic GetPeerListenAddr() (string, error) + // GetGRPCListenAddr returns the address and port on which to listen for + // incoming events over gRPC + GetGRPCListenAddr() (string, error) + // GetAPIKeys returns a list of Honeycomb API keys GetAPIKeys() ([]string, error) diff --git a/config/file_config.go b/config/file_config.go index 48e55e0349..dc18a6fce5 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -53,8 +53,9 @@ func (r *RulesBasedSamplerConfig) String() string { } type configContents struct { - ListenAddr string `validate:"required"` - PeerListenAddr string `validate:"required"` + ListenAddr string `validate:"required"` + PeerListenAddr string `validate:"required"` + GRPCListenAddr string APIKeys []string `validate:"required"` HoneycombAPI string `validate:"required,url"` Logger string `validate:"required,oneof= logrus honeycomb"` @@ -379,6 +380,20 @@ func (f *fileConfig) GetPeerListenAddr() (string, error) { return f.conf.PeerListenAddr, nil } +func (f *fileConfig) GetGRPCListenAddr() (string, error) { + f.mux.RLock() + defer f.mux.RUnlock() + + // GRPC listen addr is optional, only check value is valid if not empty + if f.conf.GRPCListenAddr != "" { + _, _, err := net.SplitHostPort(f.conf.GRPCListenAddr) + if err != nil { + return "", err + } + } + return f.conf.GRPCListenAddr, nil +} + func (f *fileConfig) GetAPIKeys() ([]string, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config/mock.go b/config/mock.go index 16f2e3b47b..16a41747db 100644 --- a/config/mock.go +++ b/config/mock.go @@ -22,6 +22,8 @@ type MockConfig struct { GetListenAddrVal string GetPeerListenAddrErr error GetPeerListenAddrVal string + 
GetGRPCListenAddrErr error + GetGRPCListenAddrVal string GetLoggerTypeErr error GetLoggerTypeVal string GetHoneycombLoggerConfigErr error @@ -114,6 +116,12 @@ func (m *MockConfig) GetPeerListenAddr() (string, error) { return m.GetPeerListenAddrVal, m.GetPeerListenAddrErr } +func (m *MockConfig) GetGRPCListenAddr() (string, error) { + m.Mux.RLock() + defer m.Mux.RUnlock() + + return m.GetGRPCListenAddrVal, m.GetGRPCListenAddrErr +} func (m *MockConfig) GetLoggerType() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index e965ce9a77..7ff5ae0f69 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -9,6 +9,13 @@ # Not eligible for live reload. ListenAddr = "0.0.0.0:8080" +# GRPCListenAddr is the IP and port on which to listen for incoming events over +# gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put +# something like nginx in front to do the decryption. +# Should be of the form 0.0.0.0:9090 +# Not eligible for live reload. +GRPCListenAddr = "0.0.0.0:9090" + # PeerListenAddr is the IP and port on which to listen for traffic being # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL # put something like nginx in front to do the decryption. 
Must be different from diff --git a/go.mod b/go.mod index 276bd77a6b..e0f94849c6 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,10 @@ require ( github.com/garyburd/redigo v1.6.0 github.com/go-playground/universal-translator v0.17.0 // indirect github.com/go-playground/validator v9.31.0+incompatible + github.com/gogo/protobuf v1.3.1 + github.com/golang/protobuf v1.4.3 github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d + github.com/grpc-ecosystem/grpc-gateway v1.12.1 github.com/hashicorp/golang-lru v0.5.1 github.com/honeycombio/dynsampler-go v0.2.1 github.com/honeycombio/libhoney-go v1.12.4 @@ -35,6 +38,7 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.11 golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6 // indirect golang.org/x/text v0.3.3 // indirect + google.golang.org/grpc v1.32.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/ini.v1 v1.57.0 // indirect diff --git a/go.sum b/go.sum index aed211cfef..40ad21cbe9 100644 --- a/go.sum +++ b/go.sum @@ -20,6 +20,7 @@ github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics 
v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -29,8 +30,10 @@ github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -41,6 +44,9 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= @@ -80,6 +86,8 @@ github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+Pugk github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -90,13 +98,23 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf 
v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -111,6 +129,8 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -148,6 +168,7 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= @@ -204,6 +225,8 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -215,6 +238,7 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -312,6 +336,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0O golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -353,6 +378,7 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools 
v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -361,6 +387,7 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -369,6 +396,7 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -388,10 +416,24 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto 
v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= @@ -410,11 +452,13 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/internal/opentelemetry-proto-gen/README.md b/internal/opentelemetry-proto-gen/README.md new file mode 100644 index 0000000000..5cfc56f03c --- /dev/null +++ b/internal/opentelemetry-proto-gen/README.md @@ -0,0 +1,3 @@ +# OTLP Protobuf Definitions + +The definitions can be found [here](https://github.com/open-telemetry/opentelemetry-proto/tree/59c488bfb8fb6d0458ad6425758b70259ff4a2bd). 
\ No newline at end of file diff --git a/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go b/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go new file mode 100644 index 0000000000..0ee0db23ce --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go @@ -0,0 +1,213 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/collector/logs/v1/logs_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/logs/v1" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ExportLogsServiceRequest struct { + // An array of ResourceLogs. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. 
+ ResourceLogs []*v1.ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportLogsServiceRequest) Reset() { *m = ExportLogsServiceRequest{} } +func (m *ExportLogsServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportLogsServiceRequest) ProtoMessage() {} +func (*ExportLogsServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8e3bf87aaa43acd4, []int{0} +} +func (m *ExportLogsServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportLogsServiceRequest.Unmarshal(m, b) +} +func (m *ExportLogsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportLogsServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportLogsServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportLogsServiceRequest.Merge(m, src) +} +func (m *ExportLogsServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportLogsServiceRequest.Size(m) +} +func (m *ExportLogsServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportLogsServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportLogsServiceRequest proto.InternalMessageInfo + +func (m *ExportLogsServiceRequest) GetResourceLogs() []*v1.ResourceLogs { + if m != nil { + return m.ResourceLogs + } + return nil +} + +type ExportLogsServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportLogsServiceResponse) Reset() { *m = ExportLogsServiceResponse{} } +func (m *ExportLogsServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportLogsServiceResponse) ProtoMessage() {} +func (*ExportLogsServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8e3bf87aaa43acd4, []int{1} +} +func (m 
*ExportLogsServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportLogsServiceResponse.Unmarshal(m, b) +} +func (m *ExportLogsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportLogsServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportLogsServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportLogsServiceResponse.Merge(m, src) +} +func (m *ExportLogsServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportLogsServiceResponse.Size(m) +} +func (m *ExportLogsServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportLogsServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportLogsServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportLogsServiceRequest)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest") + proto.RegisterType((*ExportLogsServiceResponse)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/collector/logs/v1/logs_service.proto", fileDescriptor_8e3bf87aaa43acd4) +} + +var fileDescriptor_8e3bf87aaa43acd4 = []byte{ + // 263 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xc8, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0xcf, 0xc9, 0x4f, 0x2f, 0xd6, 0x2f, + 0x33, 0x04, 0xd3, 0xf1, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x7a, 0x60, 0x45, 0x42, 0xaa, + 0x28, 0x3a, 0x21, 0x82, 0x7a, 0x70, 0x9d, 0x7a, 0x20, 0x1d, 0x7a, 0x65, 0x86, 0x52, 0x6a, 0xd8, + 0x2c, 0x40, 0x36, 0x16, 0xa2, 0x53, 0x29, 0x8b, 0x4b, 0xc2, 0xb5, 0xa2, 0x20, 0xbf, 0xa8, 0xc4, + 0x27, 0x3f, 0xbd, 0x38, 0x18, 0x62, 0x53, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x90, 0x1f, + 0x17, 0x6f, 0x51, 0x6a, 0x71, 0x7e, 0x69, 0x51, 0x72, 0x6a, 
0x3c, 0x48, 0x8b, 0x04, 0xa3, 0x02, + 0xb3, 0x06, 0xb7, 0x91, 0xa6, 0x1e, 0x36, 0x27, 0x40, 0x2d, 0xd6, 0x0b, 0x82, 0xea, 0x00, 0x99, + 0x17, 0xc4, 0x53, 0x84, 0xc4, 0x53, 0x92, 0xe6, 0x92, 0xc4, 0x62, 0x57, 0x71, 0x41, 0x7e, 0x5e, + 0x71, 0xaa, 0xd1, 0x5c, 0x46, 0x2e, 0x6e, 0x24, 0x71, 0xa1, 0x5e, 0x46, 0x2e, 0x36, 0x88, 0x6a, + 0x21, 0x7b, 0x3d, 0xa2, 0xfc, 0xac, 0x87, 0xcb, 0x23, 0x52, 0x0e, 0xe4, 0x1b, 0x00, 0x71, 0x9d, + 0x12, 0x83, 0x53, 0x1b, 0x23, 0x97, 0x46, 0x66, 0x3e, 0x71, 0x06, 0x39, 0x09, 0x20, 0x99, 0x11, + 0x00, 0x52, 0x13, 0xc0, 0x18, 0xe5, 0x96, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, + 0xab, 0x0f, 0x32, 0x45, 0x17, 0x11, 0x3b, 0x28, 0x86, 0xea, 0x42, 0xe2, 0x2a, 0x3d, 0x35, 0x4f, + 0x3f, 0x1d, 0x4b, 0x9a, 0x48, 0x62, 0x03, 0xcb, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x49, + 0xa7, 0x2f, 0x4a, 0x43, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// LogsServiceClient is the client API for LogsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LogsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. 
+ Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) +} + +type logsServiceClient struct { + cc *grpc.ClientConn +} + +func NewLogsServiceClient(cc *grpc.ClientConn) LogsServiceClient { + return &logsServiceClient{cc} +} + +func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) { + out := new(ExportLogsServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LogsServiceServer is the server API for LogsService service. +type LogsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) +} + +// UnimplementedLogsServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedLogsServiceServer struct { +} + +func (*UnimplementedLogsServiceServer) Export(ctx context.Context, req *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterLogsServiceServer(s *grpc.Server, srv LogsServiceServer) { + s.RegisterService(&_LogsService_serviceDesc, srv) +} + +func _LogsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportLogsServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LogsServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LogsServiceServer).Export(ctx, req.(*ExportLogsServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _LogsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService", + HandlerType: (*LogsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _LogsService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto", +} diff --git a/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go b/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go new file mode 100644 index 0000000000..8003733add --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/logs/v1/logs_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client LogsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportLogsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server LogsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportLogsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", 
err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterLogsServiceHandlerServer registers the http handlers for service LogsService to "mux". +// UnaryRPC :call LogsServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +func RegisterLogsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server LogsServiceServer) error { + + mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_LogsService_Export_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_LogsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterLogsServiceHandlerFromEndpoint is same as RegisterLogsServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterLogsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterLogsServiceHandler(ctx, mux, conn) +} + +// RegisterLogsServiceHandler registers the http handlers for service LogsService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterLogsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterLogsServiceHandlerClient(ctx, mux, NewLogsServiceClient(conn)) +} + +// RegisterLogsServiceHandlerClient registers the http handlers for service LogsService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LogsServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LogsServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LogsServiceClient" to call the correct interceptors. 
+func RegisterLogsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client LogsServiceClient) error { + + mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_LogsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_LogsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_LogsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "logs"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_LogsService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go b/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go new file mode 100644 index 0000000000..2fe6fe69b0 --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go @@ -0,0 +1,213 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/metrics/v1" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ExportMetricsServiceRequest struct { + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. 
+ ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } +func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceRequest) ProtoMessage() {} +func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_75fb6015e6e64798, []int{0} +} +func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b) +} +func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) +} +func (m *ExportMetricsServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportMetricsServiceRequest.Size(m) +} +func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo + +func (m *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics { + if m != nil { + return m.ResourceMetrics + } + return nil +} + +type ExportMetricsServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } +func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceResponse) ProtoMessage() {} +func (*ExportMetricsServiceResponse) 
Descriptor() ([]byte, []int) { + return fileDescriptor_75fb6015e6e64798, []int{1} +} +func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b) +} +func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) +} +func (m *ExportMetricsServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportMetricsServiceResponse.Size(m) +} +func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest") + proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/collector/metrics/v1/metrics_service.proto", fileDescriptor_75fb6015e6e64798) +} + +var fileDescriptor_75fb6015e6e64798 = []byte{ + // 264 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xcb, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0x07, 0x89, 0x66, 0x26, 0x17, 0xeb, + 0x97, 0x19, 0xc2, 0x98, 0xf1, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x7a, 0x60, 0xa5, 0x42, + 0x1a, 0x28, 0xfa, 0x21, 0x82, 0x7a, 0x70, 0xfd, 0x7a, 0x50, 0x4d, 0x7a, 0x65, 0x86, 0x52, 0x3a, + 0xd8, 0x6c, 0xc2, 0x34, 0x1f, 0x62, 0x84, 0x52, 0x25, 0x97, 0xb4, 0x6b, 0x45, 0x41, 0x7e, 0x51, + 0x89, 
0x2f, 0x44, 0x38, 0x18, 0x62, 0x6b, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x50, 0x14, + 0x97, 0x40, 0x51, 0x6a, 0x71, 0x7e, 0x69, 0x51, 0x72, 0x6a, 0x3c, 0x54, 0xa3, 0x04, 0xa3, 0x02, + 0xb3, 0x06, 0xb7, 0x91, 0xbe, 0x1e, 0x36, 0x17, 0x21, 0xdc, 0xa1, 0x17, 0x04, 0xd5, 0x07, 0x35, + 0x38, 0x88, 0xbf, 0x08, 0x55, 0x40, 0x49, 0x8e, 0x4b, 0x06, 0xbb, 0xd5, 0xc5, 0x05, 0xf9, 0x79, + 0xc5, 0xa9, 0x46, 0x6b, 0x18, 0xb9, 0xf8, 0x50, 0xa5, 0x84, 0x66, 0x32, 0x72, 0xb1, 0x41, 0xf4, + 0x08, 0xb9, 0xea, 0x11, 0x1b, 0x22, 0x7a, 0x78, 0x3c, 0x28, 0xe5, 0x46, 0xa9, 0x31, 0x10, 0xc7, + 0x2a, 0x31, 0x38, 0xf5, 0x33, 0x72, 0x69, 0x67, 0xe6, 0x13, 0x6d, 0x9c, 0x93, 0x30, 0xaa, 0x49, + 0x01, 0x20, 0x95, 0x01, 0x8c, 0x51, 0x9e, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, + 0xb9, 0xfa, 0x20, 0xb3, 0x74, 0x11, 0x51, 0x89, 0x62, 0xb4, 0x2e, 0x24, 0x62, 0xd3, 0x53, 0xf3, + 0xf4, 0xd3, 0xb1, 0xa7, 0xa4, 0x24, 0x36, 0xb0, 0x12, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, + 0xaa, 0xdd, 0xdf, 0x49, 0x7c, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetricsServiceClient is the client API for MetricsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. 
+ Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) +} + +type metricsServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) { + out := new(ExportMetricsServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricsServiceServer is the server API for MetricsService service. +type MetricsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) +} + +// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedMetricsServiceServer struct { +} + +func (*UnimplementedMetricsServiceServer) Export(ctx context.Context, req *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { + s.RegisterService(&_MetricsService_serviceDesc, srv) +} + +func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportMetricsServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _MetricsService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto", +} diff --git a/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go b/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go new file mode 100644 index 0000000000..8158c98a62 --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. 
+// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportMetricsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server MetricsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportMetricsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, 
"%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterMetricsServiceHandlerServer registers the http handlers for service MetricsService to "mux". +// UnaryRPC :call MetricsServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +func RegisterMetricsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MetricsServiceServer) error { + + mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_MetricsService_Export_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterMetricsServiceHandlerFromEndpoint is same as RegisterMetricsServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterMetricsServiceHandler(ctx, mux, conn) +} + +// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn)) +} + +// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "MetricsServiceClient" to call the correct interceptors. 
+func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error { + + mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_MetricsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_MetricsService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/internal/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go new file mode 100644 index 0000000000..aa4bfb6b00 --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go @@ -0,0 +1,361 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/trace/v1/trace_config.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// How spans should be sampled: +// - Always off +// - Always on +// - Always follow the parent Span's decision (off if no parent). +type ConstantSampler_ConstantDecision int32 + +const ( + ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 + ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 + ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 +) + +var ConstantSampler_ConstantDecision_name = map[int32]string{ + 0: "ALWAYS_OFF", + 1: "ALWAYS_ON", + 2: "ALWAYS_PARENT", +} + +var ConstantSampler_ConstantDecision_value = map[string]int32{ + "ALWAYS_OFF": 0, + "ALWAYS_ON": 1, + "ALWAYS_PARENT": 2, +} + +func (x ConstantSampler_ConstantDecision) String() string { + return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x)) +} + +func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{1, 0} +} + +// Global configuration of the trace service. All fields must be specified, or +// the default (zero) values will be used for each type. +type TraceConfig struct { + // The global default sampler used to make decisions on span sampling. + // + // Types that are valid to be assigned to Sampler: + // *TraceConfig_ConstantSampler + // *TraceConfig_TraceIdRatioBased + // *TraceConfig_RateLimitingSampler + Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` + // The global default max number of attributes per span. 
+ MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` + // The global default max number of annotation events per span. + MaxNumberOfTimedEvents int64 `protobuf:"varint,5,opt,name=max_number_of_timed_events,json=maxNumberOfTimedEvents,proto3" json:"max_number_of_timed_events,omitempty"` + // The global default max number of attributes per timed event. + MaxNumberOfAttributesPerTimedEvent int64 `protobuf:"varint,6,opt,name=max_number_of_attributes_per_timed_event,json=maxNumberOfAttributesPerTimedEvent,proto3" json:"max_number_of_attributes_per_timed_event,omitempty"` + // The global default max number of link entries per span. + MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` + // The global default max number of attributes per span. + MaxNumberOfAttributesPerLink int64 `protobuf:"varint,8,opt,name=max_number_of_attributes_per_link,json=maxNumberOfAttributesPerLink,proto3" json:"max_number_of_attributes_per_link,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceConfig) Reset() { *m = TraceConfig{} } +func (m *TraceConfig) String() string { return proto.CompactTextString(m) } +func (*TraceConfig) ProtoMessage() {} +func (*TraceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{0} +} +func (m *TraceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TraceConfig.Unmarshal(m, b) +} +func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) +} +func (m *TraceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceConfig.Merge(m, src) +} +func (m *TraceConfig) XXX_Size() int { + return xxx_messageInfo_TraceConfig.Size(m) +} +func (m 
*TraceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TraceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceConfig proto.InternalMessageInfo + +type isTraceConfig_Sampler interface { + isTraceConfig_Sampler() +} + +type TraceConfig_ConstantSampler struct { + ConstantSampler *ConstantSampler `protobuf:"bytes,1,opt,name=constant_sampler,json=constantSampler,proto3,oneof" json:"constant_sampler,omitempty"` +} +type TraceConfig_TraceIdRatioBased struct { + TraceIdRatioBased *TraceIdRatioBased `protobuf:"bytes,2,opt,name=trace_id_ratio_based,json=traceIdRatioBased,proto3,oneof" json:"trace_id_ratio_based,omitempty"` +} +type TraceConfig_RateLimitingSampler struct { + RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof" json:"rate_limiting_sampler,omitempty"` +} + +func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} +func (*TraceConfig_TraceIdRatioBased) isTraceConfig_Sampler() {} +func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} + +func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { + if m != nil { + return m.Sampler + } + return nil +} + +func (m *TraceConfig) GetConstantSampler() *ConstantSampler { + if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok { + return x.ConstantSampler + } + return nil +} + +func (m *TraceConfig) GetTraceIdRatioBased() *TraceIdRatioBased { + if x, ok := m.GetSampler().(*TraceConfig_TraceIdRatioBased); ok { + return x.TraceIdRatioBased + } + return nil +} + +func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { + if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { + return x.RateLimitingSampler + } + return nil +} + +func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { + if m != nil { + return m.MaxNumberOfAttributes + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfTimedEvents() int64 { + if m != nil { + return m.MaxNumberOfTimedEvents + } + return 0 +} + +func (m 
*TraceConfig) GetMaxNumberOfAttributesPerTimedEvent() int64 { + if m != nil { + return m.MaxNumberOfAttributesPerTimedEvent + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfLinks() int64 { + if m != nil { + return m.MaxNumberOfLinks + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfAttributesPerLink() int64 { + if m != nil { + return m.MaxNumberOfAttributesPerLink + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*TraceConfig) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TraceConfig_ConstantSampler)(nil), + (*TraceConfig_TraceIdRatioBased)(nil), + (*TraceConfig_RateLimitingSampler)(nil), + } +} + +// Sampler that always makes a constant decision on span sampling. +type ConstantSampler struct { + Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } +func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } +func (*ConstantSampler) ProtoMessage() {} +func (*ConstantSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{1} +} +func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConstantSampler.Unmarshal(m, b) +} +func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) +} +func (m *ConstantSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConstantSampler.Merge(m, src) +} +func (m *ConstantSampler) XXX_Size() int { + return xxx_messageInfo_ConstantSampler.Size(m) +} +func (m *ConstantSampler) XXX_DiscardUnknown() { + xxx_messageInfo_ConstantSampler.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ConstantSampler proto.InternalMessageInfo + +func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { + if m != nil { + return m.Decision + } + return ConstantSampler_ALWAYS_OFF +} + +// Sampler that tries to uniformly sample traces with a given ratio. +// The ratio of sampling a trace is equal to that of the specified ratio. +type TraceIdRatioBased struct { + // The desired ratio of sampling. Must be within [0.0, 1.0]. + SamplingRatio float64 `protobuf:"fixed64,1,opt,name=samplingRatio,proto3" json:"samplingRatio,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceIdRatioBased) Reset() { *m = TraceIdRatioBased{} } +func (m *TraceIdRatioBased) String() string { return proto.CompactTextString(m) } +func (*TraceIdRatioBased) ProtoMessage() {} +func (*TraceIdRatioBased) Descriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{2} +} +func (m *TraceIdRatioBased) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TraceIdRatioBased.Unmarshal(m, b) +} +func (m *TraceIdRatioBased) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TraceIdRatioBased.Marshal(b, m, deterministic) +} +func (m *TraceIdRatioBased) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceIdRatioBased.Merge(m, src) +} +func (m *TraceIdRatioBased) XXX_Size() int { + return xxx_messageInfo_TraceIdRatioBased.Size(m) +} +func (m *TraceIdRatioBased) XXX_DiscardUnknown() { + xxx_messageInfo_TraceIdRatioBased.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceIdRatioBased proto.InternalMessageInfo + +func (m *TraceIdRatioBased) GetSamplingRatio() float64 { + if m != nil { + return m.SamplingRatio + } + return 0 +} + +// Sampler that tries to sample with a rate per time window. +type RateLimitingSampler struct { + // Rate per second. 
+ Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } +func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } +func (*RateLimitingSampler) ProtoMessage() {} +func (*RateLimitingSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5936aa8fa6443e6f, []int{3} +} +func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b) +} +func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) +} +func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitingSampler.Merge(m, src) +} +func (m *RateLimitingSampler) XXX_Size() int { + return xxx_messageInfo_RateLimitingSampler.Size(m) +} +func (m *RateLimitingSampler) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo + +func (m *RateLimitingSampler) GetQps() int64 { + if m != nil { + return m.Qps + } + return 0 +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value) + proto.RegisterType((*TraceConfig)(nil), "opentelemetry.proto.trace.v1.TraceConfig") + proto.RegisterType((*ConstantSampler)(nil), "opentelemetry.proto.trace.v1.ConstantSampler") + proto.RegisterType((*TraceIdRatioBased)(nil), "opentelemetry.proto.trace.v1.TraceIdRatioBased") + proto.RegisterType((*RateLimitingSampler)(nil), "opentelemetry.proto.trace.v1.RateLimitingSampler") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/trace/v1/trace_config.proto", fileDescriptor_5936aa8fa6443e6f) +} + 
+var fileDescriptor_5936aa8fa6443e6f = []byte{ + // 519 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x4d, 0x6b, 0xdb, 0x40, + 0x10, 0x86, 0xad, 0xb8, 0xf9, 0x9a, 0xe0, 0x44, 0x5e, 0x37, 0x45, 0x94, 0x40, 0x53, 0x51, 0xa8, + 0x2f, 0x96, 0x70, 0x7a, 0x28, 0xed, 0xa1, 0x60, 0xe7, 0xb3, 0x60, 0x1c, 0xa3, 0x18, 0x4a, 0x7d, + 0x59, 0x56, 0xf2, 0x5a, 0x5d, 0x2a, 0xed, 0xba, 0xab, 0xb5, 0x49, 0x2f, 0x3d, 0xf5, 0x1f, 0xf5, + 0x0f, 0x16, 0xad, 0x54, 0xd9, 0x72, 0x12, 0x41, 0x6f, 0x9a, 0x79, 0xf7, 0x7d, 0x66, 0x06, 0x8f, + 0x07, 0x5c, 0x31, 0xa7, 0x5c, 0xd1, 0x88, 0xc6, 0x54, 0xc9, 0x9f, 0xee, 0x5c, 0x0a, 0x25, 0x5c, + 0x25, 0x49, 0x40, 0xdd, 0x65, 0x37, 0xfb, 0xc0, 0x81, 0xe0, 0x33, 0x16, 0x3a, 0x5a, 0x43, 0x27, + 0x25, 0x43, 0x96, 0x74, 0xf4, 0x3b, 0x67, 0xd9, 0xb5, 0x7f, 0x6f, 0xc3, 0xc1, 0x38, 0x0d, 0xce, + 0xb5, 0x07, 0x4d, 0xc0, 0x0c, 0x04, 0x4f, 0x14, 0xe1, 0x0a, 0x27, 0x24, 0x9e, 0x47, 0x54, 0x5a, + 0xc6, 0xa9, 0xd1, 0x3e, 0x38, 0xeb, 0x38, 0x55, 0x20, 0xe7, 0x3c, 0x77, 0xdd, 0x65, 0xa6, 0x9b, + 0x9a, 0x77, 0x14, 0x94, 0x53, 0xc8, 0x87, 0xe7, 0x59, 0x7f, 0x6c, 0x8a, 0x25, 0x51, 0x4c, 0x60, + 0x9f, 0x24, 0x74, 0x6a, 0x6d, 0x69, 0xbe, 0x5b, 0xcd, 0xd7, 0x4d, 0x7e, 0x9e, 0x7a, 0xa9, 0xaf, + 0x9f, 0xda, 0x6e, 0x6a, 0x5e, 0x53, 0x6d, 0x26, 0x51, 0x08, 0xc7, 0x92, 0x28, 0x8a, 0x23, 0x16, + 0x33, 0xc5, 0x78, 0x58, 0x0c, 0x51, 0xd7, 0x45, 0xba, 0xd5, 0x45, 0x3c, 0xa2, 0xe8, 0x20, 0x77, + 0xae, 0x06, 0x69, 0xc9, 0x87, 0x69, 0xf4, 0x1e, 0xac, 0x98, 0xdc, 0x63, 0xbe, 0x88, 0x7d, 0x2a, + 0xb1, 0x98, 0x61, 0xa2, 0x94, 0x64, 0xfe, 0x42, 0xd1, 0xc4, 0x7a, 0x76, 0x6a, 0xb4, 0xeb, 0xde, + 0x71, 0x4c, 0xee, 0x87, 0x5a, 0xbe, 0x9d, 0xf5, 0x0a, 0x11, 0x7d, 0x84, 0x97, 0x65, 0xa3, 0x62, + 0x31, 0x9d, 0x62, 0xba, 0xa4, 0x5c, 0x25, 0xd6, 0xb6, 0xb6, 0xbe, 0x58, 0xb3, 0x8e, 0x53, 0xf9, + 0x52, 0xab, 0x68, 0x0c, 0xed, 0xa7, 0x8a, 0xe2, 0x39, 0x95, 0xeb, 0x28, 0x6b, 0x47, 0x93, 0xec, + 0x47, 0x9b, 0x18, 0x51, 0xb9, 0xc2, 
0xa2, 0x0e, 0xb4, 0xca, 0xd4, 0x88, 0xf1, 0xef, 0x89, 0xb5, + 0xab, 0x01, 0xe6, 0x1a, 0x60, 0x90, 0xe6, 0xd1, 0x35, 0xbc, 0xae, 0x6c, 0x22, 0x75, 0x5b, 0x7b, + 0xda, 0x7c, 0xf2, 0x54, 0xf5, 0x94, 0xd4, 0xdf, 0x87, 0xdd, 0xfc, 0xd7, 0xb1, 0xff, 0x18, 0x70, + 0xb4, 0xb1, 0x41, 0x68, 0x02, 0x7b, 0x53, 0x1a, 0xb0, 0x84, 0x09, 0xae, 0x57, 0xf0, 0xf0, 0xec, + 0xd3, 0x7f, 0xad, 0x60, 0x11, 0x5f, 0xe4, 0x14, 0xaf, 0xe0, 0xd9, 0x17, 0x60, 0x6e, 0xaa, 0xe8, + 0x10, 0xa0, 0x37, 0xf8, 0xd2, 0xfb, 0x7a, 0x87, 0x6f, 0xaf, 0xae, 0xcc, 0x1a, 0x6a, 0xc0, 0xfe, + 0xbf, 0x78, 0x68, 0x1a, 0xa8, 0x09, 0x8d, 0x3c, 0x1c, 0xf5, 0xbc, 0xcb, 0xe1, 0xd8, 0xdc, 0xb2, + 0x3f, 0x40, 0xf3, 0xc1, 0x5a, 0xa2, 0x37, 0xd0, 0xd0, 0x53, 0x31, 0x1e, 0xea, 0xac, 0xee, 0xdd, + 0xf0, 0xca, 0x49, 0xfb, 0x2d, 0xb4, 0x1e, 0x59, 0x36, 0x64, 0x42, 0xfd, 0xc7, 0x3c, 0xd1, 0x96, + 0xba, 0x97, 0x7e, 0xf6, 0x7f, 0xc1, 0x2b, 0x26, 0x2a, 0xe7, 0xee, 0x9b, 0x6b, 0x7f, 0xe0, 0x51, + 0x2a, 0x8d, 0x8c, 0xc9, 0x75, 0xc8, 0xd4, 0xb7, 0x85, 0xef, 0x04, 0x22, 0xd6, 0x17, 0xa3, 0xb3, + 0x3a, 0x19, 0x25, 0x56, 0x27, 0x3b, 0x20, 0x21, 0xe5, 0x6e, 0x28, 0xdc, 0x40, 0x44, 0x11, 0x0d, + 0x94, 0x90, 0xc5, 0x45, 0xf1, 0x77, 0xf4, 0x83, 0x77, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x53, + 0xba, 0x65, 0xf8, 0x78, 0x04, 0x00, 0x00, +} diff --git a/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go new file mode 100644 index 0000000000..425527cdb4 --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go @@ -0,0 +1,213 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ExportTraceServiceRequest struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. 
+ ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } +func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceRequest) ProtoMessage() {} +func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_192a962890318cf4, []int{0} +} +func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b) +} +func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) +} +func (m *ExportTraceServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceRequest.Size(m) +} +func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo + +func (m *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans { + if m != nil { + return m.ResourceSpans + } + return nil +} + +type ExportTraceServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } +func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceResponse) ProtoMessage() {} +func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { + return 
fileDescriptor_192a962890318cf4, []int{1} +} +func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b) +} +func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) +} +func (m *ExportTraceServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceResponse.Size(m) +} +func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportTraceServiceRequest)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest") + proto.RegisterType((*ExportTraceServiceResponse)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/collector/trace/v1/trace_service.proto", fileDescriptor_192a962890318cf4) +} + +var fileDescriptor_192a962890318cf4 = []byte{ + // 265 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xca, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0x2f, 0x29, 0x4a, 0x4c, 0x4e, 0xd5, + 0x2f, 0x33, 0x84, 0x30, 0xe2, 0x8b, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, 0xf5, 0xc0, 0xca, 0x84, + 0xd4, 0x50, 0xf4, 0x42, 0x04, 0xf5, 0xe0, 0x7a, 0xf5, 0xc0, 0x5a, 0xf4, 0xca, 0x0c, 0xa5, 0x34, + 0xb0, 0xd9, 0x81, 0x6a, 0x32, 0x44, 0xb3, 0x52, 0x3e, 0x97, 0xa4, 0x6b, 0x45, 0x41, 0x7e, 0x51, + 0x49, 0x08, 0x48, 0x30, 0x18, 0x62, 0x5b, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 
0x50, 0x10, + 0x17, 0x5f, 0x51, 0x6a, 0x71, 0x7e, 0x69, 0x11, 0xc8, 0x21, 0x05, 0x89, 0x79, 0xc5, 0x12, 0x8c, + 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0xda, 0x7a, 0xd8, 0xdc, 0x01, 0xb3, 0x5d, 0x2f, 0x08, 0xaa, 0x27, + 0x18, 0xa4, 0x25, 0x88, 0xb7, 0x08, 0x99, 0xab, 0x24, 0xc3, 0x25, 0x85, 0xcd, 0xc2, 0xe2, 0x82, + 0xfc, 0xbc, 0xe2, 0x54, 0xa3, 0x45, 0x8c, 0x5c, 0x3c, 0xc8, 0x12, 0x42, 0x13, 0x19, 0xb9, 0xd8, + 0x20, 0xea, 0x85, 0x1c, 0xf5, 0x88, 0xf3, 0xbd, 0x1e, 0x4e, 0x0f, 0x49, 0x39, 0x51, 0x62, 0x04, + 0xc4, 0x89, 0x4a, 0x0c, 0x4e, 0x9d, 0x8c, 0x5c, 0x9a, 0x99, 0xf9, 0x44, 0x1a, 0xe5, 0x24, 0x88, + 0x6c, 0x4a, 0x00, 0x48, 0x55, 0x00, 0x63, 0x94, 0x7b, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, + 0x72, 0x7e, 0xae, 0x3e, 0xc8, 0x1c, 0x5d, 0x44, 0x64, 0xa1, 0x18, 0xab, 0x0b, 0x89, 0xba, 0xf4, + 0xd4, 0x3c, 0xfd, 0x74, 0x6c, 0xa9, 0x24, 0x89, 0x0d, 0xac, 0xc0, 0x18, 0x10, 0x00, 0x00, 0xff, + 0xff, 0xc1, 0x6e, 0x1a, 0x15, 0x56, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TraceServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. 
+ Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) +} + +type traceServiceClient struct { + cc *grpc.ClientConn +} + +func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) { + out := new(ExportTraceServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TraceServiceServer is the server API for TraceService service. +type TraceServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) +} + +// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedTraceServiceServer struct { +} + +func (*UnimplementedTraceServiceServer) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportTraceServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _TraceService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto", +} diff --git a/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go new file mode 100644 index 0000000000..1da38f1cd2 --- /dev/null +++ b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +/* +Package v1 is a reverse proxy. 
+ +It translates gRPC into RESTful JSON APIs. +*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux". +// UnaryRPC :call TraceServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterTraceServiceHandler(ctx, mux, conn) +} + +// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) +} + +// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "TraceServiceClient" to call the correct interceptors. 
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_TraceService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/internal/opentelemetry-proto-gen/common/v1/common.pb.go b/internal/opentelemetry-proto-gen/common/v1/common.pb.go new file mode 100644 index 0000000000..dd951ce8e0 --- /dev/null +++ b/internal/opentelemetry-proto-gen/common/v1/common.pb.go @@ -0,0 +1,430 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/common/v1/common.proto + +package v1 + +import ( + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +type AnyValue struct { + // The value is one of the listed fields. It is valid for all values to be unspecified + // in which case this AnyValue is considered to be "null". + // + // Types that are valid to be assigned to Value: + // *AnyValue_StringValue + // *AnyValue_BoolValue + // *AnyValue_IntValue + // *AnyValue_DoubleValue + // *AnyValue_ArrayValue + // *AnyValue_KvlistValue + Value isAnyValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AnyValue) Reset() { *m = AnyValue{} } +func (m *AnyValue) String() string { return proto.CompactTextString(m) } +func (*AnyValue) ProtoMessage() {} +func (*AnyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{0} +} +func (m *AnyValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AnyValue.Unmarshal(m, b) +} +func (m *AnyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AnyValue.Marshal(b, m, deterministic) +} +func (m *AnyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_AnyValue.Merge(m, src) +} +func (m *AnyValue) XXX_Size() int { + return xxx_messageInfo_AnyValue.Size(m) +} +func (m *AnyValue) XXX_DiscardUnknown() { + xxx_messageInfo_AnyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_AnyValue proto.InternalMessageInfo + +type isAnyValue_Value interface { + isAnyValue_Value() +} + +type AnyValue_StringValue struct { + StringValue string 
`protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"` +} +type AnyValue_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"` +} +type AnyValue_IntValue struct { + IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` +} +type AnyValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"` +} +type AnyValue_ArrayValue struct { + ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof" json:"array_value,omitempty"` +} +type AnyValue_KvlistValue struct { + KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof" json:"kvlist_value,omitempty"` +} + +func (*AnyValue_StringValue) isAnyValue_Value() {} +func (*AnyValue_BoolValue) isAnyValue_Value() {} +func (*AnyValue_IntValue) isAnyValue_Value() {} +func (*AnyValue_DoubleValue) isAnyValue_Value() {} +func (*AnyValue_ArrayValue) isAnyValue_Value() {} +func (*AnyValue_KvlistValue) isAnyValue_Value() {} + +func (m *AnyValue) GetValue() isAnyValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *AnyValue) GetStringValue() string { + if x, ok := m.GetValue().(*AnyValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *AnyValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*AnyValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *AnyValue) GetIntValue() int64 { + if x, ok := m.GetValue().(*AnyValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (m *AnyValue) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*AnyValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *AnyValue) GetArrayValue() *ArrayValue { + if x, ok := m.GetValue().(*AnyValue_ArrayValue); ok { + 
return x.ArrayValue + } + return nil +} + +func (m *AnyValue) GetKvlistValue() *KeyValueList { + if x, ok := m.GetValue().(*AnyValue_KvlistValue); ok { + return x.KvlistValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AnyValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AnyValue_StringValue)(nil), + (*AnyValue_BoolValue)(nil), + (*AnyValue_IntValue)(nil), + (*AnyValue_DoubleValue)(nil), + (*AnyValue_ArrayValue)(nil), + (*AnyValue_KvlistValue)(nil), + } +} + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +// since oneof in AnyValue does not allow repeated fields. +type ArrayValue struct { + // Array of values. The array may be empty (contain 0 elements). + Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ArrayValue) Reset() { *m = ArrayValue{} } +func (m *ArrayValue) String() string { return proto.CompactTextString(m) } +func (*ArrayValue) ProtoMessage() {} +func (*ArrayValue) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{1} +} +func (m *ArrayValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ArrayValue.Unmarshal(m, b) +} +func (m *ArrayValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ArrayValue.Marshal(b, m, deterministic) +} +func (m *ArrayValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ArrayValue.Merge(m, src) +} +func (m *ArrayValue) XXX_Size() int { + return xxx_messageInfo_ArrayValue.Size(m) +} +func (m *ArrayValue) XXX_DiscardUnknown() { + xxx_messageInfo_ArrayValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ArrayValue proto.InternalMessageInfo + +func (m *ArrayValue) GetValues() []*AnyValue { + if m != nil { + return m.Values + } + return nil +} + +// KeyValueList is a list of KeyValue 
messages. We need KeyValueList as a message +// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +// are semantically equivalent. +type KeyValueList struct { + // A collection of key/value pairs of key-value pairs. The list may be empty (may + // contain 0 elements). + Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyValueList) Reset() { *m = KeyValueList{} } +func (m *KeyValueList) String() string { return proto.CompactTextString(m) } +func (*KeyValueList) ProtoMessage() {} +func (*KeyValueList) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{2} +} +func (m *KeyValueList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyValueList.Unmarshal(m, b) +} +func (m *KeyValueList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyValueList.Marshal(b, m, deterministic) +} +func (m *KeyValueList) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyValueList.Merge(m, src) +} +func (m *KeyValueList) XXX_Size() int { + return xxx_messageInfo_KeyValueList.Size(m) +} +func (m *KeyValueList) XXX_DiscardUnknown() { + xxx_messageInfo_KeyValueList.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyValueList proto.InternalMessageInfo + +func (m *KeyValueList) GetValues() []*KeyValue { + if m != nil { + return m.Values + } + return nil +} + +// KeyValue is a key-value pair that is used to store Span attributes, Link +// attributes, etc. 
+type KeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{3} +} +func (m *KeyValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyValue.Unmarshal(m, b) +} +func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) +} +func (m *KeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyValue.Merge(m, src) +} +func (m *KeyValue) XXX_Size() int { + return xxx_messageInfo_KeyValue.Size(m) +} +func (m *KeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_KeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyValue proto.InternalMessageInfo + +func (m *KeyValue) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *KeyValue) GetValue() *AnyValue { + if m != nil { + return m.Value + } + return nil +} + +// StringKeyValue is a pair of key/value strings. This is the simpler (and faster) version +// of KeyValue that only supports string values. 
+type StringKeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringKeyValue) Reset() { *m = StringKeyValue{} } +func (m *StringKeyValue) String() string { return proto.CompactTextString(m) } +func (*StringKeyValue) ProtoMessage() {} +func (*StringKeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{4} +} +func (m *StringKeyValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringKeyValue.Unmarshal(m, b) +} +func (m *StringKeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringKeyValue.Marshal(b, m, deterministic) +} +func (m *StringKeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringKeyValue.Merge(m, src) +} +func (m *StringKeyValue) XXX_Size() int { + return xxx_messageInfo_StringKeyValue.Size(m) +} +func (m *StringKeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringKeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringKeyValue proto.InternalMessageInfo + +func (m *StringKeyValue) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *StringKeyValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// InstrumentationLibrary is a message representing the instrumentation library information +// such as the fully qualified name and version. 
+type InstrumentationLibrary struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InstrumentationLibrary) Reset() { *m = InstrumentationLibrary{} } +func (m *InstrumentationLibrary) String() string { return proto.CompactTextString(m) } +func (*InstrumentationLibrary) ProtoMessage() {} +func (*InstrumentationLibrary) Descriptor() ([]byte, []int) { + return fileDescriptor_62ba46dcb97aa817, []int{5} +} +func (m *InstrumentationLibrary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InstrumentationLibrary.Unmarshal(m, b) +} +func (m *InstrumentationLibrary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InstrumentationLibrary.Marshal(b, m, deterministic) +} +func (m *InstrumentationLibrary) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstrumentationLibrary.Merge(m, src) +} +func (m *InstrumentationLibrary) XXX_Size() int { + return xxx_messageInfo_InstrumentationLibrary.Size(m) +} +func (m *InstrumentationLibrary) XXX_DiscardUnknown() { + xxx_messageInfo_InstrumentationLibrary.DiscardUnknown(m) +} + +var xxx_messageInfo_InstrumentationLibrary proto.InternalMessageInfo + +func (m *InstrumentationLibrary) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InstrumentationLibrary) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func init() { + proto.RegisterType((*AnyValue)(nil), "opentelemetry.proto.common.v1.AnyValue") + proto.RegisterType((*ArrayValue)(nil), "opentelemetry.proto.common.v1.ArrayValue") + proto.RegisterType((*KeyValueList)(nil), "opentelemetry.proto.common.v1.KeyValueList") + proto.RegisterType((*KeyValue)(nil), "opentelemetry.proto.common.v1.KeyValue") + proto.RegisterType((*StringKeyValue)(nil), 
"opentelemetry.proto.common.v1.StringKeyValue") + proto.RegisterType((*InstrumentationLibrary)(nil), "opentelemetry.proto.common.v1.InstrumentationLibrary") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/common/v1/common.proto", fileDescriptor_62ba46dcb97aa817) +} + +var fileDescriptor_62ba46dcb97aa817 = []byte{ + // 411 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4b, 0xab, 0xd3, 0x40, + 0x14, 0xce, 0xdc, 0xdc, 0xdb, 0x9b, 0x9c, 0x14, 0x91, 0x41, 0xa4, 0x9b, 0x8b, 0xa1, 0x2e, 0x8c, + 0xca, 0x4d, 0x68, 0xdd, 0xb8, 0x51, 0x69, 0x05, 0x89, 0x58, 0xb1, 0x44, 0x70, 0xa1, 0x0b, 0x49, + 0x74, 0x88, 0x43, 0x93, 0x99, 0x3a, 0x99, 0x04, 0xf2, 0xe3, 0xfc, 0x6f, 0x32, 0x8f, 0xf4, 0xb1, + 0x69, 0xe9, 0xee, 0xcc, 0x97, 0xef, 0x71, 0x4e, 0x66, 0x0e, 0xbc, 0xe0, 0x5b, 0xc2, 0x24, 0xa9, + 0x48, 0x4d, 0xa4, 0xe8, 0x93, 0xad, 0xe0, 0x92, 0x27, 0xbf, 0x78, 0x5d, 0x73, 0x96, 0x74, 0x33, + 0x5b, 0xc5, 0x1a, 0xc6, 0x77, 0x47, 0x5c, 0x03, 0xc6, 0x96, 0xd1, 0xcd, 0xa6, 0xff, 0xae, 0xc0, + 0x5b, 0xb0, 0xfe, 0x5b, 0x5e, 0xb5, 0x04, 0x3f, 0x85, 0x71, 0x23, 0x05, 0x65, 0xe5, 0xcf, 0x4e, + 0x9d, 0x27, 0x28, 0x44, 0x91, 0x9f, 0x3a, 0x59, 0x60, 0x50, 0x43, 0x7a, 0x02, 0x50, 0x70, 0x5e, + 0x59, 0xca, 0x55, 0x88, 0x22, 0x2f, 0x75, 0x32, 0x5f, 0x61, 0x86, 0x70, 0x07, 0x3e, 0x65, 0xd2, + 0x7e, 0x77, 0x43, 0x14, 0xb9, 0xa9, 0x93, 0x79, 0x94, 0xc9, 0x5d, 0xc8, 0x6f, 0xde, 0x16, 0x15, + 0xb1, 0x8c, 0xeb, 0x10, 0x45, 0x48, 0x85, 0x18, 0xd4, 0x90, 0x56, 0x10, 0xe4, 0x42, 0xe4, 0xbd, + 0xe5, 0xdc, 0x84, 0x28, 0x0a, 0xe6, 0xcf, 0xe3, 0x93, 0xb3, 0xc4, 0x0b, 0xa5, 0xd0, 0xfa, 0xd4, + 0xc9, 0x20, 0xdf, 0x9d, 0xf0, 0x1a, 0xc6, 0x9b, 0xae, 0xa2, 0xcd, 0xd0, 0xd4, 0x48, 0xdb, 0xbd, + 0x3c, 0x63, 0xf7, 0x89, 0x18, 0xf9, 0x8a, 0x36, 0x52, 0xf5, 0x67, 0x2c, 0x34, 0xb4, 0xbc, 0x85, + 0x1b, 0x6d, 0x35, 0xfd, 0x0c, 0xb0, 0x8f, 0xc5, 0xef, 0x60, 0xa4, 0xe1, 0x66, 0x82, 0x42, 0x37, + 0x0a, 0xe6, 0xcf, 0xce, 0x75, 0x6c, 0xff, 0x7c, 
0x66, 0x65, 0xd3, 0x2f, 0x30, 0x3e, 0x8c, 0xbd, + 0xd8, 0x70, 0x10, 0xef, 0x0c, 0x7f, 0x80, 0x37, 0x60, 0xf8, 0x21, 0xb8, 0x1b, 0xd2, 0x9b, 0x5b, + 0xcd, 0x54, 0x89, 0xdf, 0xd8, 0x31, 0xf4, 0x35, 0x5e, 0xd0, 0xae, 0x1d, 0xfe, 0x35, 0x3c, 0xf8, + 0xaa, 0x5f, 0xc6, 0x89, 0x88, 0x47, 0x87, 0x11, 0xfe, 0xa0, 0xfc, 0x00, 0x8f, 0x3f, 0xb2, 0x46, + 0x8a, 0xb6, 0x26, 0x4c, 0xe6, 0x92, 0x72, 0xb6, 0xa2, 0x85, 0xc8, 0x45, 0x8f, 0x31, 0x5c, 0xb3, + 0xbc, 0xb6, 0x6f, 0x2f, 0xd3, 0x35, 0x9e, 0xc0, 0x6d, 0x47, 0x44, 0x43, 0x39, 0xb3, 0x2e, 0xc3, + 0x71, 0xf9, 0x17, 0x42, 0xca, 0x4f, 0x77, 0xbd, 0x0c, 0xde, 0xeb, 0x72, 0xad, 0xe0, 0x35, 0xfa, + 0xfe, 0xb6, 0xa4, 0xf2, 0x4f, 0x5b, 0x28, 0x42, 0xa2, 0x84, 0xf7, 0xfb, 0x45, 0x3a, 0xf2, 0xb9, + 0x37, 0x6b, 0x55, 0x12, 0x96, 0x94, 0x07, 0xdb, 0x55, 0x8c, 0x34, 0xfe, 0xea, 0x7f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x58, 0xdb, 0x68, 0x5e, 0x85, 0x03, 0x00, 0x00, +} diff --git a/internal/opentelemetry-proto-gen/logs/v1/logs.pb.go b/internal/opentelemetry-proto-gen/logs/v1/logs.pb.go new file mode 100644 index 0000000000..04b6212702 --- /dev/null +++ b/internal/opentelemetry-proto-gen/logs/v1/logs.pb.go @@ -0,0 +1,448 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/logs/v1/logs.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v11 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/resource/v1" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Possible values for LogRecord.SeverityNumber. +type SeverityNumber int32 + +const ( + // UNSPECIFIED is the default SeverityNumber, it MUST not be used. + SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED SeverityNumber = 0 + SeverityNumber_SEVERITY_NUMBER_TRACE SeverityNumber = 1 + SeverityNumber_SEVERITY_NUMBER_TRACE2 SeverityNumber = 2 + SeverityNumber_SEVERITY_NUMBER_TRACE3 SeverityNumber = 3 + SeverityNumber_SEVERITY_NUMBER_TRACE4 SeverityNumber = 4 + SeverityNumber_SEVERITY_NUMBER_DEBUG SeverityNumber = 5 + SeverityNumber_SEVERITY_NUMBER_DEBUG2 SeverityNumber = 6 + SeverityNumber_SEVERITY_NUMBER_DEBUG3 SeverityNumber = 7 + SeverityNumber_SEVERITY_NUMBER_DEBUG4 SeverityNumber = 8 + SeverityNumber_SEVERITY_NUMBER_INFO SeverityNumber = 9 + SeverityNumber_SEVERITY_NUMBER_INFO2 SeverityNumber = 10 + SeverityNumber_SEVERITY_NUMBER_INFO3 SeverityNumber = 11 + SeverityNumber_SEVERITY_NUMBER_INFO4 SeverityNumber = 12 + SeverityNumber_SEVERITY_NUMBER_WARN SeverityNumber = 13 + SeverityNumber_SEVERITY_NUMBER_WARN2 SeverityNumber = 14 + SeverityNumber_SEVERITY_NUMBER_WARN3 SeverityNumber = 15 + SeverityNumber_SEVERITY_NUMBER_WARN4 SeverityNumber = 16 + SeverityNumber_SEVERITY_NUMBER_ERROR SeverityNumber = 17 + SeverityNumber_SEVERITY_NUMBER_ERROR2 SeverityNumber = 18 + SeverityNumber_SEVERITY_NUMBER_ERROR3 SeverityNumber = 19 + SeverityNumber_SEVERITY_NUMBER_ERROR4 SeverityNumber = 20 + SeverityNumber_SEVERITY_NUMBER_FATAL SeverityNumber = 21 + SeverityNumber_SEVERITY_NUMBER_FATAL2 SeverityNumber = 22 + SeverityNumber_SEVERITY_NUMBER_FATAL3 SeverityNumber = 23 + SeverityNumber_SEVERITY_NUMBER_FATAL4 SeverityNumber = 24 +) + +var SeverityNumber_name = map[int32]string{ + 0: "SEVERITY_NUMBER_UNSPECIFIED", + 1: "SEVERITY_NUMBER_TRACE", + 2: "SEVERITY_NUMBER_TRACE2", + 3: "SEVERITY_NUMBER_TRACE3", + 4: "SEVERITY_NUMBER_TRACE4", + 5: "SEVERITY_NUMBER_DEBUG", + 6: "SEVERITY_NUMBER_DEBUG2", + 7: 
"SEVERITY_NUMBER_DEBUG3", + 8: "SEVERITY_NUMBER_DEBUG4", + 9: "SEVERITY_NUMBER_INFO", + 10: "SEVERITY_NUMBER_INFO2", + 11: "SEVERITY_NUMBER_INFO3", + 12: "SEVERITY_NUMBER_INFO4", + 13: "SEVERITY_NUMBER_WARN", + 14: "SEVERITY_NUMBER_WARN2", + 15: "SEVERITY_NUMBER_WARN3", + 16: "SEVERITY_NUMBER_WARN4", + 17: "SEVERITY_NUMBER_ERROR", + 18: "SEVERITY_NUMBER_ERROR2", + 19: "SEVERITY_NUMBER_ERROR3", + 20: "SEVERITY_NUMBER_ERROR4", + 21: "SEVERITY_NUMBER_FATAL", + 22: "SEVERITY_NUMBER_FATAL2", + 23: "SEVERITY_NUMBER_FATAL3", + 24: "SEVERITY_NUMBER_FATAL4", +} + +var SeverityNumber_value = map[string]int32{ + "SEVERITY_NUMBER_UNSPECIFIED": 0, + "SEVERITY_NUMBER_TRACE": 1, + "SEVERITY_NUMBER_TRACE2": 2, + "SEVERITY_NUMBER_TRACE3": 3, + "SEVERITY_NUMBER_TRACE4": 4, + "SEVERITY_NUMBER_DEBUG": 5, + "SEVERITY_NUMBER_DEBUG2": 6, + "SEVERITY_NUMBER_DEBUG3": 7, + "SEVERITY_NUMBER_DEBUG4": 8, + "SEVERITY_NUMBER_INFO": 9, + "SEVERITY_NUMBER_INFO2": 10, + "SEVERITY_NUMBER_INFO3": 11, + "SEVERITY_NUMBER_INFO4": 12, + "SEVERITY_NUMBER_WARN": 13, + "SEVERITY_NUMBER_WARN2": 14, + "SEVERITY_NUMBER_WARN3": 15, + "SEVERITY_NUMBER_WARN4": 16, + "SEVERITY_NUMBER_ERROR": 17, + "SEVERITY_NUMBER_ERROR2": 18, + "SEVERITY_NUMBER_ERROR3": 19, + "SEVERITY_NUMBER_ERROR4": 20, + "SEVERITY_NUMBER_FATAL": 21, + "SEVERITY_NUMBER_FATAL2": 22, + "SEVERITY_NUMBER_FATAL3": 23, + "SEVERITY_NUMBER_FATAL4": 24, +} + +func (x SeverityNumber) String() string { + return proto.EnumName(SeverityNumber_name, int32(x)) +} + +func (SeverityNumber) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{0} +} + +// Masks for LogRecord.flags field. 
+type LogRecordFlags int32 + +const ( + LogRecordFlags_LOG_RECORD_FLAG_UNSPECIFIED LogRecordFlags = 0 + LogRecordFlags_LOG_RECORD_FLAG_TRACE_FLAGS_MASK LogRecordFlags = 255 +) + +var LogRecordFlags_name = map[int32]string{ + 0: "LOG_RECORD_FLAG_UNSPECIFIED", + 255: "LOG_RECORD_FLAG_TRACE_FLAGS_MASK", +} + +var LogRecordFlags_value = map[string]int32{ + "LOG_RECORD_FLAG_UNSPECIFIED": 0, + "LOG_RECORD_FLAG_TRACE_FLAGS_MASK": 255, +} + +func (x LogRecordFlags) String() string { + return proto.EnumName(LogRecordFlags_name, int32(x)) +} + +func (LogRecordFlags) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{1} +} + +// A collection of InstrumentationLibraryLogs from a Resource. +type ResourceLogs struct { + // The resource for the logs in this message. + // If this field is not set then no resource info is known. + Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of InstrumentationLibraryLogs that originate from a resource. 
+ InstrumentationLibraryLogs []*InstrumentationLibraryLogs `protobuf:"bytes,2,rep,name=instrumentation_library_logs,json=instrumentationLibraryLogs,proto3" json:"instrumentation_library_logs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceLogs) Reset() { *m = ResourceLogs{} } +func (m *ResourceLogs) String() string { return proto.CompactTextString(m) } +func (*ResourceLogs) ProtoMessage() {} +func (*ResourceLogs) Descriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{0} +} +func (m *ResourceLogs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceLogs.Unmarshal(m, b) +} +func (m *ResourceLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceLogs.Marshal(b, m, deterministic) +} +func (m *ResourceLogs) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceLogs.Merge(m, src) +} +func (m *ResourceLogs) XXX_Size() int { + return xxx_messageInfo_ResourceLogs.Size(m) +} +func (m *ResourceLogs) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceLogs.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceLogs proto.InternalMessageInfo + +func (m *ResourceLogs) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *ResourceLogs) GetInstrumentationLibraryLogs() []*InstrumentationLibraryLogs { + if m != nil { + return m.InstrumentationLibraryLogs + } + return nil +} + +// A collection of Logs produced by an InstrumentationLibrary. +type InstrumentationLibraryLogs struct { + // The instrumentation library information for the logs in this message. + // If this field is not set then no library info is known. + InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` + // A list of log records. 
+ Logs []*LogRecord `protobuf:"bytes,2,rep,name=logs,proto3" json:"logs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InstrumentationLibraryLogs) Reset() { *m = InstrumentationLibraryLogs{} } +func (m *InstrumentationLibraryLogs) String() string { return proto.CompactTextString(m) } +func (*InstrumentationLibraryLogs) ProtoMessage() {} +func (*InstrumentationLibraryLogs) Descriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{1} +} +func (m *InstrumentationLibraryLogs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InstrumentationLibraryLogs.Unmarshal(m, b) +} +func (m *InstrumentationLibraryLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InstrumentationLibraryLogs.Marshal(b, m, deterministic) +} +func (m *InstrumentationLibraryLogs) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstrumentationLibraryLogs.Merge(m, src) +} +func (m *InstrumentationLibraryLogs) XXX_Size() int { + return xxx_messageInfo_InstrumentationLibraryLogs.Size(m) +} +func (m *InstrumentationLibraryLogs) XXX_DiscardUnknown() { + xxx_messageInfo_InstrumentationLibraryLogs.DiscardUnknown(m) +} + +var xxx_messageInfo_InstrumentationLibraryLogs proto.InternalMessageInfo + +func (m *InstrumentationLibraryLogs) GetInstrumentationLibrary() *v11.InstrumentationLibrary { + if m != nil { + return m.InstrumentationLibrary + } + return nil +} + +func (m *InstrumentationLibraryLogs) GetLogs() []*LogRecord { + if m != nil { + return m.Logs + } + return nil +} + +// A log record according to OpenTelemetry Log Data Model: +// https://github.com/open-telemetry/oteps/blob/master/text/logs/0097-log-data-model.md +type LogRecord struct { + // time_unix_nano is the time when the event occurred. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // Value of 0 indicates unknown or missing timestamp. 
+ TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // Numerical value of the severity, normalized to values described in Log Data Model. + // [Optional]. + SeverityNumber SeverityNumber `protobuf:"varint,2,opt,name=severity_number,json=severityNumber,proto3,enum=opentelemetry.proto.logs.v1.SeverityNumber" json:"severity_number,omitempty"` + // The severity text (also known as log level). The original string representation as + // it is known at the source. [Optional]. + SeverityText string `protobuf:"bytes,3,opt,name=severity_text,json=severityText,proto3" json:"severity_text,omitempty"` + // Short event identifier that does not contain varying parts. Name describes + // what happened (e.g. "ProcessStarted"). Recommended to be no longer than 50 + // characters. Not guaranteed to be unique in any way. [Optional]. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // A value containing the body of the log record. Can be for example a human-readable + // string message (including multi-line) describing the event in a free form or it can + // be a structured data composed of arrays and maps of other values. [Optional]. + Body *v11.AnyValue `protobuf:"bytes,5,opt,name=body,proto3" json:"body,omitempty"` + // Additional attributes that describe the specific event occurrence. [Optional]. + Attributes []*v11.KeyValue `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes,omitempty"` + DroppedAttributesCount uint32 `protobuf:"varint,7,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Flags, a bit field. 8 least significant bits are the trace flags as + // defined in W3C Trace Context specification. 24 most significant bits are reserved + // and must be set to 0. 
Readers must not assume that 24 most significant bits + // will be zero and must correctly mask the bits when reading 8-bit trace flag (use + // flags & TRACE_FLAGS_MASK). [Optional]. + Flags uint32 `protobuf:"fixed32,8,opt,name=flags,proto3" json:"flags,omitempty"` + // A unique identifier for a trace. All logs from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + // is considered invalid. Can be set for logs that are part of request processing + // and have an assigned trace id. [Optional]. + TraceId []byte `protobuf:"bytes,9,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes is considered + // invalid. Can be set for logs that are part of a particular processing span. + // If span_id is present trace_id SHOULD be also present. [Optional]. + SpanId []byte `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogRecord) Reset() { *m = LogRecord{} } +func (m *LogRecord) String() string { return proto.CompactTextString(m) } +func (*LogRecord) ProtoMessage() {} +func (*LogRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_d1c030a3ec7e961e, []int{2} +} +func (m *LogRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogRecord.Unmarshal(m, b) +} +func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic) +} +func (m *LogRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogRecord.Merge(m, src) +} +func (m *LogRecord) XXX_Size() int { + return xxx_messageInfo_LogRecord.Size(m) +} +func (m *LogRecord) XXX_DiscardUnknown() { + xxx_messageInfo_LogRecord.DiscardUnknown(m) +} + +var 
xxx_messageInfo_LogRecord proto.InternalMessageInfo + +func (m *LogRecord) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *LogRecord) GetSeverityNumber() SeverityNumber { + if m != nil { + return m.SeverityNumber + } + return SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED +} + +func (m *LogRecord) GetSeverityText() string { + if m != nil { + return m.SeverityText + } + return "" +} + +func (m *LogRecord) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LogRecord) GetBody() *v11.AnyValue { + if m != nil { + return m.Body + } + return nil +} + +func (m *LogRecord) GetAttributes() []*v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *LogRecord) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func (m *LogRecord) GetFlags() uint32 { + if m != nil { + return m.Flags + } + return 0 +} + +func (m *LogRecord) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *LogRecord) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.logs.v1.SeverityNumber", SeverityNumber_name, SeverityNumber_value) + proto.RegisterEnum("opentelemetry.proto.logs.v1.LogRecordFlags", LogRecordFlags_name, LogRecordFlags_value) + proto.RegisterType((*ResourceLogs)(nil), "opentelemetry.proto.logs.v1.ResourceLogs") + proto.RegisterType((*InstrumentationLibraryLogs)(nil), "opentelemetry.proto.logs.v1.InstrumentationLibraryLogs") + proto.RegisterType((*LogRecord)(nil), "opentelemetry.proto.logs.v1.LogRecord") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/logs/v1/logs.proto", fileDescriptor_d1c030a3ec7e961e) +} + +var fileDescriptor_d1c030a3ec7e961e = []byte{ + // 756 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x95, 0xdf, 0x6f, 0xea, 0x36, + 
0x14, 0xc7, 0x97, 0xf2, 0xdb, 0xa5, 0xd4, 0xf3, 0x5a, 0x9a, 0xd2, 0x69, 0x8d, 0xba, 0xad, 0x63, + 0x9d, 0x0a, 0x6a, 0x60, 0xda, 0xb4, 0xed, 0x25, 0xd0, 0x80, 0x50, 0x29, 0x54, 0x06, 0xba, 0x1f, + 0x2f, 0x51, 0x00, 0x8f, 0x45, 0x03, 0x1b, 0x25, 0x0e, 0x82, 0xbf, 0xef, 0xbe, 0x5c, 0xdd, 0xa7, + 0xfb, 0x1f, 0xdd, 0xab, 0x98, 0x1f, 0x2d, 0x28, 0xa6, 0x4f, 0xd8, 0xe7, 0x73, 0xbe, 0x5f, 0x9f, + 0x73, 0x44, 0x6c, 0x70, 0xcd, 0xa6, 0x84, 0x72, 0x32, 0x26, 0x13, 0xc2, 0xdd, 0x45, 0x71, 0xea, + 0x32, 0xce, 0x8a, 0x63, 0x36, 0xf2, 0x8a, 0xb3, 0x3b, 0xf1, 0x5b, 0x10, 0x21, 0x74, 0xb1, 0x95, + 0xb7, 0x0c, 0x16, 0x04, 0x9f, 0xdd, 0xe5, 0x6e, 0xc2, 0x4c, 0x06, 0x6c, 0x32, 0x61, 0x34, 0xb0, + 0x59, 0xae, 0x96, 0x9a, 0x5c, 0x21, 0x2c, 0xd7, 0x25, 0x1e, 0xf3, 0xdd, 0x01, 0x09, 0xb2, 0xd7, + 0xeb, 0x65, 0xfe, 0xd5, 0x47, 0x05, 0xa4, 0xf1, 0x2a, 0xd4, 0x64, 0x23, 0x0f, 0x99, 0x20, 0xb9, + 0x4e, 0x51, 0x15, 0x4d, 0xc9, 0x1f, 0xea, 0x3f, 0x16, 0xc2, 0x8a, 0xdb, 0xf8, 0xcc, 0xee, 0x0a, + 0x6b, 0x03, 0xbc, 0x91, 0xa2, 0x05, 0xf8, 0xda, 0xa1, 0x1e, 0x77, 0xfd, 0x09, 0xa1, 0xdc, 0xe6, + 0x0e, 0xa3, 0xd6, 0xd8, 0xe9, 0xbb, 0xb6, 0xbb, 0xb0, 0x82, 0xb6, 0xd4, 0x03, 0x2d, 0x92, 0x3f, + 0xd4, 0x7f, 0x29, 0xec, 0xe9, 0xbb, 0xd0, 0xd8, 0x36, 0x68, 0x2e, 0xf5, 0x41, 0x95, 0x38, 0xe7, + 0x48, 0xd9, 0xd5, 0x7b, 0x05, 0xe4, 0xe4, 0x52, 0x44, 0xc1, 0x99, 0xa4, 0xb2, 0x55, 0xbf, 0x3f, + 0x87, 0x16, 0xb5, 0x9a, 0xb2, 0xb4, 0x2c, 0x9c, 0x0d, 0x2f, 0x09, 0xfd, 0x06, 0xa2, 0xaf, 0x3a, + 0xbe, 0xde, 0xdb, 0x71, 0x93, 0x8d, 0x30, 0x19, 0x30, 0x77, 0x88, 0x85, 0xe6, 0xea, 0x43, 0x04, + 0xa4, 0x36, 0x31, 0xf4, 0x1d, 0xc8, 0x70, 0x67, 0x42, 0x2c, 0x9f, 0x3a, 0x73, 0x8b, 0xda, 0x94, + 0x89, 0x82, 0xe3, 0x38, 0x1d, 0x44, 0x7b, 0xd4, 0x99, 0xb7, 0x6c, 0xca, 0x50, 0x17, 0x1c, 0x7b, + 0x64, 0x46, 0x5c, 0x87, 0x2f, 0x2c, 0xea, 0x4f, 0xfa, 0xc4, 0x55, 0x0f, 0x34, 0x25, 0x9f, 0xd1, + 0x7f, 0xda, 0x7b, 0x74, 0x67, 0xa5, 0x69, 0x09, 0x09, 0xce, 0x78, 0x5b, 0x7b, 0xf4, 0x2d, 0x38, + 0xda, 0xb8, 0x72, 0x32, 0xe7, 0x6a, 
0x44, 0x53, 0xf2, 0x29, 0x9c, 0x5e, 0x07, 0xbb, 0x64, 0xce, + 0x11, 0x02, 0x51, 0x6a, 0x4f, 0x88, 0x1a, 0x15, 0x4c, 0xac, 0xd1, 0xef, 0x20, 0xda, 0x67, 0xc3, + 0x85, 0x1a, 0x13, 0xb3, 0xfd, 0xe1, 0x8d, 0xd9, 0x1a, 0x74, 0xf1, 0x6c, 0x8f, 0x7d, 0x82, 0x85, + 0x08, 0xd5, 0x01, 0xb0, 0x39, 0x77, 0x9d, 0xbe, 0xcf, 0x89, 0xa7, 0xc6, 0xc5, 0x04, 0xdf, 0xb2, + 0x78, 0x20, 0x2b, 0x8b, 0x57, 0x52, 0xf4, 0x2b, 0x50, 0x87, 0x2e, 0x9b, 0x4e, 0xc9, 0xd0, 0x7a, + 0x89, 0x5a, 0x03, 0xe6, 0x53, 0xae, 0x26, 0x34, 0x25, 0x7f, 0x84, 0xb3, 0x2b, 0x6e, 0x6c, 0x70, + 0x35, 0xa0, 0xe8, 0x04, 0xc4, 0xfe, 0x1d, 0xdb, 0x23, 0x4f, 0x4d, 0x6a, 0x4a, 0x3e, 0x81, 0x97, + 0x1b, 0x74, 0x0e, 0x92, 0xdc, 0xb5, 0x07, 0xc4, 0x72, 0x86, 0x6a, 0x4a, 0x53, 0xf2, 0x69, 0x9c, + 0x10, 0xfb, 0xc6, 0x10, 0x9d, 0x81, 0x84, 0x37, 0xb5, 0x69, 0x40, 0x80, 0x20, 0xf1, 0x60, 0xdb, + 0x18, 0xde, 0xbc, 0x8b, 0x81, 0xcc, 0xf6, 0x94, 0xd1, 0x25, 0xb8, 0xe8, 0x98, 0xcf, 0x26, 0x6e, + 0x74, 0xff, 0xb6, 0x5a, 0xbd, 0xc7, 0x8a, 0x89, 0xad, 0x5e, 0xab, 0xf3, 0x64, 0x56, 0x1b, 0xb5, + 0x86, 0x79, 0x0f, 0xbf, 0x40, 0xe7, 0xe0, 0x74, 0x37, 0xa1, 0x8b, 0x8d, 0xaa, 0x09, 0x15, 0x94, + 0x03, 0xd9, 0x50, 0xa4, 0xc3, 0x03, 0x29, 0x2b, 0xc1, 0x88, 0x94, 0x95, 0x61, 0x34, 0xec, 0xb8, + 0x7b, 0xb3, 0xd2, 0xab, 0xc3, 0x58, 0x98, 0x4c, 0x20, 0x1d, 0xc6, 0xa5, 0xac, 0x04, 0x13, 0x52, + 0x56, 0x86, 0x49, 0xa4, 0x82, 0x93, 0x5d, 0xd6, 0x68, 0xd5, 0xda, 0x30, 0x15, 0x56, 0x48, 0x40, + 0x74, 0x08, 0x64, 0xa8, 0x04, 0x0f, 0x65, 0xa8, 0x0c, 0xd3, 0x61, 0x47, 0xfd, 0x69, 0xe0, 0x16, + 0x3c, 0x0a, 0x13, 0x05, 0x44, 0x87, 0x19, 0x19, 0x2a, 0xc1, 0x63, 0x19, 0x2a, 0x43, 0x18, 0x86, + 0x4c, 0x8c, 0xdb, 0x18, 0x7e, 0x19, 0x36, 0x0c, 0x81, 0x74, 0x88, 0xa4, 0xac, 0x04, 0xbf, 0x92, + 0xb2, 0x32, 0x3c, 0x09, 0x3b, 0xae, 0x66, 0x74, 0x8d, 0x26, 0x3c, 0x0d, 0x93, 0x09, 0xa4, 0xc3, + 0xac, 0x94, 0x95, 0xe0, 0x99, 0x94, 0x95, 0xa1, 0x7a, 0xf3, 0x17, 0xc8, 0x6c, 0x6e, 0xa4, 0x9a, + 0xf8, 0x16, 0x2e, 0xc1, 0x45, 0xb3, 0x5d, 0xb7, 0xb0, 0x59, 0x6d, 0xe3, 
0x7b, 0xab, 0xd6, 0x34, + 0xea, 0x3b, 0x7f, 0xe2, 0xef, 0x81, 0xb6, 0x9b, 0x20, 0xfe, 0x71, 0x62, 0xd9, 0xb1, 0x1e, 0x8d, + 0xce, 0x03, 0xfc, 0xa4, 0x54, 0xfe, 0x07, 0xdf, 0x38, 0x6c, 0xdf, 0x1d, 0x55, 0x09, 0xee, 0x42, + 0xef, 0x29, 0x08, 0x3d, 0x29, 0xff, 0xfc, 0x31, 0x72, 0xf8, 0x7f, 0x7e, 0x3f, 0xf8, 0xf2, 0x8b, + 0x81, 0xe8, 0xf6, 0xe5, 0xd5, 0xdb, 0xf2, 0xb8, 0x5d, 0xbe, 0x81, 0x23, 0x42, 0x8b, 0xa3, 0xcd, + 0xdb, 0xdb, 0x8f, 0x8b, 0x68, 0xe9, 0x73, 0x00, 0x00, 0x00, 0xff, 0xff, 0x03, 0x3d, 0xc3, 0x0c, + 0xa1, 0x07, 0x00, 0x00, +} diff --git a/internal/opentelemetry-proto-gen/metrics/experimental/configservice.pb.go b/internal/opentelemetry-proto-gen/metrics/experimental/configservice.pb.go new file mode 100644 index 0000000000..eebb0aa75e --- /dev/null +++ b/internal/opentelemetry-proto-gen/metrics/experimental/configservice.pb.go @@ -0,0 +1,423 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/metrics/experimental/configservice.proto + +package experimental + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/resource/v1" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MetricConfigRequest struct { + // Required. The resource for which configuration should be returned. 
+ Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // Optional. The value of MetricConfigResponse.fingerprint for the last + // configuration that the caller received and successfully applied. + LastKnownFingerprint []byte `protobuf:"bytes,2,opt,name=last_known_fingerprint,json=lastKnownFingerprint,proto3" json:"last_known_fingerprint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricConfigRequest) Reset() { *m = MetricConfigRequest{} } +func (m *MetricConfigRequest) String() string { return proto.CompactTextString(m) } +func (*MetricConfigRequest) ProtoMessage() {} +func (*MetricConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_79b5d4ea55caf90b, []int{0} +} +func (m *MetricConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricConfigRequest.Unmarshal(m, b) +} +func (m *MetricConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricConfigRequest.Marshal(b, m, deterministic) +} +func (m *MetricConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricConfigRequest.Merge(m, src) +} +func (m *MetricConfigRequest) XXX_Size() int { + return xxx_messageInfo_MetricConfigRequest.Size(m) +} +func (m *MetricConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MetricConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricConfigRequest proto.InternalMessageInfo + +func (m *MetricConfigRequest) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *MetricConfigRequest) GetLastKnownFingerprint() []byte { + if m != nil { + return m.LastKnownFingerprint + } + return nil +} + +type MetricConfigResponse struct { + // Optional. The fingerprint associated with this MetricConfigResponse. Each + // change in configs yields a different fingerprint. 
The resource SHOULD copy + // this value to MetricConfigRequest.last_known_fingerprint for the next + // configuration request. If there are no changes between fingerprint and + // MetricConfigRequest.last_known_fingerprint, then all other fields besides + // fingerprint in the response are optional, or the same as the last update if + // present. + // + // The exact mechanics of generating the fingerprint is up to the + // implementation. However, a fingerprint must be deterministically determined + // by the configurations -- the same configuration will generate the same + // fingerprint on any instance of an implementation. Hence using a timestamp is + // unacceptable, but a deterministic hash is fine. + Fingerprint []byte `protobuf:"bytes,1,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"` + // A single metric may match multiple schedules. In such cases, the schedule + // that specifies the smallest period is applied. + // + // Note, for optimization purposes, it is recommended to use as few schedules + // as possible to capture all required metric updates. Where you can be + // conservative, do take full advantage of the inclusion/exclusion patterns to + // capture as much of your targeted metrics. + Schedules []*MetricConfigResponse_Schedule `protobuf:"bytes,2,rep,name=schedules,proto3" json:"schedules,omitempty"` + // Optional. The client is suggested to wait this long (in seconds) before + // pinging the configuration service again. 
+ SuggestedWaitTimeSec int32 `protobuf:"varint,3,opt,name=suggested_wait_time_sec,json=suggestedWaitTimeSec,proto3" json:"suggested_wait_time_sec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricConfigResponse) Reset() { *m = MetricConfigResponse{} } +func (m *MetricConfigResponse) String() string { return proto.CompactTextString(m) } +func (*MetricConfigResponse) ProtoMessage() {} +func (*MetricConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_79b5d4ea55caf90b, []int{1} +} +func (m *MetricConfigResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricConfigResponse.Unmarshal(m, b) +} +func (m *MetricConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricConfigResponse.Marshal(b, m, deterministic) +} +func (m *MetricConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricConfigResponse.Merge(m, src) +} +func (m *MetricConfigResponse) XXX_Size() int { + return xxx_messageInfo_MetricConfigResponse.Size(m) +} +func (m *MetricConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MetricConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricConfigResponse proto.InternalMessageInfo + +func (m *MetricConfigResponse) GetFingerprint() []byte { + if m != nil { + return m.Fingerprint + } + return nil +} + +func (m *MetricConfigResponse) GetSchedules() []*MetricConfigResponse_Schedule { + if m != nil { + return m.Schedules + } + return nil +} + +func (m *MetricConfigResponse) GetSuggestedWaitTimeSec() int32 { + if m != nil { + return m.SuggestedWaitTimeSec + } + return 0 +} + +// A Schedule is used to apply a particular scheduling configuration to +// a metric. If a metric name matches a schedule's patterns, then the metric +// adopts the configuration specified by the schedule. 
+type MetricConfigResponse_Schedule struct { + // Metrics with names that match a rule in the inclusion_patterns are + // targeted by this schedule. Metrics that match the exclusion_patterns + // are not targeted for this schedule, even if they match an inclusion + // pattern. + ExclusionPatterns []*MetricConfigResponse_Schedule_Pattern `protobuf:"bytes,1,rep,name=exclusion_patterns,json=exclusionPatterns,proto3" json:"exclusion_patterns,omitempty"` + InclusionPatterns []*MetricConfigResponse_Schedule_Pattern `protobuf:"bytes,2,rep,name=inclusion_patterns,json=inclusionPatterns,proto3" json:"inclusion_patterns,omitempty"` + // Describes the collection period for each metric in seconds. + // A period of 0 means to not export. + PeriodSec int32 `protobuf:"varint,3,opt,name=period_sec,json=periodSec,proto3" json:"period_sec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricConfigResponse_Schedule) Reset() { *m = MetricConfigResponse_Schedule{} } +func (m *MetricConfigResponse_Schedule) String() string { return proto.CompactTextString(m) } +func (*MetricConfigResponse_Schedule) ProtoMessage() {} +func (*MetricConfigResponse_Schedule) Descriptor() ([]byte, []int) { + return fileDescriptor_79b5d4ea55caf90b, []int{1, 0} +} +func (m *MetricConfigResponse_Schedule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricConfigResponse_Schedule.Unmarshal(m, b) +} +func (m *MetricConfigResponse_Schedule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricConfigResponse_Schedule.Marshal(b, m, deterministic) +} +func (m *MetricConfigResponse_Schedule) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricConfigResponse_Schedule.Merge(m, src) +} +func (m *MetricConfigResponse_Schedule) XXX_Size() int { + return xxx_messageInfo_MetricConfigResponse_Schedule.Size(m) +} +func (m *MetricConfigResponse_Schedule) XXX_DiscardUnknown() { + 
xxx_messageInfo_MetricConfigResponse_Schedule.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricConfigResponse_Schedule proto.InternalMessageInfo + +func (m *MetricConfigResponse_Schedule) GetExclusionPatterns() []*MetricConfigResponse_Schedule_Pattern { + if m != nil { + return m.ExclusionPatterns + } + return nil +} + +func (m *MetricConfigResponse_Schedule) GetInclusionPatterns() []*MetricConfigResponse_Schedule_Pattern { + if m != nil { + return m.InclusionPatterns + } + return nil +} + +func (m *MetricConfigResponse_Schedule) GetPeriodSec() int32 { + if m != nil { + return m.PeriodSec + } + return 0 +} + +// A light-weight pattern that can match 1 or more +// metrics, for which this schedule will apply. The string is used to +// match against metric names. It should not exceed 100k characters. +type MetricConfigResponse_Schedule_Pattern struct { + // Types that are valid to be assigned to Match: + // *MetricConfigResponse_Schedule_Pattern_Equals + // *MetricConfigResponse_Schedule_Pattern_StartsWith + Match isMetricConfigResponse_Schedule_Pattern_Match `protobuf_oneof:"match"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricConfigResponse_Schedule_Pattern) Reset() { *m = MetricConfigResponse_Schedule_Pattern{} } +func (m *MetricConfigResponse_Schedule_Pattern) String() string { return proto.CompactTextString(m) } +func (*MetricConfigResponse_Schedule_Pattern) ProtoMessage() {} +func (*MetricConfigResponse_Schedule_Pattern) Descriptor() ([]byte, []int) { + return fileDescriptor_79b5d4ea55caf90b, []int{1, 0, 0} +} +func (m *MetricConfigResponse_Schedule_Pattern) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.Unmarshal(m, b) +} +func (m *MetricConfigResponse_Schedule_Pattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.Marshal(b, m, deterministic) +} 
+func (m *MetricConfigResponse_Schedule_Pattern) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.Merge(m, src) +} +func (m *MetricConfigResponse_Schedule_Pattern) XXX_Size() int { + return xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.Size(m) +} +func (m *MetricConfigResponse_Schedule_Pattern) XXX_DiscardUnknown() { + xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricConfigResponse_Schedule_Pattern proto.InternalMessageInfo + +type isMetricConfigResponse_Schedule_Pattern_Match interface { + isMetricConfigResponse_Schedule_Pattern_Match() +} + +type MetricConfigResponse_Schedule_Pattern_Equals struct { + Equals string `protobuf:"bytes,1,opt,name=equals,proto3,oneof" json:"equals,omitempty"` +} +type MetricConfigResponse_Schedule_Pattern_StartsWith struct { + StartsWith string `protobuf:"bytes,2,opt,name=starts_with,json=startsWith,proto3,oneof" json:"starts_with,omitempty"` +} + +func (*MetricConfigResponse_Schedule_Pattern_Equals) isMetricConfigResponse_Schedule_Pattern_Match() { +} +func (*MetricConfigResponse_Schedule_Pattern_StartsWith) isMetricConfigResponse_Schedule_Pattern_Match() { +} + +func (m *MetricConfigResponse_Schedule_Pattern) GetMatch() isMetricConfigResponse_Schedule_Pattern_Match { + if m != nil { + return m.Match + } + return nil +} + +func (m *MetricConfigResponse_Schedule_Pattern) GetEquals() string { + if x, ok := m.GetMatch().(*MetricConfigResponse_Schedule_Pattern_Equals); ok { + return x.Equals + } + return "" +} + +func (m *MetricConfigResponse_Schedule_Pattern) GetStartsWith() string { + if x, ok := m.GetMatch().(*MetricConfigResponse_Schedule_Pattern_StartsWith); ok { + return x.StartsWith + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*MetricConfigResponse_Schedule_Pattern) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*MetricConfigResponse_Schedule_Pattern_Equals)(nil), + (*MetricConfigResponse_Schedule_Pattern_StartsWith)(nil), + } +} + +func init() { + proto.RegisterType((*MetricConfigRequest)(nil), "opentelemetry.proto.metrics.experimental.MetricConfigRequest") + proto.RegisterType((*MetricConfigResponse)(nil), "opentelemetry.proto.metrics.experimental.MetricConfigResponse") + proto.RegisterType((*MetricConfigResponse_Schedule)(nil), "opentelemetry.proto.metrics.experimental.MetricConfigResponse.Schedule") + proto.RegisterType((*MetricConfigResponse_Schedule_Pattern)(nil), "opentelemetry.proto.metrics.experimental.MetricConfigResponse.Schedule.Pattern") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/metrics/experimental/configservice.proto", fileDescriptor_79b5d4ea55caf90b) +} + +var fileDescriptor_79b5d4ea55caf90b = []byte{ + // 499 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x94, 0x4d, 0x6f, 0xd3, 0x4c, + 0x10, 0xc7, 0x9f, 0x4d, 0x9f, 0xbe, 0x64, 0x52, 0x09, 0xb1, 0x44, 0x60, 0x45, 0x42, 0x0a, 0x3d, + 0x05, 0xa1, 0xae, 0xd5, 0x00, 0x37, 0xe0, 0x10, 0x04, 0x05, 0x21, 0xd4, 0xc8, 0x41, 0xaa, 0xc4, + 0xc5, 0x72, 0x9d, 0xa9, 0xbd, 0xc2, 0xde, 0x75, 0x77, 0xc7, 0x49, 0xb9, 0xf0, 0x19, 0x10, 0xe2, + 0x0b, 0xf0, 0x99, 0xf8, 0x36, 0x9c, 0x90, 0x5f, 0xea, 0x38, 0x22, 0x87, 0x8a, 0x97, 0xdb, 0xe4, + 0x3f, 0x33, 0xbf, 0xff, 0x64, 0x6c, 0x0f, 0x3c, 0xd1, 0x19, 0x2a, 0xc2, 0x04, 0x53, 0x24, 0xf3, + 0xd1, 0xcd, 0x8c, 0x26, 0xed, 0x16, 0xb1, 0x0c, 0xad, 0x8b, 0x97, 0x19, 0x1a, 0x99, 0xa2, 0xa2, + 0x20, 0x71, 0x43, 0xad, 0xce, 0x65, 0x64, 0xd1, 0x2c, 0x64, 0x88, 0xa2, 0x2c, 0xe4, 0xa3, 0xb5, + 0xee, 0x4a, 0x14, 0x75, 0xb7, 0x68, 0x77, 0x0f, 0xc4, 0x26, 0x1f, 0x83, 0x56, 0xe7, 0x26, 0x44, + 0x77, 0x71, 0xd4, 0xc4, 0x15, 0xe4, 0xe0, 0x0b, 0x83, 0x5b, 0x6f, 0x4b, 0xd0, 0xf3, 0xd2, 0xd7, + 0xc3, 0x8b, 0x1c, 
0x2d, 0xf1, 0x17, 0xb0, 0x77, 0x55, 0xe9, 0xb0, 0x21, 0x1b, 0xf5, 0xc6, 0xf7, + 0xc5, 0xa6, 0x21, 0x1a, 0xdc, 0xe2, 0x48, 0x78, 0x75, 0xec, 0x35, 0xad, 0xfc, 0x11, 0xdc, 0x4e, + 0x02, 0x4b, 0xfe, 0x07, 0xa5, 0x97, 0xca, 0x3f, 0x97, 0x2a, 0x42, 0x93, 0x19, 0xa9, 0xc8, 0xe9, + 0x0c, 0xd9, 0x68, 0xdf, 0xeb, 0x17, 0xd9, 0x37, 0x45, 0xf2, 0xe5, 0x2a, 0x77, 0xf0, 0xfd, 0x7f, + 0xe8, 0xaf, 0x0f, 0x65, 0x33, 0xad, 0x2c, 0xf2, 0x21, 0xf4, 0xda, 0x0c, 0x56, 0x32, 0xda, 0x12, + 0x47, 0xe8, 0xda, 0x30, 0xc6, 0x79, 0x9e, 0xa0, 0x75, 0x3a, 0xc3, 0xad, 0x51, 0x6f, 0x7c, 0x2c, + 0xae, 0xbb, 0x3d, 0xb1, 0xc9, 0x54, 0xcc, 0x6a, 0x9e, 0xb7, 0x22, 0xf3, 0xc7, 0x70, 0xc7, 0xe6, + 0x51, 0x84, 0x96, 0x70, 0xee, 0x2f, 0x03, 0x49, 0x3e, 0xc9, 0x14, 0x7d, 0x8b, 0xa1, 0xb3, 0x35, + 0x64, 0xa3, 0x6d, 0xaf, 0xdf, 0xa4, 0x4f, 0x03, 0x49, 0xef, 0x64, 0x8a, 0x33, 0x0c, 0x07, 0x3f, + 0x3a, 0xb0, 0x77, 0x85, 0xe3, 0x9f, 0x80, 0xe3, 0x65, 0x98, 0xe4, 0x56, 0x6a, 0xe5, 0x67, 0x01, + 0x11, 0x1a, 0x65, 0x1d, 0x56, 0xce, 0x7c, 0xf2, 0x97, 0x66, 0x16, 0xd3, 0x8a, 0xeb, 0xdd, 0x6c, + 0xac, 0x6a, 0xc5, 0x16, 0xfe, 0x52, 0xfd, 0xe2, 0xdf, 0xf9, 0x47, 0xfe, 0x8d, 0x55, 0xe3, 0x7f, + 0x17, 0xa0, 0xc0, 0xe8, 0x79, 0x6b, 0x6d, 0xdd, 0x4a, 0x29, 0x76, 0x75, 0x02, 0xbb, 0x75, 0x29, + 0x77, 0x60, 0x07, 0x2f, 0xf2, 0x20, 0xb1, 0xe5, 0x13, 0xef, 0xbe, 0xfa, 0xcf, 0xab, 0x7f, 0xf3, + 0x7b, 0xd0, 0xb3, 0x14, 0x18, 0xb2, 0xfe, 0x52, 0x52, 0x5c, 0xbe, 0x54, 0x45, 0x1a, 0x2a, 0xf1, + 0x54, 0x52, 0x3c, 0xd9, 0x85, 0xed, 0x34, 0xa0, 0x30, 0x1e, 0x7f, 0x63, 0xb0, 0xdf, 0x1e, 0x96, + 0x7f, 0x66, 0x70, 0xe3, 0x18, 0x69, 0x4d, 0x7b, 0xfa, 0xbb, 0x7f, 0xbc, 0xfc, 0x6c, 0x06, 0xcf, + 0xfe, 0x6c, 0x6f, 0x93, 0xaf, 0x0c, 0x1e, 0x48, 0x7d, 0x6d, 0xc8, 0xc4, 0x69, 0x53, 0x66, 0xd5, + 0xcd, 0x98, 0x16, 0xe5, 0x53, 0xf6, 0xfe, 0x75, 0x24, 0x29, 0xce, 0xcf, 0x44, 0xa8, 0x53, 0xb7, + 0x00, 0x1e, 0xae, 0xce, 0xc2, 0x1a, 0xff, 0xb0, 0x3a, 0x12, 0x11, 0x2a, 0x37, 0xda, 0x7c, 0x93, + 0xce, 0x76, 0xca, 0x92, 0x87, 0x3f, 0x03, 0x00, 0x00, 
0xff, 0xff, 0x51, 0x6b, 0xa4, 0x34, 0xc6, + 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetricConfigClient is the client API for MetricConfig service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricConfigClient interface { + GetMetricConfig(ctx context.Context, in *MetricConfigRequest, opts ...grpc.CallOption) (*MetricConfigResponse, error) +} + +type metricConfigClient struct { + cc *grpc.ClientConn +} + +func NewMetricConfigClient(cc *grpc.ClientConn) MetricConfigClient { + return &metricConfigClient{cc} +} + +func (c *metricConfigClient) GetMetricConfig(ctx context.Context, in *MetricConfigRequest, opts ...grpc.CallOption) (*MetricConfigResponse, error) { + out := new(MetricConfigResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.metrics.experimental.MetricConfig/GetMetricConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricConfigServer is the server API for MetricConfig service. +type MetricConfigServer interface { + GetMetricConfig(context.Context, *MetricConfigRequest) (*MetricConfigResponse, error) +} + +// UnimplementedMetricConfigServer can be embedded to have forward compatible implementations. 
+type UnimplementedMetricConfigServer struct { +} + +func (*UnimplementedMetricConfigServer) GetMetricConfig(ctx context.Context, req *MetricConfigRequest) (*MetricConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMetricConfig not implemented") +} + +func RegisterMetricConfigServer(s *grpc.Server, srv MetricConfigServer) { + s.RegisterService(&_MetricConfig_serviceDesc, srv) +} + +func _MetricConfig_GetMetricConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MetricConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricConfigServer).GetMetricConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.metrics.experimental.MetricConfig/GetMetricConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricConfigServer).GetMetricConfig(ctx, req.(*MetricConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricConfig_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.metrics.experimental.MetricConfig", + HandlerType: (*MetricConfigServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetMetricConfig", + Handler: _MetricConfig_GetMetricConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/metrics/experimental/configservice.proto", +} diff --git a/internal/opentelemetry-proto-gen/metrics/v1/metrics.pb.go b/internal/opentelemetry-proto-gen/metrics/v1/metrics.pb.go new file mode 100644 index 0000000000..12df1441fb --- /dev/null +++ b/internal/opentelemetry-proto-gen/metrics/v1/metrics.pb.go @@ -0,0 +1,1501 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: opentelemetry/proto/metrics/v1/metrics.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v11 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/resource/v1" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// AggregationTemporality defines how a metric aggregator reports aggregated +// values. It describes how those values relate to the time interval over +// which they are aggregated. +type AggregationTemporality int32 + +const ( + // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. + AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0 + // DELTA is an AggregationTemporality for a metric aggregator which reports + // changes since last report time. Successive metrics contain aggregation of + // values from continuous and non-overlapping intervals. + // + // The values for a DELTA metric are based only on the time interval + // associated with one measurement cycle. There is no dependency on + // previous measurements like is the case for CUMULATIVE metrics. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // DELTA metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. 
A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0+1 to + // t_0+2 with a value of 2. + AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1 + // CUMULATIVE is an AggregationTemporality for a metic aggregator which + // reports changes since a fixed start time. This means that current values + // of a CUMULATIVE metric depend on all previous measurements since the + // start time. Because of this, the sender is required to retain this state + // in some form. If this state is lost or invalidated, the CUMULATIVE metric + // values MUST be reset and a new fixed start time following the last + // reported measurement time sent MUST be used. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // CUMULATIVE metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. 
A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+2 with a value of 5. + // 9. The system experiences a fault and loses state. + // 10. The system recovers and resumes receiving at time=t_1. + // 11. A request is received, the system measures 1 request. + // 12. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_1 to + // t_0+1 with a value of 1. + // + // Note: Even though, when reporting changes since last report time, using + // CUMULATIVE is valid, it is not recommended. This may cause problems for + // systems that do not use start_time to determine when the aggregation + // value was reset (e.g. Prometheus). + AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2 +) + +var AggregationTemporality_name = map[int32]string{ + 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED", + 1: "AGGREGATION_TEMPORALITY_DELTA", + 2: "AGGREGATION_TEMPORALITY_CUMULATIVE", +} + +var AggregationTemporality_value = map[string]int32{ + "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0, + "AGGREGATION_TEMPORALITY_DELTA": 1, + "AGGREGATION_TEMPORALITY_CUMULATIVE": 2, +} + +func (x AggregationTemporality) String() string { + return proto.EnumName(AggregationTemporality_name, int32(x)) +} + +func (AggregationTemporality) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{0} +} + +// A collection of InstrumentationLibraryMetrics from a Resource. +type ResourceMetrics struct { + // The resource for the metrics in this message. + // If this field is not set then no resource info is known. + Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of metrics that originate from a resource. 
+ InstrumentationLibraryMetrics []*InstrumentationLibraryMetrics `protobuf:"bytes,2,rep,name=instrumentation_library_metrics,json=instrumentationLibraryMetrics,proto3" json:"instrumentation_library_metrics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceMetrics) Reset() { *m = ResourceMetrics{} } +func (m *ResourceMetrics) String() string { return proto.CompactTextString(m) } +func (*ResourceMetrics) ProtoMessage() {} +func (*ResourceMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{0} +} +func (m *ResourceMetrics) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceMetrics.Unmarshal(m, b) +} +func (m *ResourceMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceMetrics.Marshal(b, m, deterministic) +} +func (m *ResourceMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetrics.Merge(m, src) +} +func (m *ResourceMetrics) XXX_Size() int { + return xxx_messageInfo_ResourceMetrics.Size(m) +} +func (m *ResourceMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetrics proto.InternalMessageInfo + +func (m *ResourceMetrics) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *ResourceMetrics) GetInstrumentationLibraryMetrics() []*InstrumentationLibraryMetrics { + if m != nil { + return m.InstrumentationLibraryMetrics + } + return nil +} + +// A collection of Metrics produced by an InstrumentationLibrary. +type InstrumentationLibraryMetrics struct { + // The instrumentation library information for the metrics in this message. + // If this field is not set then no library info is known. 
+ InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` + // A list of metrics that originate from an instrumentation library. + Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InstrumentationLibraryMetrics) Reset() { *m = InstrumentationLibraryMetrics{} } +func (m *InstrumentationLibraryMetrics) String() string { return proto.CompactTextString(m) } +func (*InstrumentationLibraryMetrics) ProtoMessage() {} +func (*InstrumentationLibraryMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{1} +} +func (m *InstrumentationLibraryMetrics) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InstrumentationLibraryMetrics.Unmarshal(m, b) +} +func (m *InstrumentationLibraryMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InstrumentationLibraryMetrics.Marshal(b, m, deterministic) +} +func (m *InstrumentationLibraryMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstrumentationLibraryMetrics.Merge(m, src) +} +func (m *InstrumentationLibraryMetrics) XXX_Size() int { + return xxx_messageInfo_InstrumentationLibraryMetrics.Size(m) +} +func (m *InstrumentationLibraryMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_InstrumentationLibraryMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_InstrumentationLibraryMetrics proto.InternalMessageInfo + +func (m *InstrumentationLibraryMetrics) GetInstrumentationLibrary() *v11.InstrumentationLibrary { + if m != nil { + return m.InstrumentationLibrary + } + return nil +} + +func (m *InstrumentationLibraryMetrics) GetMetrics() []*Metric { + if m != nil { + return m.Metrics + } + return nil +} + +// Defines a Metric which has one or more timeseries. 
+// +// The data model and relation between entities is shown in the +// diagram below. Here, "DataPoint" is the term used to refer to any +// one of the specific data point value types, and "points" is the term used +// to refer to any one of the lists of points contained in the Metric. +// +// - Metric is composed of a metadata and data. +// - Metadata part contains a name, description, unit. +// - Data is one of the possible types (Gauge, Sum, Histogram, etc.). +// - DataPoint contains timestamps, labels, and one of the possible value type +// fields. +// +// Metric +// +------------+ +// |name | +// |description | +// |unit | +---------------------------+ +// |data |---> |Gauge, Sum, Histogram, ... | +// +------------+ +---------------------------+ +// +// Data [One of Gauge, Sum, Histogram, ...] +// +-----------+ +// |... | // Metadata about the Data. +// |points |--+ +// +-----------+ | +// | +---------------------------+ +// | |DataPoint 1 | +// v |+------+------+ +------+ | +// +-----+ ||label |label |...|label | | +// | 1 |-->||value1|value2|...|valueN| | +// +-----+ |+------+------+ +------+ | +// | . | |+-----+ | +// | . | ||value| | +// | . | |+-----+ | +// | . | +---------------------------+ +// | . | . +// | . | . +// | . | . +// | . | +---------------------------+ +// | . | |DataPoint M | +// +-----+ |+------+------+ +------+ | +// | M |-->||label |label |...|label | | +// +-----+ ||value1|value2|...|valueN| | +// |+------+------+ +------+ | +// |+-----+ | +// ||value| | +// |+-----+ | +// +---------------------------+ +// +// All DataPoint types have three common fields: +// - Labels zero or more key-value pairs associated with the data point. +// - StartTimeUnixNano MUST be set to the start of the interval when the data's +// type includes an AggregationTemporality. This field is not set otherwise. +// - TimeUnixNano MUST be set to: +// - the moment when an aggregation is reported (independent of the +// aggregation temporality). 
+// - the instantaneous time of the event. +type Metric struct { + // name of the metric, including its DNS name prefix. It must be unique. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` + // Data determines the aggregation type (if any) of the metric, what is the + // reported value type for the data points, as well as the relatationship to + // the time interval over which they are reported. + // + // TODO: Update table after the decision on: + // https://github.com/open-telemetry/opentelemetry-specification/issues/731. + // By default, metrics recording using the OpenTelemetry API are exported as + // (the table does not include MeasurementValueType to avoid extra rows): + // + // Instrument Type + // ---------------------------------------------- + // Counter Sum(aggregation_temporality=delta;is_monotonic=true) + // UpDownCounter Sum(aggregation_temporality=delta;is_monotonic=false) + // ValueRecorder TBD + // SumObserver Sum(aggregation_temporality=cumulative;is_monotonic=true) + // UpDownSumObserver Sum(aggregation_temporality=cumulative;is_monotonic=false) + // ValueObserver Gauge() + // + // Types that are valid to be assigned to Data: + // *Metric_IntGauge + // *Metric_DoubleGauge + // *Metric_IntSum + // *Metric_DoubleSum + // *Metric_IntHistogram + // *Metric_DoubleHistogram + Data isMetric_Data `protobuf_oneof:"data"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } 
+func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{2} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +type isMetric_Data interface { + isMetric_Data() +} + +type Metric_IntGauge struct { + IntGauge *IntGauge `protobuf:"bytes,4,opt,name=int_gauge,json=intGauge,proto3,oneof" json:"int_gauge,omitempty"` +} +type Metric_DoubleGauge struct { + DoubleGauge *DoubleGauge `protobuf:"bytes,5,opt,name=double_gauge,json=doubleGauge,proto3,oneof" json:"double_gauge,omitempty"` +} +type Metric_IntSum struct { + IntSum *IntSum `protobuf:"bytes,6,opt,name=int_sum,json=intSum,proto3,oneof" json:"int_sum,omitempty"` +} +type Metric_DoubleSum struct { + DoubleSum *DoubleSum `protobuf:"bytes,7,opt,name=double_sum,json=doubleSum,proto3,oneof" json:"double_sum,omitempty"` +} +type Metric_IntHistogram struct { + IntHistogram *IntHistogram `protobuf:"bytes,8,opt,name=int_histogram,json=intHistogram,proto3,oneof" json:"int_histogram,omitempty"` +} +type Metric_DoubleHistogram struct { + DoubleHistogram *DoubleHistogram `protobuf:"bytes,9,opt,name=double_histogram,json=doubleHistogram,proto3,oneof" json:"double_histogram,omitempty"` +} + +func (*Metric_IntGauge) isMetric_Data() {} +func (*Metric_DoubleGauge) isMetric_Data() {} +func (*Metric_IntSum) isMetric_Data() {} +func (*Metric_DoubleSum) isMetric_Data() {} +func (*Metric_IntHistogram) isMetric_Data() {} +func (*Metric_DoubleHistogram) isMetric_Data() {} + 
+func (m *Metric) GetData() isMetric_Data { + if m != nil { + return m.Data + } + return nil +} + +func (m *Metric) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Metric) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Metric) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *Metric) GetIntGauge() *IntGauge { + if x, ok := m.GetData().(*Metric_IntGauge); ok { + return x.IntGauge + } + return nil +} + +func (m *Metric) GetDoubleGauge() *DoubleGauge { + if x, ok := m.GetData().(*Metric_DoubleGauge); ok { + return x.DoubleGauge + } + return nil +} + +func (m *Metric) GetIntSum() *IntSum { + if x, ok := m.GetData().(*Metric_IntSum); ok { + return x.IntSum + } + return nil +} + +func (m *Metric) GetDoubleSum() *DoubleSum { + if x, ok := m.GetData().(*Metric_DoubleSum); ok { + return x.DoubleSum + } + return nil +} + +func (m *Metric) GetIntHistogram() *IntHistogram { + if x, ok := m.GetData().(*Metric_IntHistogram); ok { + return x.IntHistogram + } + return nil +} + +func (m *Metric) GetDoubleHistogram() *DoubleHistogram { + if x, ok := m.GetData().(*Metric_DoubleHistogram); ok { + return x.DoubleHistogram + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Metric) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Metric_IntGauge)(nil), + (*Metric_DoubleGauge)(nil), + (*Metric_IntSum)(nil), + (*Metric_DoubleSum)(nil), + (*Metric_IntHistogram)(nil), + (*Metric_DoubleHistogram)(nil), + } +} + +// Gauge represents the type of a int scalar metric that always exports the +// "current value" for every data point. It should be used for an "unknown" +// aggregation. +// +// A Gauge does not support different aggregation temporalities. Given the +// aggregation is unknown, points cannot be combined using the same +// aggregation, regardless of aggregation temporalities. 
Therefore, +// AggregationTemporality is not included. Consequently, this also means +// "StartTimeUnixNano" is ignored for all data points. +type IntGauge struct { + DataPoints []*IntDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntGauge) Reset() { *m = IntGauge{} } +func (m *IntGauge) String() string { return proto.CompactTextString(m) } +func (*IntGauge) ProtoMessage() {} +func (*IntGauge) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{3} +} +func (m *IntGauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntGauge.Unmarshal(m, b) +} +func (m *IntGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntGauge.Marshal(b, m, deterministic) +} +func (m *IntGauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntGauge.Merge(m, src) +} +func (m *IntGauge) XXX_Size() int { + return xxx_messageInfo_IntGauge.Size(m) +} +func (m *IntGauge) XXX_DiscardUnknown() { + xxx_messageInfo_IntGauge.DiscardUnknown(m) +} + +var xxx_messageInfo_IntGauge proto.InternalMessageInfo + +func (m *IntGauge) GetDataPoints() []*IntDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +// Gauge represents the type of a double scalar metric that always exports the +// "current value" for every data point. It should be used for an "unknown" +// aggregation. +// +// A Gauge does not support different aggregation temporalities. Given the +// aggregation is unknown, points cannot be combined using the same +// aggregation, regardless of aggregation temporalities. Therefore, +// AggregationTemporality is not included. Consequently, this also means +// "StartTimeUnixNano" is ignored for all data points. 
+type DoubleGauge struct { + DataPoints []*DoubleDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleGauge) Reset() { *m = DoubleGauge{} } +func (m *DoubleGauge) String() string { return proto.CompactTextString(m) } +func (*DoubleGauge) ProtoMessage() {} +func (*DoubleGauge) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{4} +} +func (m *DoubleGauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleGauge.Unmarshal(m, b) +} +func (m *DoubleGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleGauge.Marshal(b, m, deterministic) +} +func (m *DoubleGauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleGauge.Merge(m, src) +} +func (m *DoubleGauge) XXX_Size() int { + return xxx_messageInfo_DoubleGauge.Size(m) +} +func (m *DoubleGauge) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleGauge.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleGauge proto.InternalMessageInfo + +func (m *DoubleGauge) GetDataPoints() []*DoubleDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +// Sum represents the type of a numeric int scalar metric that is calculated as +// a sum of all reported measurements over a time interval. +type IntSum struct { + DataPoints []*IntDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. 
+ AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` + // If "true" means that the sum is monotonic. + IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntSum) Reset() { *m = IntSum{} } +func (m *IntSum) String() string { return proto.CompactTextString(m) } +func (*IntSum) ProtoMessage() {} +func (*IntSum) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{5} +} +func (m *IntSum) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntSum.Unmarshal(m, b) +} +func (m *IntSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntSum.Marshal(b, m, deterministic) +} +func (m *IntSum) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntSum.Merge(m, src) +} +func (m *IntSum) XXX_Size() int { + return xxx_messageInfo_IntSum.Size(m) +} +func (m *IntSum) XXX_DiscardUnknown() { + xxx_messageInfo_IntSum.DiscardUnknown(m) +} + +var xxx_messageInfo_IntSum proto.InternalMessageInfo + +func (m *IntSum) GetDataPoints() []*IntDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *IntSum) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +func (m *IntSum) GetIsMonotonic() bool { + if m != nil { + return m.IsMonotonic + } + return false +} + +// Sum represents the type of a numeric double scalar metric that is calculated +// as a sum of all reported measurements over a time interval. 
+type DoubleSum struct { + DataPoints []*DoubleDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` + // If "true" means that the sum is monotonic. + IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleSum) Reset() { *m = DoubleSum{} } +func (m *DoubleSum) String() string { return proto.CompactTextString(m) } +func (*DoubleSum) ProtoMessage() {} +func (*DoubleSum) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{6} +} +func (m *DoubleSum) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleSum.Unmarshal(m, b) +} +func (m *DoubleSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleSum.Marshal(b, m, deterministic) +} +func (m *DoubleSum) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleSum.Merge(m, src) +} +func (m *DoubleSum) XXX_Size() int { + return xxx_messageInfo_DoubleSum.Size(m) +} +func (m *DoubleSum) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleSum.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleSum proto.InternalMessageInfo + +func (m *DoubleSum) GetDataPoints() []*DoubleDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *DoubleSum) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED 
+} + +func (m *DoubleSum) GetIsMonotonic() bool { + if m != nil { + return m.IsMonotonic + } + return false +} + +// Represents the type of a metric that is calculated by aggregating as a +// Histogram of all reported int measurements over a time interval. +type IntHistogram struct { + DataPoints []*IntHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntHistogram) Reset() { *m = IntHistogram{} } +func (m *IntHistogram) String() string { return proto.CompactTextString(m) } +func (*IntHistogram) ProtoMessage() {} +func (*IntHistogram) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{7} +} +func (m *IntHistogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntHistogram.Unmarshal(m, b) +} +func (m *IntHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntHistogram.Marshal(b, m, deterministic) +} +func (m *IntHistogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntHistogram.Merge(m, src) +} +func (m *IntHistogram) XXX_Size() int { + return xxx_messageInfo_IntHistogram.Size(m) +} +func (m *IntHistogram) XXX_DiscardUnknown() { + xxx_messageInfo_IntHistogram.DiscardUnknown(m) +} + +var xxx_messageInfo_IntHistogram proto.InternalMessageInfo + +func (m *IntHistogram) GetDataPoints() []*IntHistogramDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *IntHistogram) 
GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +// Represents the type of a metric that is calculated by aggregating as a +// Histogram of all reported double measurements over a time interval. +type DoubleHistogram struct { + DataPoints []*DoubleHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleHistogram) Reset() { *m = DoubleHistogram{} } +func (m *DoubleHistogram) String() string { return proto.CompactTextString(m) } +func (*DoubleHistogram) ProtoMessage() {} +func (*DoubleHistogram) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{8} +} +func (m *DoubleHistogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleHistogram.Unmarshal(m, b) +} +func (m *DoubleHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleHistogram.Marshal(b, m, deterministic) +} +func (m *DoubleHistogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleHistogram.Merge(m, src) +} +func (m *DoubleHistogram) XXX_Size() int { + return xxx_messageInfo_DoubleHistogram.Size(m) +} +func (m *DoubleHistogram) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleHistogram.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleHistogram proto.InternalMessageInfo + +func (m *DoubleHistogram) GetDataPoints() 
[]*DoubleHistogramDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *DoubleHistogram) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +// IntDataPoint is a single data point in a timeseries that describes the +// time-varying values of a int64 metric. +type IntDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. + // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + // + // Value of 0 indicates that the timestamp is unspecified. In that case the + // timestamp may be decided by the backend. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // time_unix_nano is the moment when this aggregation value was reported. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // value itself. 
+ Value int64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*IntExemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntDataPoint) Reset() { *m = IntDataPoint{} } +func (m *IntDataPoint) String() string { return proto.CompactTextString(m) } +func (*IntDataPoint) ProtoMessage() {} +func (*IntDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{9} +} +func (m *IntDataPoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntDataPoint.Unmarshal(m, b) +} +func (m *IntDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntDataPoint.Marshal(b, m, deterministic) +} +func (m *IntDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntDataPoint.Merge(m, src) +} +func (m *IntDataPoint) XXX_Size() int { + return xxx_messageInfo_IntDataPoint.Size(m) +} +func (m *IntDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_IntDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_IntDataPoint proto.InternalMessageInfo + +func (m *IntDataPoint) GetLabels() []*v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *IntDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *IntDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *IntDataPoint) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *IntDataPoint) GetExemplars() []*IntExemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// DoubleDataPoint is a single data point in a timeseries that describes the +// time-varying value of a double metric. 
+type DoubleDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. + // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + // + // Value of 0 indicates that the timestamp is unspecified. In that case the + // timestamp may be decided by the backend. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // time_unix_nano is the moment when this aggregation value was reported. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // value itself. 
+ Value float64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*DoubleExemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleDataPoint) Reset() { *m = DoubleDataPoint{} } +func (m *DoubleDataPoint) String() string { return proto.CompactTextString(m) } +func (*DoubleDataPoint) ProtoMessage() {} +func (*DoubleDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{10} +} +func (m *DoubleDataPoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleDataPoint.Unmarshal(m, b) +} +func (m *DoubleDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleDataPoint.Marshal(b, m, deterministic) +} +func (m *DoubleDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleDataPoint.Merge(m, src) +} +func (m *DoubleDataPoint) XXX_Size() int { + return xxx_messageInfo_DoubleDataPoint.Size(m) +} +func (m *DoubleDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleDataPoint proto.InternalMessageInfo + +func (m *DoubleDataPoint) GetLabels() []*v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *DoubleDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *DoubleDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *DoubleDataPoint) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *DoubleDataPoint) GetExemplars() []*DoubleExemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// IntHistogramDataPoint is a single data point in a 
timeseries that describes +// the time-varying values of a Histogram of int values. A Histogram contains +// summary statistics for a population of values, it may optionally contain +// the distribution of those values across a set of buckets. +type IntHistogramDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. + // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + // + // Value of 0 indicates that the timestamp is unspecified. In that case the + // timestamp may be decided by the backend. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // time_unix_nano is the moment when this aggregation value was reported. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // count is the number of values in the population. Must be non-negative. This + // value must be equal to the sum of the "count" fields in buckets if a + // histogram is provided. + Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` + // sum of the values in the population. If count is zero then this field + // must be zero. This value must be equal to the sum of the "sum" fields in + // buckets if a histogram is provided. 
+ Sum int64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` + // bucket_counts is an optional field contains the count values of histogram + // for each bucket. + // + // The sum of the bucket_counts must equal the value in the count field. + // + // The number of elements in bucket_counts array must be by one greater than + // the number of elements in explicit_bounds array. + BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` + // explicit_bounds specifies buckets with explicitly defined bounds for values. + // The bucket boundaries are described by "bounds" field. + // + // This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket + // at index i are: + // + // (-infinity, bounds[i]) for i == 0 + // [bounds[i-1], bounds[i]) for 0 < i < N-1 + // [bounds[i], +infinity) for i == N-1 + // The values in bounds array must be strictly increasing. + // + // Note: only [a, b) intervals are currently supported for each bucket except the first one. + // If we decide to also support (a, b] intervals we should add support for these by defining + // a boolean value which decides what type of intervals to use. 
+ ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*IntExemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntHistogramDataPoint) Reset() { *m = IntHistogramDataPoint{} } +func (m *IntHistogramDataPoint) String() string { return proto.CompactTextString(m) } +func (*IntHistogramDataPoint) ProtoMessage() {} +func (*IntHistogramDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{11} +} +func (m *IntHistogramDataPoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntHistogramDataPoint.Unmarshal(m, b) +} +func (m *IntHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntHistogramDataPoint.Marshal(b, m, deterministic) +} +func (m *IntHistogramDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntHistogramDataPoint.Merge(m, src) +} +func (m *IntHistogramDataPoint) XXX_Size() int { + return xxx_messageInfo_IntHistogramDataPoint.Size(m) +} +func (m *IntHistogramDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_IntHistogramDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_IntHistogramDataPoint proto.InternalMessageInfo + +func (m *IntHistogramDataPoint) GetLabels() []*v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *IntHistogramDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *IntHistogramDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *IntHistogramDataPoint) GetCount() uint64 { + if m != nil { + return m.Count + } + return 0 +} + 
+func (m *IntHistogramDataPoint) GetSum() int64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *IntHistogramDataPoint) GetBucketCounts() []uint64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +func (m *IntHistogramDataPoint) GetExplicitBounds() []float64 { + if m != nil { + return m.ExplicitBounds + } + return nil +} + +func (m *IntHistogramDataPoint) GetExemplars() []*IntExemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// HistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Histogram of double values. A Histogram contains +// summary statistics for a population of values, it may optionally contain the +// distribution of those values across a set of buckets. +type DoubleHistogramDataPoint struct { + // The set of labels that uniquely identify this timeseries. + Labels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` + // start_time_unix_nano is the last time when the aggregation value was reset + // to "zero". For some metric types this is ignored, see data types for more + // details. + // + // The aggregation value is over the time interval (start_time_unix_nano, + // time_unix_nano]. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + // + // Value of 0 indicates that the timestamp is unspecified. In that case the + // timestamp may be decided by the backend. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // time_unix_nano is the moment when this aggregation value was reported. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // count is the number of values in the population. Must be non-negative. 
This + // value must be equal to the sum of the "count" fields in buckets if a + // histogram is provided. + Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` + // sum of the values in the population. If count is zero then this field + // must be zero. This value must be equal to the sum of the "sum" fields in + // buckets if a histogram is provided. + Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` + // bucket_counts is an optional field contains the count values of histogram + // for each bucket. + // + // The sum of the bucket_counts must equal the value in the count field. + // + // The number of elements in bucket_counts array must be by one greater than + // the number of elements in explicit_bounds array. + BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` + // explicit_bounds specifies buckets with explicitly defined bounds for values. + // The bucket boundaries are described by "bounds" field. + // + // This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket + // at index i are: + // + // (-infinity, bounds[i]) for i == 0 + // [bounds[i-1], bounds[i]) for 0 < i < N-1 + // [bounds[i], +infinity) for i == N-1 + // The values in bounds array must be strictly increasing. + // + // Note: only [a, b) intervals are currently supported for each bucket except the first one. + // If we decide to also support (a, b] intervals we should add support for these by defining + // a boolean value which decides what type of intervals to use. 
+ ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*DoubleExemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleHistogramDataPoint) Reset() { *m = DoubleHistogramDataPoint{} } +func (m *DoubleHistogramDataPoint) String() string { return proto.CompactTextString(m) } +func (*DoubleHistogramDataPoint) ProtoMessage() {} +func (*DoubleHistogramDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{12} +} +func (m *DoubleHistogramDataPoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleHistogramDataPoint.Unmarshal(m, b) +} +func (m *DoubleHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleHistogramDataPoint.Marshal(b, m, deterministic) +} +func (m *DoubleHistogramDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleHistogramDataPoint.Merge(m, src) +} +func (m *DoubleHistogramDataPoint) XXX_Size() int { + return xxx_messageInfo_DoubleHistogramDataPoint.Size(m) +} +func (m *DoubleHistogramDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleHistogramDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleHistogramDataPoint proto.InternalMessageInfo + +func (m *DoubleHistogramDataPoint) GetLabels() []*v11.StringKeyValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *DoubleHistogramDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *DoubleHistogramDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *DoubleHistogramDataPoint) GetCount() 
uint64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DoubleHistogramDataPoint) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *DoubleHistogramDataPoint) GetBucketCounts() []uint64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +func (m *DoubleHistogramDataPoint) GetExplicitBounds() []float64 { + if m != nil { + return m.ExplicitBounds + } + return nil +} + +func (m *DoubleHistogramDataPoint) GetExemplars() []*DoubleExemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// A representation of an exemplar, which is a sample input int measurement. +// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +type IntExemplar struct { + // The set of labels that were filtered out by the aggregator, but recorded + // alongside the original measurement. Only labels that were filtered out + // by the aggregator should be included + FilteredLabels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=filtered_labels,json=filteredLabels,proto3" json:"filtered_labels,omitempty"` + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // Numerical int value of the measurement that was recorded. + Value int64 `protobuf:"fixed64,3,opt,name=value,proto3" json:"value,omitempty"` + // (Optional) Span ID of the exemplar trace. + // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // (Optional) Trace ID of the exemplar trace. 
+ // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntExemplar) Reset() { *m = IntExemplar{} } +func (m *IntExemplar) String() string { return proto.CompactTextString(m) } +func (*IntExemplar) ProtoMessage() {} +func (*IntExemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{13} +} +func (m *IntExemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntExemplar.Unmarshal(m, b) +} +func (m *IntExemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntExemplar.Marshal(b, m, deterministic) +} +func (m *IntExemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntExemplar.Merge(m, src) +} +func (m *IntExemplar) XXX_Size() int { + return xxx_messageInfo_IntExemplar.Size(m) +} +func (m *IntExemplar) XXX_DiscardUnknown() { + xxx_messageInfo_IntExemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_IntExemplar proto.InternalMessageInfo + +func (m *IntExemplar) GetFilteredLabels() []*v11.StringKeyValue { + if m != nil { + return m.FilteredLabels + } + return nil +} + +func (m *IntExemplar) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *IntExemplar) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *IntExemplar) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *IntExemplar) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +// A representation of an exemplar, which is a sample input double measurement. 
+// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +type DoubleExemplar struct { + // The set of labels that were filtered out by the aggregator, but recorded + // alongside the original measurement. Only labels that were filtered out + // by the aggregator should be included + FilteredLabels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=filtered_labels,json=filteredLabels,proto3" json:"filtered_labels,omitempty"` + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // Numerical double value of the measurement that was recorded. + Value float64 `protobuf:"fixed64,3,opt,name=value,proto3" json:"value,omitempty"` + // (Optional) Span ID of the exemplar trace. + // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // (Optional) Trace ID of the exemplar trace. + // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. 
+ TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleExemplar) Reset() { *m = DoubleExemplar{} } +func (m *DoubleExemplar) String() string { return proto.CompactTextString(m) } +func (*DoubleExemplar) ProtoMessage() {} +func (*DoubleExemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{14} +} +func (m *DoubleExemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleExemplar.Unmarshal(m, b) +} +func (m *DoubleExemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleExemplar.Marshal(b, m, deterministic) +} +func (m *DoubleExemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleExemplar.Merge(m, src) +} +func (m *DoubleExemplar) XXX_Size() int { + return xxx_messageInfo_DoubleExemplar.Size(m) +} +func (m *DoubleExemplar) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleExemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleExemplar proto.InternalMessageInfo + +func (m *DoubleExemplar) GetFilteredLabels() []*v11.StringKeyValue { + if m != nil { + return m.FilteredLabels + } + return nil +} + +func (m *DoubleExemplar) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *DoubleExemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *DoubleExemplar) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *DoubleExemplar) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.metrics.v1.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) + proto.RegisterType((*ResourceMetrics)(nil), "opentelemetry.proto.metrics.v1.ResourceMetrics") + 
proto.RegisterType((*InstrumentationLibraryMetrics)(nil), "opentelemetry.proto.metrics.v1.InstrumentationLibraryMetrics") + proto.RegisterType((*Metric)(nil), "opentelemetry.proto.metrics.v1.Metric") + proto.RegisterType((*IntGauge)(nil), "opentelemetry.proto.metrics.v1.IntGauge") + proto.RegisterType((*DoubleGauge)(nil), "opentelemetry.proto.metrics.v1.DoubleGauge") + proto.RegisterType((*IntSum)(nil), "opentelemetry.proto.metrics.v1.IntSum") + proto.RegisterType((*DoubleSum)(nil), "opentelemetry.proto.metrics.v1.DoubleSum") + proto.RegisterType((*IntHistogram)(nil), "opentelemetry.proto.metrics.v1.IntHistogram") + proto.RegisterType((*DoubleHistogram)(nil), "opentelemetry.proto.metrics.v1.DoubleHistogram") + proto.RegisterType((*IntDataPoint)(nil), "opentelemetry.proto.metrics.v1.IntDataPoint") + proto.RegisterType((*DoubleDataPoint)(nil), "opentelemetry.proto.metrics.v1.DoubleDataPoint") + proto.RegisterType((*IntHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.IntHistogramDataPoint") + proto.RegisterType((*DoubleHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.DoubleHistogramDataPoint") + proto.RegisterType((*IntExemplar)(nil), "opentelemetry.proto.metrics.v1.IntExemplar") + proto.RegisterType((*DoubleExemplar)(nil), "opentelemetry.proto.metrics.v1.DoubleExemplar") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/metrics/v1/metrics.proto", fileDescriptor_3c3112f9fa006917) +} + +var fileDescriptor_3c3112f9fa006917 = []byte{ + // 1059 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0x41, 0x4f, 0xe3, 0x46, + 0x14, 0xc6, 0x09, 0x38, 0xc9, 0x4b, 0x16, 0xd2, 0xd1, 0x16, 0xdc, 0x95, 0x68, 0x21, 0x5b, 0xb1, + 0x74, 0x77, 0x49, 0x04, 0xd5, 0x56, 0xbd, 0x54, 0x6d, 0x80, 0x14, 0xd2, 0x06, 0x36, 0x1a, 0x02, + 0x12, 0x55, 0x25, 0x6b, 0x12, 0x4f, 0xb3, 0xa3, 0xda, 0x33, 0x91, 0x3d, 0x46, 0xf0, 0x03, 0x7a, + 0x6b, 0x4f, 0xfd, 0x31, 0xfd, 0x1d, 0x3d, 0x54, 0xed, 0xa5, 0x52, 0xef, 
0x3d, 0xf5, 0xd2, 0x53, + 0x0f, 0xd5, 0x8c, 0x6d, 0x92, 0x2c, 0x86, 0x64, 0xc5, 0xae, 0xc4, 0xde, 0xde, 0xbc, 0x79, 0xef, + 0xf3, 0xf7, 0xbe, 0xf7, 0x5e, 0x0c, 0x86, 0xa7, 0x62, 0x40, 0xb9, 0xa4, 0x2e, 0xf5, 0xa8, 0xf4, + 0x2f, 0x6a, 0x03, 0x5f, 0x48, 0x51, 0x53, 0x36, 0xeb, 0x05, 0xb5, 0xb3, 0xcd, 0xc4, 0xac, 0xea, + 0x0b, 0xf4, 0xfe, 0x58, 0x74, 0xe4, 0xac, 0x26, 0x21, 0x67, 0x9b, 0x0f, 0x1e, 0xa7, 0xa1, 0xf5, + 0x84, 0xe7, 0x09, 0xae, 0xc0, 0x22, 0x2b, 0x4a, 0x7b, 0x50, 0x4d, 0x8b, 0xf5, 0x69, 0x20, 0x42, + 0xbf, 0x47, 0x55, 0x74, 0x62, 0x47, 0xf1, 0x95, 0xbf, 0x0c, 0x58, 0xc0, 0xb1, 0xeb, 0x20, 0x7a, + 0x24, 0x6a, 0x40, 0x3e, 0x89, 0xb2, 0x8c, 0x15, 0x63, 0xbd, 0xb8, 0xf5, 0x51, 0x35, 0x8d, 0xe2, + 0x25, 0xd4, 0xd9, 0x66, 0x35, 0xc1, 0xc0, 0x97, 0xa9, 0xe8, 0x07, 0x03, 0x3e, 0x60, 0x3c, 0x90, + 0x7e, 0xe8, 0x51, 0x2e, 0x89, 0x64, 0x82, 0xdb, 0x2e, 0xeb, 0xfa, 0xc4, 0xbf, 0xb0, 0xe3, 0xea, + 0xac, 0xcc, 0x4a, 0x76, 0xbd, 0xb8, 0xf5, 0x59, 0xf5, 0x66, 0x05, 0xaa, 0xcd, 0x71, 0x98, 0x56, + 0x84, 0x12, 0xf3, 0xc5, 0xcb, 0xec, 0xa6, 0xeb, 0xca, 0xaf, 0x06, 0x2c, 0xdf, 0x08, 0x80, 0x38, + 0x2c, 0x5d, 0x43, 0x34, 0xae, 0xff, 0x59, 0x2a, 0xc1, 0x58, 0xf8, 0x6b, 0xf9, 0xe1, 0xc5, 0x74, + 0x62, 0xe8, 0x0b, 0xc8, 0x8d, 0x0b, 0xb0, 0x36, 0x49, 0x80, 0x88, 0x29, 0x4e, 0xd2, 0x2a, 0xbf, + 0xcc, 0x82, 0x19, 0xf9, 0x10, 0x82, 0x59, 0x4e, 0xbc, 0xa8, 0x53, 0x05, 0xac, 0x6d, 0xb4, 0x02, + 0x45, 0x87, 0x06, 0x3d, 0x9f, 0x0d, 0xd4, 0x63, 0xad, 0x8c, 0xbe, 0x1a, 0x75, 0xa9, 0xac, 0x90, + 0x33, 0x69, 0x65, 0xa3, 0x2c, 0x65, 0xa3, 0x3d, 0x28, 0x30, 0x2e, 0xed, 0x3e, 0x09, 0xfb, 0xd4, + 0x9a, 0xd5, 0x85, 0xaf, 0x4f, 0xee, 0x8c, 0xdc, 0x53, 0xf1, 0xfb, 0x33, 0x38, 0xcf, 0x62, 0x1b, + 0xb5, 0xa1, 0xe4, 0x88, 0xb0, 0xeb, 0xd2, 0x18, 0x6b, 0x4e, 0x63, 0x3d, 0x99, 0x84, 0xb5, 0xab, + 0x73, 0x12, 0xb8, 0xa2, 0x33, 0x3c, 0xa2, 0x3a, 0xe4, 0x14, 0xb5, 0x20, 0xf4, 0x2c, 0x53, 0x83, + 0xad, 0x4d, 0x41, 0xec, 0x28, 0xf4, 0xf6, 0x67, 0xb0, 0xc9, 0xb4, 0x85, 0xbe, 0x02, 0x88, 0x49, + 0x29, 0x94, 
0xdc, 0x0d, 0x73, 0x7d, 0x85, 0x52, 0x04, 0x54, 0x70, 0x92, 0x03, 0x3a, 0x82, 0x7b, + 0x8a, 0xce, 0x0b, 0x16, 0x48, 0xd1, 0xf7, 0x89, 0x67, 0xe5, 0x35, 0xdc, 0xd3, 0x29, 0x48, 0xed, + 0x27, 0x39, 0xfb, 0x33, 0xb8, 0xc4, 0x46, 0xce, 0xe8, 0x5b, 0x28, 0xc7, 0x04, 0x87, 0xb8, 0x05, + 0x8d, 0x5b, 0x9b, 0x8e, 0xe6, 0x28, 0xf4, 0x82, 0x33, 0xee, 0xda, 0x36, 0x61, 0xd6, 0x21, 0x92, + 0x54, 0x4e, 0x21, 0x9f, 0xf4, 0x0c, 0x1d, 0x40, 0x51, 0xf9, 0xec, 0x81, 0x60, 0x5c, 0x06, 0x96, + 0xa1, 0x67, 0x71, 0x9a, 0x22, 0x76, 0x89, 0x24, 0x6d, 0x95, 0x84, 0xc1, 0x49, 0xcc, 0xa0, 0x62, + 0x43, 0x71, 0xa4, 0x85, 0xa8, 0x9d, 0x86, 0x3e, 0x65, 0x29, 0xe9, 0x0f, 0xf8, 0xdb, 0x00, 0x33, + 0xea, 0xeb, 0x6b, 0xa6, 0x8e, 0x04, 0x2c, 0x91, 0x7e, 0xdf, 0xa7, 0xfd, 0x68, 0xfb, 0x25, 0xf5, + 0x06, 0xc2, 0x27, 0x2e, 0x93, 0x17, 0x7a, 0x79, 0xe6, 0xb7, 0x3e, 0x99, 0x04, 0x5d, 0x1f, 0xa6, + 0x77, 0x86, 0xd9, 0x78, 0x91, 0xa4, 0xfa, 0xd1, 0x2a, 0x94, 0x58, 0x60, 0x7b, 0x82, 0x0b, 0x29, + 0x38, 0xeb, 0xe9, 0x3d, 0xcc, 0xe3, 0x22, 0x0b, 0x0e, 0x12, 0x57, 0xe5, 0x1f, 0x03, 0x0a, 0x97, + 0xf3, 0xf7, 0xfa, 0xd5, 0xbc, 0x93, 0x35, 0xff, 0x6e, 0x40, 0x69, 0x74, 0x49, 0xd0, 0x49, 0x5a, + 0xd9, 0xcf, 0x5e, 0x65, 0xcf, 0xee, 0x46, 0xf1, 0x95, 0x3f, 0x0d, 0x58, 0x78, 0x69, 0x4d, 0xd1, + 0x69, 0x5a, 0x71, 0x9f, 0xbe, 0xe2, 0xb2, 0xdf, 0x91, 0xfa, 0x7e, 0xca, 0xe8, 0xce, 0x5d, 0xb2, + 0x41, 0x0d, 0x30, 0x5d, 0xd2, 0xa5, 0x6e, 0x52, 0xd7, 0xc6, 0x84, 0x77, 0xe8, 0x91, 0xf4, 0x19, + 0xef, 0x7f, 0x4d, 0x2f, 0x4e, 0x88, 0x1b, 0x52, 0x1c, 0x27, 0xa3, 0x1a, 0xdc, 0x0f, 0x24, 0xf1, + 0xa5, 0x2d, 0x99, 0x47, 0xed, 0x90, 0xb3, 0x73, 0x9b, 0x13, 0x2e, 0x74, 0x15, 0x26, 0x7e, 0x47, + 0xdf, 0x75, 0x98, 0x47, 0x8f, 0x39, 0x3b, 0x3f, 0x24, 0x5c, 0xa0, 0x0f, 0x61, 0xfe, 0xa5, 0xd0, + 0xac, 0x0e, 0x2d, 0xc9, 0xd1, 0xa8, 0xfb, 0x30, 0x77, 0xa6, 0x9e, 0xa3, 0xdf, 0x73, 0x65, 0x1c, + 0x1d, 0x50, 0x13, 0x0a, 0xf4, 0x9c, 0x7a, 0x03, 0x97, 0xf8, 0x81, 0x35, 0xa7, 0x69, 0x3f, 0x99, + 0x62, 0xd6, 0x1a, 0x71, 0x0e, 0x1e, 0x66, 0x57, 
0x7e, 0xce, 0x24, 0xfd, 0x7e, 0x2b, 0x25, 0x31, + 0x12, 0x49, 0x5a, 0x57, 0x25, 0xa9, 0x4e, 0x37, 0xa1, 0x69, 0xaa, 0xfc, 0x9b, 0x81, 0x77, 0x53, + 0x97, 0xf3, 0xee, 0x6b, 0xd3, 0x13, 0x21, 0x97, 0x5a, 0x1b, 0x13, 0x47, 0x07, 0x54, 0x86, 0xac, + 0xfa, 0x5b, 0x62, 0x4e, 0x8f, 0x90, 0x32, 0xd1, 0x43, 0xb8, 0xd7, 0x0d, 0x7b, 0xdf, 0x53, 0x69, + 0xeb, 0x88, 0xc0, 0x32, 0x57, 0xb2, 0x0a, 0x2c, 0x72, 0xee, 0x68, 0x1f, 0x7a, 0x04, 0x0b, 0xf4, + 0x7c, 0xe0, 0xb2, 0x1e, 0x93, 0x76, 0x57, 0x84, 0xdc, 0x09, 0xac, 0xdc, 0x4a, 0x76, 0xdd, 0xc0, + 0xf3, 0x89, 0x7b, 0x5b, 0x7b, 0xc7, 0xc7, 0x31, 0x7f, 0xab, 0x71, 0xfc, 0x2f, 0x03, 0xd6, 0x75, + 0x3f, 0x1c, 0x6f, 0xbb, 0xf6, 0xc6, 0x9b, 0xd0, 0xbe, 0x75, 0x55, 0xfb, 0x5b, 0xcc, 0xfd, 0x6f, + 0x06, 0x14, 0x47, 0x3a, 0x83, 0x4e, 0x60, 0xe1, 0x3b, 0xe6, 0x4a, 0xea, 0x53, 0xc7, 0xbe, 0x8d, + 0xf4, 0xf3, 0x09, 0x4a, 0x2b, 0x6a, 0xc1, 0x55, 0x45, 0x33, 0x37, 0x6d, 0x7a, 0x76, 0xf4, 0xc7, + 0x6f, 0x09, 0x72, 0xc1, 0x80, 0x70, 0x9b, 0x39, 0x5a, 0xe9, 0x12, 0x36, 0xd5, 0xb1, 0xe9, 0xa0, + 0xf7, 0x20, 0x2f, 0x7d, 0xd2, 0xa3, 0xea, 0x66, 0x4e, 0xdf, 0xe4, 0xf4, 0xb9, 0xe9, 0x54, 0xfe, + 0x30, 0x60, 0x7e, 0xbc, 0xea, 0xbb, 0x54, 0x9a, 0x71, 0x8b, 0xd2, 0x1e, 0xff, 0x68, 0xc0, 0x62, + 0xfa, 0x3b, 0x10, 0x3d, 0x82, 0x87, 0xf5, 0xbd, 0x3d, 0xdc, 0xd8, 0xab, 0x77, 0x9a, 0xcf, 0x0f, + 0xed, 0x4e, 0xe3, 0xa0, 0xfd, 0x1c, 0xd7, 0x5b, 0xcd, 0xce, 0xa9, 0x7d, 0x7c, 0x78, 0xd4, 0x6e, + 0xec, 0x34, 0xbf, 0x6c, 0x36, 0x76, 0xcb, 0x33, 0x68, 0x15, 0x96, 0xaf, 0x0b, 0xdc, 0x6d, 0xb4, + 0x3a, 0xf5, 0xb2, 0x81, 0xd6, 0xa0, 0x72, 0x5d, 0xc8, 0xce, 0xf1, 0xc1, 0x71, 0xab, 0xde, 0x69, + 0x9e, 0x34, 0xca, 0x99, 0x6d, 0x09, 0xab, 0x4c, 0x4c, 0x18, 0xc0, 0xed, 0x52, 0xfc, 0x1f, 0x6d, + 0x5b, 0x5d, 0xb4, 0x8d, 0x6f, 0x3e, 0xef, 0x33, 0xf9, 0x22, 0xec, 0x2a, 0x91, 0x6b, 0x2a, 0x75, + 0x63, 0xf8, 0x65, 0x60, 0x0c, 0x69, 0x23, 0xfa, 0x4e, 0xd0, 0xa7, 0xbc, 0xd6, 0x1f, 0xfd, 0x50, + 0xd1, 0x35, 0xf5, 0xc5, 0xc7, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x16, 0x7c, 0x5f, 
0x8f, 0xd1, + 0x10, 0x00, 0x00, +} diff --git a/internal/opentelemetry-proto-gen/resource/v1/resource.pb.go b/internal/opentelemetry-proto-gen/resource/v1/resource.pb.go new file mode 100644 index 0000000000..75fedfe4e5 --- /dev/null +++ b/internal/opentelemetry-proto-gen/resource/v1/resource.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/resource/v1/resource.proto + +package v1 + +import ( + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Resource information. +type Resource struct { + // Set of labels that describe the resource. + Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. 
+ DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_446f73eacf88f3f5, []int{0} +} +func (m *Resource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resource.Unmarshal(m, b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return xxx_messageInfo_Resource.Size(m) +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +func (m *Resource) GetAttributes() []*v1.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Resource) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func init() { + proto.RegisterType((*Resource)(nil), "opentelemetry.proto.resource.v1.Resource") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/resource/v1/resource.proto", fileDescriptor_446f73eacf88f3f5) +} + +var fileDescriptor_446f73eacf88f3f5 = []byte{ + // 227 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcb, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, + 0x42, 0xf2, 0x28, 
0xea, 0x21, 0x82, 0x7a, 0x70, 0x35, 0x65, 0x86, 0x52, 0x5a, 0xd8, 0x0c, 0x4c, + 0xce, 0xcf, 0xcd, 0xcd, 0xcf, 0x03, 0x19, 0x07, 0x61, 0x41, 0xf4, 0x29, 0xf5, 0x32, 0x72, 0x71, + 0x04, 0x41, 0xf5, 0x0a, 0xb9, 0x73, 0x71, 0x25, 0x96, 0x94, 0x14, 0x65, 0x26, 0x95, 0x96, 0xa4, + 0x16, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0x61, 0xb3, 0x0e, 0x6a, 0x46, 0x99, + 0xa1, 0x9e, 0x77, 0x6a, 0x65, 0x58, 0x62, 0x4e, 0x69, 0x6a, 0x10, 0x92, 0x56, 0x21, 0x0b, 0x2e, + 0x89, 0x94, 0xa2, 0xfc, 0x82, 0x82, 0xd4, 0x94, 0x78, 0x84, 0x68, 0x7c, 0x72, 0x7e, 0x69, 0x5e, + 0x89, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6f, 0x90, 0x18, 0x54, 0xde, 0x11, 0x2e, 0xed, 0x0c, 0x92, + 0x75, 0x2a, 0xe7, 0x52, 0xca, 0xcc, 0xd7, 0x23, 0xe0, 0x43, 0x27, 0x5e, 0x98, 0x93, 0x03, 0x40, + 0x52, 0x01, 0x8c, 0x51, 0x0e, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x20, 0x77, 0xe9, 0x83, 0x34, + 0xeb, 0x22, 0xbc, 0x8f, 0x62, 0x96, 0x2e, 0x24, 0x30, 0xd2, 0x53, 0xf3, 0xf4, 0xd3, 0x51, 0x02, + 0x39, 0x89, 0x0d, 0x2c, 0x63, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xba, 0x7f, 0x2f, 0x93, 0x8e, + 0x01, 0x00, 0x00, +} diff --git a/internal/opentelemetry-proto-gen/trace/v1/trace.pb.go b/internal/opentelemetry-proto-gen/trace/v1/trace.pb.go new file mode 100644 index 0000000000..8e63f5749f --- /dev/null +++ b/internal/opentelemetry-proto-gen/trace/v1/trace.pb.go @@ -0,0 +1,815 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/trace/v1/trace.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + v11 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" + v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/resource/v1" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. Do NOT use as default. + // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span represents an internal operation within an application, + // as opposed to an operations happening at the boundaries. Default value. + Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SPAN_KIND_SERVER Span_SpanKind = 2 + // Indicates that the span describes a request to some remote service. + Span_SPAN_KIND_CLIENT Span_SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + Span_SPAN_KIND_PRODUCER Span_SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. 
+ Span_SPAN_KIND_CONSUMER Span_SpanKind = 5 +) + +var Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SPAN_KIND_INTERNAL", + 2: "SPAN_KIND_SERVER", + 3: "SPAN_KIND_CLIENT", + 4: "SPAN_KIND_PRODUCER", + 5: "SPAN_KIND_CONSUMER", +} + +var Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SPAN_KIND_INTERNAL": 1, + "SPAN_KIND_SERVER": 2, + "SPAN_KIND_CLIENT": 3, + "SPAN_KIND_PRODUCER": 4, + "SPAN_KIND_CONSUMER": 5, +} + +func (x Span_SpanKind) String() string { + return proto.EnumName(Span_SpanKind_name, int32(x)) +} + +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2, 0} +} + +type Status_DeprecatedStatusCode int32 + +const ( + Status_DEPRECATED_STATUS_CODE_OK Status_DeprecatedStatusCode = 0 + Status_DEPRECATED_STATUS_CODE_CANCELLED Status_DeprecatedStatusCode = 1 + Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR Status_DeprecatedStatusCode = 2 + Status_DEPRECATED_STATUS_CODE_INVALID_ARGUMENT Status_DeprecatedStatusCode = 3 + Status_DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED Status_DeprecatedStatusCode = 4 + Status_DEPRECATED_STATUS_CODE_NOT_FOUND Status_DeprecatedStatusCode = 5 + Status_DEPRECATED_STATUS_CODE_ALREADY_EXISTS Status_DeprecatedStatusCode = 6 + Status_DEPRECATED_STATUS_CODE_PERMISSION_DENIED Status_DeprecatedStatusCode = 7 + Status_DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED Status_DeprecatedStatusCode = 8 + Status_DEPRECATED_STATUS_CODE_FAILED_PRECONDITION Status_DeprecatedStatusCode = 9 + Status_DEPRECATED_STATUS_CODE_ABORTED Status_DeprecatedStatusCode = 10 + Status_DEPRECATED_STATUS_CODE_OUT_OF_RANGE Status_DeprecatedStatusCode = 11 + Status_DEPRECATED_STATUS_CODE_UNIMPLEMENTED Status_DeprecatedStatusCode = 12 + Status_DEPRECATED_STATUS_CODE_INTERNAL_ERROR Status_DeprecatedStatusCode = 13 + Status_DEPRECATED_STATUS_CODE_UNAVAILABLE Status_DeprecatedStatusCode = 14 + Status_DEPRECATED_STATUS_CODE_DATA_LOSS Status_DeprecatedStatusCode = 15 + 
Status_DEPRECATED_STATUS_CODE_UNAUTHENTICATED Status_DeprecatedStatusCode = 16 +) + +var Status_DeprecatedStatusCode_name = map[int32]string{ + 0: "DEPRECATED_STATUS_CODE_OK", + 1: "DEPRECATED_STATUS_CODE_CANCELLED", + 2: "DEPRECATED_STATUS_CODE_UNKNOWN_ERROR", + 3: "DEPRECATED_STATUS_CODE_INVALID_ARGUMENT", + 4: "DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED", + 5: "DEPRECATED_STATUS_CODE_NOT_FOUND", + 6: "DEPRECATED_STATUS_CODE_ALREADY_EXISTS", + 7: "DEPRECATED_STATUS_CODE_PERMISSION_DENIED", + 8: "DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED", + 9: "DEPRECATED_STATUS_CODE_FAILED_PRECONDITION", + 10: "DEPRECATED_STATUS_CODE_ABORTED", + 11: "DEPRECATED_STATUS_CODE_OUT_OF_RANGE", + 12: "DEPRECATED_STATUS_CODE_UNIMPLEMENTED", + 13: "DEPRECATED_STATUS_CODE_INTERNAL_ERROR", + 14: "DEPRECATED_STATUS_CODE_UNAVAILABLE", + 15: "DEPRECATED_STATUS_CODE_DATA_LOSS", + 16: "DEPRECATED_STATUS_CODE_UNAUTHENTICATED", +} + +var Status_DeprecatedStatusCode_value = map[string]int32{ + "DEPRECATED_STATUS_CODE_OK": 0, + "DEPRECATED_STATUS_CODE_CANCELLED": 1, + "DEPRECATED_STATUS_CODE_UNKNOWN_ERROR": 2, + "DEPRECATED_STATUS_CODE_INVALID_ARGUMENT": 3, + "DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED": 4, + "DEPRECATED_STATUS_CODE_NOT_FOUND": 5, + "DEPRECATED_STATUS_CODE_ALREADY_EXISTS": 6, + "DEPRECATED_STATUS_CODE_PERMISSION_DENIED": 7, + "DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED": 8, + "DEPRECATED_STATUS_CODE_FAILED_PRECONDITION": 9, + "DEPRECATED_STATUS_CODE_ABORTED": 10, + "DEPRECATED_STATUS_CODE_OUT_OF_RANGE": 11, + "DEPRECATED_STATUS_CODE_UNIMPLEMENTED": 12, + "DEPRECATED_STATUS_CODE_INTERNAL_ERROR": 13, + "DEPRECATED_STATUS_CODE_UNAVAILABLE": 14, + "DEPRECATED_STATUS_CODE_DATA_LOSS": 15, + "DEPRECATED_STATUS_CODE_UNAUTHENTICATED": 16, +} + +func (x Status_DeprecatedStatusCode) String() string { + return proto.EnumName(Status_DeprecatedStatusCode_name, int32(x)) +} + +func (Status_DeprecatedStatusCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3, 0} +} 
+ +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#set-status +type Status_StatusCode int32 + +const ( + // The default status. + Status_STATUS_CODE_UNSET Status_StatusCode = 0 + // The Span has been validated by an Application developers or Operator to have + // completed successfully. + Status_STATUS_CODE_OK Status_StatusCode = 1 + // The Span contains an error. + Status_STATUS_CODE_ERROR Status_StatusCode = 2 +) + +var Status_StatusCode_name = map[int32]string{ + 0: "STATUS_CODE_UNSET", + 1: "STATUS_CODE_OK", + 2: "STATUS_CODE_ERROR", +} + +var Status_StatusCode_value = map[string]int32{ + "STATUS_CODE_UNSET": 0, + "STATUS_CODE_OK": 1, + "STATUS_CODE_ERROR": 2, +} + +func (x Status_StatusCode) String() string { + return proto.EnumName(Status_StatusCode_name, int32(x)) +} + +func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3, 1} +} + +// A collection of InstrumentationLibrarySpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of InstrumentationLibrarySpans that originate from a resource. 
+ InstrumentationLibrarySpans []*InstrumentationLibrarySpans `protobuf:"bytes,2,rep,name=instrumentation_library_spans,json=instrumentationLibrarySpans,proto3" json:"instrumentation_library_spans,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceSpans) Reset() { *m = ResourceSpans{} } +func (m *ResourceSpans) String() string { return proto.CompactTextString(m) } +func (*ResourceSpans) ProtoMessage() {} +func (*ResourceSpans) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{0} +} +func (m *ResourceSpans) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceSpans.Unmarshal(m, b) +} +func (m *ResourceSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceSpans.Marshal(b, m, deterministic) +} +func (m *ResourceSpans) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSpans.Merge(m, src) +} +func (m *ResourceSpans) XXX_Size() int { + return xxx_messageInfo_ResourceSpans.Size(m) +} +func (m *ResourceSpans) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSpans.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSpans proto.InternalMessageInfo + +func (m *ResourceSpans) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *ResourceSpans) GetInstrumentationLibrarySpans() []*InstrumentationLibrarySpans { + if m != nil { + return m.InstrumentationLibrarySpans + } + return nil +} + +// A collection of Spans produced by an InstrumentationLibrary. +type InstrumentationLibrarySpans struct { + // The instrumentation library information for the spans in this message. + // If this field is not set then no library info is known. 
+ InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` + // A list of Spans that originate from an instrumentation library. + Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InstrumentationLibrarySpans) Reset() { *m = InstrumentationLibrarySpans{} } +func (m *InstrumentationLibrarySpans) String() string { return proto.CompactTextString(m) } +func (*InstrumentationLibrarySpans) ProtoMessage() {} +func (*InstrumentationLibrarySpans) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{1} +} +func (m *InstrumentationLibrarySpans) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InstrumentationLibrarySpans.Unmarshal(m, b) +} +func (m *InstrumentationLibrarySpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InstrumentationLibrarySpans.Marshal(b, m, deterministic) +} +func (m *InstrumentationLibrarySpans) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstrumentationLibrarySpans.Merge(m, src) +} +func (m *InstrumentationLibrarySpans) XXX_Size() int { + return xxx_messageInfo_InstrumentationLibrarySpans.Size(m) +} +func (m *InstrumentationLibrarySpans) XXX_DiscardUnknown() { + xxx_messageInfo_InstrumentationLibrarySpans.DiscardUnknown(m) +} + +var xxx_messageInfo_InstrumentationLibrarySpans proto.InternalMessageInfo + +func (m *InstrumentationLibrarySpans) GetInstrumentationLibrary() *v11.InstrumentationLibrary { + if m != nil { + return m.InstrumentationLibrary + } + return nil +} + +func (m *InstrumentationLibrarySpans) GetSpans() []*Span { + if m != nil { + return m.Spans + } + return nil +} + +// Span represents a single operation within a trace. Spans can be +// nested to form a trace tree. 
Spans may also be linked to other spans +// from the same or different trace and form graphs. Often, a trace +// contains a root span that describes the end-to-end latency, and one +// or more subspans for its sub-operations. A trace can also contain +// multiple root spans, or none at all. Spans do not need to be +// contiguous - there may be gaps or overlaps between spans in a trace. +// +// The next available field id is 17. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + // is considered invalid. + // + // This field is semantically required. Receiver should generate new + // random trace_id if empty or invalid trace_id was received. + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes is considered + // invalid. + // + // This field is semantically required. Receiver should generate new + // random span_id if empty or invalid span_id was received. + // + // This field is required. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. 
+ ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // When null or empty string received - receiver may use string "name" + // as a replacement. There might be smarted algorithms implemented by + // receiver to fix the empty span name. + // + // This field is required. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. 
On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` + // attributes is a collection of key/value pairs. The value can be a string, + // an integer, a double or the Boolean values `true` or `false`. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // events is a collection of Event items. + Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. 
+ Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2} +} +func (m *Span) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span.Unmarshal(m, b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) +} +func (m *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(m, src) +} +func (m *Span) XXX_Size() int { + return xxx_messageInfo_Span.Size(m) +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span) GetTraceState() string { + if m != nil { + return m.TraceState + } + return "" +} + +func (m *Span) GetParentSpanId() []byte { + if m != nil { + return m.ParentSpanId + } + return nil +} + +func (m *Span) GetName() string { + if m != nil { + return m.Name + } + return 
"" +} + +func (m *Span) GetKind() Span_SpanKind { + if m != nil { + return m.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (m *Span) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *Span) GetEndTimeUnixNano() uint64 { + if m != nil { + return m.EndTimeUnixNano + } + return 0 +} + +func (m *Span) GetAttributes() []*v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func (m *Span) GetEvents() []*Span_Event { + if m != nil { + return m.Events + } + return nil +} + +func (m *Span) GetDroppedEventsCount() uint32 { + if m != nil { + return m.DroppedEventsCount + } + return 0 +} + +func (m *Span) GetLinks() []*Span_Link { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetDroppedLinksCount() uint32 { + if m != nil { + return m.DroppedLinksCount + } + return 0 +} + +func (m *Span) GetStatus() *Status { + if m != nil { + return m.Status + } + return nil +} + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type Span_Event struct { + // time_unix_nano is the time the event occurred. + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. 
+ DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Event) Reset() { *m = Span_Event{} } +func (m *Span_Event) String() string { return proto.CompactTextString(m) } +func (*Span_Event) ProtoMessage() {} +func (*Span_Event) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2, 0} +} +func (m *Span_Event) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Event.Unmarshal(m, b) +} +func (m *Span_Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Event.Marshal(b, m, deterministic) +} +func (m *Span_Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Event.Merge(m, src) +} +func (m *Span_Event) XXX_Size() int { + return xxx_messageInfo_Span_Event.Size(m) +} +func (m *Span_Event) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Event proto.InternalMessageInfo + +func (m *Span_Event) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *Span_Event) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span_Event) GetAttributes() []*v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span_Event) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. 
+type Span_Link struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The trace_state associated with the link. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2, 1} +} +func (m *Span_Link) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Link.Unmarshal(m, b) +} +func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) +} +func (m *Span_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Link.Merge(m, src) +} +func (m *Span_Link) XXX_Size() int { + return xxx_messageInfo_Span_Link.Size(m) +} +func (m *Span_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Link 
proto.InternalMessageInfo + +func (m *Span_Link) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span_Link) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span_Link) GetTraceState() string { + if m != nil { + return m.TraceState + } + return "" +} + +func (m *Span_Link) GetAttributes() []*v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span_Link) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // The deprecated status code. This is an optional field. + // + // This field is deprecated and is replaced by the `code` field below. See backward + // compatibility notes below. According to our stability guarantees this field + // will be removed in 12 months, on Oct 22, 2021. All usage of old senders and + // receivers that do not understand the `code` field MUST be phased out by then. + DeprecatedCode Status_DeprecatedStatusCode `protobuf:"varint,1,opt,name=deprecated_code,json=deprecatedCode,proto3,enum=opentelemetry.proto.trace.v1.Status_DeprecatedStatusCode" json:"deprecated_code,omitempty"` // Deprecated: Do not use. + // A developer-facing human readable error message. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // The status code. 
+ Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +// Deprecated: Do not use. 
+func (m *Status) GetDeprecatedCode() Status_DeprecatedStatusCode { + if m != nil { + return m.DeprecatedCode + } + return Status_DEPRECATED_STATUS_CODE_OK +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetCode() Status_StatusCode { + if m != nil { + return m.Code + } + return Status_STATUS_CODE_UNSET +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) + proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_DeprecatedStatusCode", Status_DeprecatedStatusCode_name, Status_DeprecatedStatusCode_value) + proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_StatusCode", Status_StatusCode_name, Status_StatusCode_value) + proto.RegisterType((*ResourceSpans)(nil), "opentelemetry.proto.trace.v1.ResourceSpans") + proto.RegisterType((*InstrumentationLibrarySpans)(nil), "opentelemetry.proto.trace.v1.InstrumentationLibrarySpans") + proto.RegisterType((*Span)(nil), "opentelemetry.proto.trace.v1.Span") + proto.RegisterType((*Span_Event)(nil), "opentelemetry.proto.trace.v1.Span.Event") + proto.RegisterType((*Span_Link)(nil), "opentelemetry.proto.trace.v1.Span.Link") + proto.RegisterType((*Status)(nil), "opentelemetry.proto.trace.v1.Status") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/trace/v1/trace.proto", fileDescriptor_5c407ac9c675a601) +} + +var fileDescriptor_5c407ac9c675a601 = []byte{ + // 1130 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xc1, 0x6e, 0xdb, 0x46, + 0x10, 0x0d, 0x6d, 0x49, 0x76, 0xc6, 0xb6, 0xcc, 0x6c, 0x9d, 0x84, 0x71, 0x9a, 0x46, 0x50, 0xdd, + 0x44, 0x49, 0x1a, 0xa9, 0x49, 0x51, 0x20, 0x05, 0x1a, 0xb4, 0x34, 0xb9, 0x4e, 0x08, 0xd3, 0xa4, + 0xb0, 0x24, 0xdd, 0xb4, 0x97, 0x05, 0x6d, 0x6e, 0x5d, 0x22, 0xd6, 0x52, 0x20, 0x29, 0x23, 0x39, + 0xf4, 0x43, 0x0a, 0xf4, 0x73, 0x0a, 0xf4, 0x0b, 0x7a, 0xe9, 0xa1, 0x5f, 0xd2, 0x43, 0xb1, 
0x4b, + 0xca, 0xb2, 0x0c, 0x91, 0xce, 0x25, 0x17, 0x83, 0x7c, 0xf3, 0xde, 0xbc, 0x99, 0x9d, 0x59, 0x8b, + 0xd0, 0x4b, 0xc6, 0x8c, 0xe7, 0xec, 0x94, 0x8d, 0x58, 0x9e, 0xbe, 0x1f, 0x8c, 0xd3, 0x24, 0x4f, + 0x06, 0x79, 0x1a, 0x1e, 0xb3, 0xc1, 0xd9, 0xb3, 0xe2, 0xa1, 0x2f, 0x41, 0xf4, 0xe9, 0x1c, 0xb3, + 0x00, 0xfb, 0x05, 0xe1, 0xec, 0xd9, 0xf6, 0xe3, 0x45, 0x79, 0x8e, 0x93, 0xd1, 0x28, 0xe1, 0x22, + 0x51, 0xf1, 0x54, 0x88, 0xb6, 0xfb, 0x8b, 0xb8, 0x29, 0xcb, 0x92, 0x49, 0x5a, 0xd8, 0x4e, 0x9f, + 0x0b, 0x7e, 0xf7, 0x6f, 0x05, 0x36, 0x48, 0x09, 0x79, 0xe3, 0x90, 0x67, 0x08, 0xc3, 0xea, 0x94, + 0xa3, 0x29, 0x1d, 0xa5, 0xb7, 0xf6, 0xfc, 0x51, 0x7f, 0x51, 0x79, 0xe7, 0x89, 0xce, 0x9e, 0xf5, + 0xa7, 0x19, 0xc8, 0xb9, 0x14, 0xfd, 0x06, 0xf7, 0x62, 0x9e, 0xe5, 0xe9, 0x64, 0xc4, 0x78, 0x1e, + 0xe6, 0x71, 0xc2, 0xe9, 0x69, 0x7c, 0x94, 0x86, 0xe9, 0x7b, 0x9a, 0x09, 0x1f, 0x6d, 0xa9, 0xb3, + 0xdc, 0x5b, 0x7b, 0xfe, 0x6d, 0xbf, 0xae, 0xf5, 0xbe, 0x35, 0x9f, 0xc2, 0x2e, 0x32, 0xc8, 0x42, + 0xc9, 0xdd, 0xb8, 0x3a, 0xd8, 0xfd, 0x4b, 0x81, 0xbb, 0x35, 0x62, 0xc4, 0xe1, 0x76, 0x45, 0x79, + 0x65, 0xd3, 0xdf, 0x2c, 0x2c, 0xac, 0x3c, 0xeb, 0xca, 0xca, 0xc8, 0xad, 0xc5, 0x45, 0xa1, 0x17, + 0xd0, 0xbc, 0xd8, 0x76, 0xb7, 0xbe, 0x6d, 0x51, 0x23, 0x29, 0x04, 0xdd, 0x3f, 0x00, 0x1a, 0xe2, + 0x1d, 0xdd, 0x81, 0x55, 0x49, 0xa0, 0x71, 0x24, 0x6b, 0x5c, 0x27, 0x2b, 0xf2, 0xdd, 0x8a, 0xd0, + 0x6d, 0x58, 0x11, 0x64, 0x11, 0x59, 0x92, 0x91, 0x96, 0x78, 0xb5, 0x22, 0x74, 0x1f, 0xd6, 0x0a, + 0x4d, 0x96, 0x87, 0x39, 0xd3, 0x96, 0x3b, 0x4a, 0xef, 0x3a, 0x01, 0x09, 0x79, 0x02, 0x41, 0x3b, + 0xd0, 0x1e, 0x87, 0x29, 0xe3, 0x39, 0x9d, 0x26, 0x68, 0xc8, 0x04, 0xeb, 0x05, 0xea, 0x15, 0x69, + 0x10, 0x34, 0x78, 0x38, 0x62, 0x5a, 0x53, 0xea, 0xe5, 0x33, 0xfa, 0x1e, 0x1a, 0x6f, 0x63, 0x1e, + 0x69, 0xad, 0x8e, 0xd2, 0x6b, 0x3f, 0x7f, 0x72, 0x75, 0x43, 0xf2, 0xcf, 0x7e, 0xcc, 0x23, 0x22, + 0x85, 0x68, 0x00, 0x5b, 0x59, 0x1e, 0xa6, 0x39, 0xcd, 0xe3, 0x11, 0xa3, 0x13, 0x1e, 0xbf, 0xa3, + 0x3c, 0xe4, 0x89, 0xb6, 0xd2, 
0x51, 0x7a, 0x2d, 0x72, 0x43, 0xc6, 0xfc, 0x78, 0xc4, 0x02, 0x1e, + 0xbf, 0x73, 0x42, 0x9e, 0xa0, 0x27, 0x80, 0x18, 0x8f, 0x2e, 0xd3, 0x57, 0x25, 0x7d, 0x93, 0xf1, + 0x68, 0x8e, 0xfc, 0x0a, 0x20, 0xcc, 0xf3, 0x34, 0x3e, 0x9a, 0xe4, 0x2c, 0xd3, 0xae, 0xcb, 0x53, + 0x7f, 0x78, 0xc5, 0x4c, 0xf7, 0xd9, 0xfb, 0xc3, 0xf0, 0x74, 0xc2, 0xc8, 0x05, 0x29, 0x7a, 0x01, + 0x5a, 0x94, 0x26, 0xe3, 0x31, 0x8b, 0xe8, 0x0c, 0xa5, 0xc7, 0xc9, 0x84, 0xe7, 0x1a, 0x74, 0x94, + 0xde, 0x06, 0xb9, 0x55, 0xc6, 0xf5, 0xf3, 0xb0, 0x21, 0xa2, 0xe8, 0x07, 0x68, 0xb1, 0x33, 0xc6, + 0xf3, 0x4c, 0x5b, 0x93, 0xf6, 0xbd, 0x0f, 0x38, 0x23, 0x2c, 0x04, 0xa4, 0xd4, 0xa1, 0xaf, 0x60, + 0x6b, 0xea, 0x5d, 0x20, 0xa5, 0xef, 0xba, 0xf4, 0x45, 0x65, 0x4c, 0x6a, 0x4a, 0xcf, 0x97, 0xd0, + 0x3c, 0x8d, 0xf9, 0xdb, 0x4c, 0xdb, 0xa8, 0xe9, 0x78, 0xde, 0xd2, 0x8e, 0xf9, 0x5b, 0x52, 0xa8, + 0x50, 0x1f, 0x3e, 0x99, 0x1a, 0x4a, 0xa0, 0xf4, 0x6b, 0x4b, 0xbf, 0x1b, 0x65, 0x48, 0x08, 0x4a, + 0xbb, 0xef, 0xa0, 0x25, 0x36, 0x6b, 0x92, 0x69, 0x9b, 0xf2, 0xd6, 0xec, 0x5c, 0xe1, 0x27, 0xb9, + 0xa4, 0xd4, 0x6c, 0xff, 0xa9, 0x40, 0x53, 0x16, 0x2f, 0xd6, 0xf0, 0xd2, 0x58, 0x15, 0x39, 0xd6, + 0xf5, 0xfc, 0xe2, 0x4c, 0xa7, 0x6b, 0xb8, 0x74, 0x61, 0x0d, 0xe7, 0xe7, 0xbc, 0xfc, 0x71, 0xe6, + 0xdc, 0xa8, 0x9b, 0xf3, 0xf6, 0xbf, 0x0a, 0x34, 0xc4, 0x99, 0x7c, 0x9c, 0x1b, 0x3a, 0xdf, 0x60, + 0xe3, 0xe3, 0x34, 0xd8, 0xac, 0x6b, 0xb0, 0xfb, 0xbb, 0x02, 0xab, 0xd3, 0xcb, 0x8b, 0xee, 0xc0, + 0x4d, 0x6f, 0xa8, 0x3b, 0x74, 0xdf, 0x72, 0x4c, 0x1a, 0x38, 0xde, 0x10, 0x1b, 0xd6, 0x9e, 0x85, + 0x4d, 0xf5, 0x1a, 0xba, 0x05, 0x68, 0x16, 0xb2, 0x1c, 0x1f, 0x13, 0x47, 0xb7, 0x55, 0x05, 0x6d, + 0x81, 0x3a, 0xc3, 0x3d, 0x4c, 0x0e, 0x31, 0x51, 0x97, 0xe6, 0x51, 0xc3, 0xb6, 0xb0, 0xe3, 0xab, + 0xcb, 0xf3, 0x39, 0x86, 0xc4, 0x35, 0x03, 0x03, 0x13, 0xb5, 0x31, 0x8f, 0x1b, 0xae, 0xe3, 0x05, + 0x07, 0x98, 0xa8, 0xcd, 0xee, 0x7f, 0x2b, 0xd0, 0x2a, 0xd6, 0x0a, 0xfd, 0x02, 0x9b, 0x11, 0x1b, + 0xa7, 0xec, 0x38, 0xcc, 0x59, 0x44, 0x8f, 0x93, 0xa8, 0xf8, 0x01, 
0x6b, 0x5f, 0xf5, 0x23, 0x53, + 0xc8, 0xfb, 0xe6, 0xb9, 0xb6, 0x00, 0x8c, 0x24, 0x62, 0xbb, 0x4b, 0x9a, 0x42, 0xda, 0xb3, 0xac, + 0x02, 0x43, 0x1a, 0xac, 0x8c, 0x58, 0x96, 0x85, 0x27, 0xd3, 0x4d, 0x9c, 0xbe, 0x22, 0x03, 0x1a, + 0xd2, 0x76, 0x59, 0xda, 0x0e, 0x3e, 0xc8, 0x76, 0x66, 0x46, 0xa4, 0xb8, 0xfb, 0x4f, 0x13, 0xb6, + 0x16, 0xd5, 0x82, 0xee, 0xc1, 0x1d, 0x13, 0x0f, 0x09, 0x36, 0x74, 0x1f, 0x9b, 0xd4, 0xf3, 0x75, + 0x3f, 0xf0, 0xa8, 0xe1, 0x9a, 0x98, 0xba, 0xfb, 0xea, 0x35, 0xb4, 0x03, 0x9d, 0x8a, 0xb0, 0xa1, + 0x3b, 0x06, 0xb6, 0x6d, 0x6c, 0xaa, 0x0a, 0xea, 0xc1, 0x4e, 0x05, 0x2b, 0x70, 0xf6, 0x1d, 0xf7, + 0x47, 0x87, 0x62, 0x42, 0x5c, 0x31, 0x9f, 0x27, 0xf0, 0xb0, 0x82, 0x69, 0x39, 0x87, 0xba, 0x6d, + 0x99, 0x54, 0x27, 0xaf, 0x82, 0x83, 0x62, 0x6c, 0x5f, 0x42, 0xaf, 0x82, 0x6c, 0x62, 0xdd, 0xb4, + 0x2d, 0x07, 0x53, 0xfc, 0xc6, 0xc0, 0xd8, 0xc4, 0xa6, 0xda, 0xa8, 0x29, 0xd5, 0x71, 0x7d, 0xba, + 0xe7, 0x06, 0x8e, 0xa9, 0x36, 0xd1, 0x23, 0xf8, 0xa2, 0x82, 0xa5, 0xdb, 0x04, 0xeb, 0xe6, 0x4f, + 0x14, 0xbf, 0xb1, 0x3c, 0xdf, 0x53, 0x5b, 0x35, 0xf6, 0x43, 0x4c, 0x0e, 0x2c, 0xcf, 0xb3, 0x5c, + 0x87, 0x9a, 0xd8, 0x11, 0x7b, 0xba, 0x82, 0x9e, 0xc2, 0xa3, 0x0a, 0x36, 0xc1, 0x9e, 0x1b, 0x10, + 0x43, 0x14, 0xfb, 0x5a, 0x0f, 0x3c, 0x1f, 0x9b, 0xea, 0x2a, 0xea, 0xc3, 0xe3, 0x0a, 0xfa, 0x9e, + 0x6e, 0xd9, 0x58, 0xac, 0x29, 0x36, 0x5c, 0xc7, 0xb4, 0x7c, 0xcb, 0x75, 0xd4, 0xeb, 0xa8, 0x0b, + 0x9f, 0x55, 0xd5, 0xbd, 0xeb, 0x12, 0x91, 0x13, 0xd0, 0x43, 0xf8, 0xbc, 0x6a, 0x96, 0x81, 0x4f, + 0xdd, 0x3d, 0x4a, 0x74, 0xe7, 0x15, 0x56, 0xd7, 0x6a, 0xe7, 0x65, 0x1d, 0x0c, 0x6d, 0x2c, 0x06, + 0x80, 0x4d, 0x75, 0xbd, 0xe6, 0xb8, 0xa6, 0x57, 0xb1, 0x1c, 0xed, 0x06, 0x7a, 0x00, 0xdd, 0xca, + 0xa4, 0xfa, 0xa1, 0x6e, 0xd9, 0xfa, 0xae, 0x8d, 0xd5, 0x76, 0xcd, 0x9c, 0x4c, 0xdd, 0xd7, 0xa9, + 0xed, 0x7a, 0x9e, 0xba, 0x89, 0x1e, 0xc3, 0x83, 0xea, 0x6c, 0x81, 0xff, 0x1a, 0x3b, 0xbe, 0x25, + 0x63, 0xaa, 0xda, 0x75, 0x00, 0x2e, 0x6c, 0xf4, 0x4d, 0xb8, 0x31, 0x4f, 0xf7, 0xb0, 0xaf, 0x5e, + 0x43, 
0x08, 0xda, 0x97, 0xb6, 0x5b, 0xb9, 0x4c, 0x2d, 0x97, 0x74, 0x97, 0xc3, 0xfd, 0x38, 0xa9, + 0xbd, 0x67, 0xbb, 0xe0, 0x8b, 0xa7, 0xa1, 0x00, 0x87, 0xca, 0xcf, 0x2f, 0x4f, 0xe2, 0xfc, 0xd7, + 0xc9, 0x91, 0xf8, 0x67, 0x39, 0x10, 0xb2, 0xa7, 0xb3, 0x8f, 0xe5, 0xb9, 0x2c, 0x4f, 0x8b, 0x4f, + 0xe7, 0x13, 0xc6, 0x07, 0x27, 0xb3, 0xaf, 0xf6, 0xa3, 0x96, 0x84, 0xbf, 0xfe, 0x3f, 0x00, 0x00, + 0xff, 0xff, 0x63, 0x4b, 0xfa, 0x64, 0xdc, 0x0b, 0x00, 0x00, +} diff --git a/route/route.go b/route/route.go index 3dbdcf93ff..eca2635ac1 100644 --- a/route/route.go +++ b/route/route.go @@ -4,12 +4,15 @@ import ( "bytes" "compress/gzip" "context" + "encoding/binary" + "encoding/hex" "encoding/json" "errors" "fmt" "io" "io/ioutil" "math" + "net" "net/http" "strconv" "sync" @@ -19,6 +22,9 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/zstd" "github.com/vmihailenco/msgpack/v4" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" "github.com/honeycombio/refinery/collect" "github.com/honeycombio/refinery/config" @@ -27,13 +33,21 @@ import ( "github.com/honeycombio/refinery/sharder" "github.com/honeycombio/refinery/transmit" "github.com/honeycombio/refinery/types" + + collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1" + common "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" + trace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" ) const ( // numZstdDecoders is set statically here - we may make it into a config option // A normal practice might be to use some multiple of the CPUs, but that goes south // in kubernetes - numZstdDecoders = 4 + numZstdDecoders = 4 + traceIDShortLength = 8 + traceIDLongLength = 16 + GRPCMessageSizeMax int = 5000000 // 5MB + 
defaultSampleRate = 1 ) type Router struct { @@ -61,8 +75,9 @@ type Router struct { zstdDecoders chan *zstd.Decoder - server *http.Server - doneWG sync.WaitGroup + server *http.Server + grpcServer *grpc.Server + doneWG sync.WaitGroup } type BatchResponse struct { @@ -145,13 +160,19 @@ func (r *Router) LnS(incomingOrPeer string) { // pass everything else through unmolested muxxer.PathPrefix("/").HandlerFunc(r.proxy).Name("proxy") - var listenAddr string + var listenAddr, grpcAddr string if r.incomingOrPeer == "incoming" { listenAddr, err = r.Config.GetListenAddr() if err != nil { r.iopLogger.Error().Logf("failed to get listen addr config: %s", err) return } + // GRPC listen addr is optional, err means addr was not empty and invalid + grpcAddr, err = r.Config.GetGRPCListenAddr() + if err != nil { + r.iopLogger.Error().Logf("failed to get grpc listen addr config: %s", err) + return + } } else { listenAddr, err = r.Config.GetPeerListenAddr() if err != nil { @@ -166,6 +187,27 @@ func (r *Router) LnS(incomingOrPeer string) { Handler: muxxer, } + if len(grpcAddr) > 0 { + l, err := net.Listen("tcp", grpcAddr) + if err != nil { + r.iopLogger.Error().Logf("failed to listen to grpc addr: " + grpcAddr) + } + + r.iopLogger.Info().Logf("gRPC listening on %s", grpcAddr) + serverOpts := []grpc.ServerOption{ + grpc.MaxSendMsgSize(GRPCMessageSizeMax), // default is math.MaxInt32 + grpc.MaxRecvMsgSize(GRPCMessageSizeMax), // default is 4MB + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: 10 * time.Second, + Timeout: 2 * time.Second, + MaxConnectionIdle: time.Minute, + }), + } + r.grpcServer = grpc.NewServer(serverOpts...) 
+ collectortrace.RegisterTraceServiceServer(r.grpcServer, r) + go r.grpcServer.Serve(l) + } + r.doneWG.Add(1) go func() { defer r.doneWG.Done() @@ -183,6 +225,9 @@ func (r *Router) Stop() error { if err != nil { return err } + if r.grpcServer != nil { + r.grpcServer.GracefulStop() + } r.doneWG.Wait() return nil } @@ -337,6 +382,110 @@ func (r *Router) batch(w http.ResponseWriter, req *http.Request) { w.Write(response) } +func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) { + + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + r.Logger.Error().Logf("Unable to retreive metadata from OTLP request.") + return &collectortrace.ExportTraceServiceResponse{}, nil + } + + // requestID is used to track a requst as it moves between refinery nodes (peers) + // the OTLP handler only receives incoming (not peer) requests for now so will be empty here + var requestID types.RequestIDContextKey + debugLog := r.iopLogger.Debug().WithField("request_id", requestID) + + apiKey, dataset := getAPIKeyAndDatasetFromMetadata(md) + if apiKey == "" { + r.Logger.Error().Logf("Received OTLP request without Honeycomb APIKey header") + return &collectortrace.ExportTraceServiceResponse{}, nil + } + if dataset == "" { + r.Logger.Error().Logf("Received OTLP request without Honeycomb dataset header") + return &collectortrace.ExportTraceServiceResponse{}, nil + } + + apiHost, err := r.Config.GetHoneycombAPI() + if err != nil { + r.Logger.Error().Logf("Unable to retrieve APIHost from config while processing OTLP batch") + return &collectortrace.ExportTraceServiceResponse{}, nil + } + + for _, resourceSpan := range req.ResourceSpans { + resourceAttrs := make(map[string]interface{}) + + if resourceSpan.Resource != nil { + addAttributesToMap(resourceAttrs, resourceSpan.Resource.Attributes) + } + + for _, librarySpan := range resourceSpan.InstrumentationLibrarySpans { + library := 
librarySpan.InstrumentationLibrary + if library != nil { + if len(library.Name) > 0 { + resourceAttrs["library.name"] = library.Name + } + if len(library.Version) > 0 { + resourceAttrs["library.version"] = library.Version + } + } + + for _, span := range librarySpan.GetSpans() { + traceID := bytesToTraceID(span.TraceId) + spanID := hex.EncodeToString(span.SpanId) + timestamp := time.Unix(0, int64(span.StartTimeUnixNano)).UTC() + + eventAttrs := map[string]interface{}{ + "trace.trace_id": traceID, + "trace.span_id": spanID, + "type": getSpanKind(span.Kind), + "name": span.Name, + "duration_ms": float64(span.EndTimeUnixNano-span.StartTimeUnixNano) / float64(time.Millisecond), + "status_code": int32(span.Status.Code), + } + if span.ParentSpanId != nil { + eventAttrs["trace.parent_id"] = hex.EncodeToString(span.ParentSpanId) + } + if r.getSpanStatusCode(span.Status) == trace.Status_STATUS_CODE_ERROR { + eventAttrs["error"] = true + } + if len(span.Status.Message) > 0 { + eventAttrs["status_message"] = span.Status.Message + } + if span.Attributes != nil { + addAttributesToMap(eventAttrs, span.Attributes) + } + + sampleRate, err := getSampleRateFromAttributes(eventAttrs) + if err != nil { + debugLog.WithField("error", err.Error()).WithField("sampleRate", eventAttrs["sampleRate"]).Logf("error parsing sampleRate") + } + + // copy resource attributes to event attributes + for k, v := range resourceAttrs { + eventAttrs[k] = v + } + + event := &types.Event{ + Context: ctx, + APIHost: apiHost, + APIKey: apiKey, + Dataset: dataset, + SampleRate: uint(sampleRate), + Timestamp: timestamp, + Data: eventAttrs, + } + + err = r.processEvent(event, requestID) + if err != nil { + r.Logger.Error().Logf("Error processing event: " + err.Error()) + } + } + } + } + + return &collectortrace.ExportTraceServiceResponse{}, nil +} + func (r *Router) processEvent(ev *types.Event, reqID interface{}) error { debugLog := r.iopLogger.Debug(). WithField("request_id", reqID). 
@@ -553,3 +702,123 @@ func unmarshal(r *http.Request, data io.Reader, v interface{}) error { return jsoniter.NewDecoder(data).Decode(v) } } + +func getAPIKeyAndDatasetFromMetadata(md metadata.MD) (apiKey string, dataset string) { + apiKey = getFirstValueFromMetadata(types.APIKeyHeader, md) + if apiKey == "" { + apiKey = getFirstValueFromMetadata(types.APIKeyHeaderShort, md) + } + dataset = getFirstValueFromMetadata(types.DatasetHeader, md) + + return apiKey, dataset +} + +// getFirstValueFromMetadata returns the first value of a metadata entry using a +// case-insensitive key +func getFirstValueFromMetadata(key string, md metadata.MD) string { + if values := md.Get(key); len(values) > 0 { + return values[0] + } + return "" +} + +func addAttributesToMap(attrs map[string]interface{}, attributes []*common.KeyValue) { + for _, attr := range attributes { + if attr.Key == "" { + continue + } + switch attr.Value.Value.(type) { + case *common.AnyValue_StringValue: + attrs[attr.Key] = attr.Value.GetStringValue() + case *common.AnyValue_BoolValue: + attrs[attr.Key] = attr.Value.GetBoolValue() + case *common.AnyValue_DoubleValue: + attrs[attr.Key] = attr.Value.GetDoubleValue() + case *common.AnyValue_IntValue: + attrs[attr.Key] = attr.Value.GetIntValue() + } + } +} + +func getSpanKind(kind trace.Span_SpanKind) string { + switch kind { + case trace.Span_SPAN_KIND_CLIENT: + return "client" + case trace.Span_SPAN_KIND_SERVER: + return "server" + case trace.Span_SPAN_KIND_PRODUCER: + return "producer" + case trace.Span_SPAN_KIND_CONSUMER: + return "consumer" + case trace.Span_SPAN_KIND_INTERNAL: + return "internal" + case trace.Span_SPAN_KIND_UNSPECIFIED: + fallthrough + default: + return "unspecified" + } +} + +// bytesToTraceID returns an ID suitable for use for spans and traces. Before +// encoding the bytes as a hex string, we want to handle cases where we are +// given 128-bit IDs with zero padding, e.g. 0000000000000000f798a1e7f33c8af6. 
+// To do this, we borrow a strategy from Jaeger [1] wherein we split the byte +// sequence into two parts. The leftmost part could contain all zeros. We use +// that to determine whether to return a 64-bit hex encoded string or a 128-bit +// one. +// +// [1]: https://github.com/jaegertracing/jaeger/blob/cd19b64413eca0f06b61d92fe29bebce1321d0b0/model/ids.go#L81 +func bytesToTraceID(traceID []byte) string { + // binary.BigEndian.Uint64() does a bounds check on traceID which will + // cause a panic if traceID is fewer than 8 bytes. In this case, we don't + // need to check for zero padding on the high part anyway, so just return a + // hex string. + if len(traceID) < traceIDShortLength { + return fmt.Sprintf("%x", traceID) + } + var low uint64 + if len(traceID) == traceIDLongLength { + low = binary.BigEndian.Uint64(traceID[traceIDShortLength:]) + if high := binary.BigEndian.Uint64(traceID[:traceIDShortLength]); high != 0 { + return fmt.Sprintf("%016x%016x", high, low) + } + } else { + low = binary.BigEndian.Uint64(traceID) + } + + return fmt.Sprintf("%016x", low) +} + +// getSpanStatusCode checks the value of both the deprecated code and code fields +// on the span status and using the rules specified in the backward compatibility +// notes in the protobuf definitions. 
See: +// +// https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L230 +func (r *Router) getSpanStatusCode(status *trace.Status) trace.Status_StatusCode { + if status.Code == trace.Status_STATUS_CODE_UNSET { + if status.DeprecatedCode == trace.Status_DEPRECATED_STATUS_CODE_OK { + return trace.Status_STATUS_CODE_UNSET + } + return trace.Status_STATUS_CODE_ERROR + } + return status.Code +} + +func getSampleRateFromAttributes(attributes map[string]interface{}) (int, error) { + var err error + sampleRate := defaultSampleRate + if attributes["sampleRate"] != nil { + switch attributes["sampleRate"].(type) { + case string: + sampleRate, err = strconv.Atoi(attributes["sampleRate"].(string)) + case int: + sampleRate = attributes["sampleRate"].(int) + default: + err = fmt.Errorf("Unrecognised sampleRate datatype - %T", attributes["sampleRate"]) + } + // remove sampleRate from event fields + delete(attributes, "sampleRate") + } + + return sampleRate, err +} diff --git a/route/route_test.go b/route/route_test.go index c4e75a94ca..8a5a5f754b 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -18,6 +18,7 @@ import ( "github.com/honeycombio/refinery/sharder" "github.com/klauspost/compress/zstd" "github.com/vmihailenco/msgpack/v4" + "google.golang.org/grpc/metadata" ) func TestDecompression(t *testing.T) { @@ -237,6 +238,128 @@ func TestUnmarshal(t *testing.T) { } } +func TestGetAPIKeyAndDatasetFromMetadataCaseInsensitive(t *testing.T) { + const ( + apiKeyValue = "test-apikey" + datasetValue = "test-dataset" + ) + + tests := []struct { + name string + apikeyHeader string + datasetHeader string + }{ + { + name: "lowercase", + apikeyHeader: "x-honeycomb-team", + datasetHeader: "x-honeycomb-dataset", + }, + { + name: "uppercase", + apikeyHeader: "X-HONEYCOMB-TEAM", + datasetHeader: "X-HONEYCOMB-DATASET", + }, + { + name: 
"mixed-case", + apikeyHeader: "x-HoNeYcOmB-tEaM", + datasetHeader: "X-hOnEyCoMb-DaTaSeT", + }, + { + name: "lowercase-short", + apikeyHeader: "x-hny-team", + datasetHeader: "x-honeycomb-dataset", + }, + { + name: "uppercase-short", + apikeyHeader: "X-HNY-TEAM", + datasetHeader: "X-HONEYCOMB-DATASET", + }, + { + name: "mixed-case-short", + apikeyHeader: "X-hNy-TeAm", + datasetHeader: "X-hOnEyCoMb-DaTaSeT", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + md := metadata.MD{} + md.Set(tt.apikeyHeader, apiKeyValue) + md.Set(tt.datasetHeader, datasetValue) + + apikey, dataset := getAPIKeyAndDatasetFromMetadata(md) + if apikey != apiKeyValue { + t.Errorf("got: %s\n\twant: %v", apikey, apiKeyValue) + } + if dataset != datasetValue { + t.Errorf("got: %s\n\twant: %v", dataset, datasetValue) + } + }) + } +} + +func TestGetSampleRateFromAttributes(t *testing.T) { + const ( + defaultSampleRate = 1 + ) + tests := []struct { + name string + attrKey string + attrValue interface{} + expectedValue int + }{ + { + name: "missing attr gets default value", + attrKey: "", + attrValue: nil, + expectedValue: defaultSampleRate, + }, + { + name: "can parse integer value", + attrKey: "sampleRate", + attrValue: 5, + expectedValue: 5, + }, + { + name: "can parse string value", + attrKey: "sampleRate", + attrValue: "5", + expectedValue: 5, + }, + { + name: "does not parse float, gets default value", + attrKey: "sampleRate", + attrValue: 0.25, + expectedValue: defaultSampleRate, + }, + { + name: "does not parse bool, gets default value", + attrKey: "sampleRate", + attrValue: true, + expectedValue: defaultSampleRate, + }, + { + name: "does not parse struct, gets default value", + attrKey: "sampleRate", + attrValue: struct{}{}, + expectedValue: defaultSampleRate, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + attrs := map[string]interface{}{ + tt.attrKey: tt.attrValue, + } + + sampleRate, _ := getSampleRateFromAttributes(attrs) + if 
sampleRate != tt.expectedValue { + t.Errorf("got: %d\n\twant: %d", sampleRate, tt.expectedValue) + } + }) + } +} + func TestDebugTrace(t *testing.T) { req, _ := http.NewRequest("GET", "/debug/trace/123abcdef", nil) req = mux.SetURLVars(req, map[string]string{"traceID": "123abcdef"}) diff --git a/types/event.go b/types/event.go index 0ef2157889..0a1115155c 100644 --- a/types/event.go +++ b/types/event.go @@ -9,6 +9,7 @@ const ( APIKeyHeader = "X-Honeycomb-Team" // libhoney-js uses this APIKeyHeaderShort = "X-Hny-Team" + DatasetHeader = "X-Honeycomb-Dataset" SampleRateHeader = "X-Honeycomb-Samplerate" TimestampHeader = "X-Honeycomb-Event-Time" ) From c6c4d8a0f54117547299ddadae323d85c7c45629 Mon Sep 17 00:00:00 2001 From: Robb Kidd Date: Thu, 28 Jan 2021 07:48:08 -0500 Subject: [PATCH 004/351] add our custom action to manage project labels (#207) --- .github/workflows/apply-labels.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .github/workflows/apply-labels.yml diff --git a/.github/workflows/apply-labels.yml b/.github/workflows/apply-labels.yml new file mode 100644 index 0000000000..b6aeae298a --- /dev/null +++ b/.github/workflows/apply-labels.yml @@ -0,0 +1,16 @@ +name: Apply project labels + +on: + - issues + - label + - pull_request_target + - pull_request + +jobs: + apply-labels: + runs-on: ubuntu-latest + name: Apply common project labels + steps: + - uses: honeycombio/integrations-labels@v1 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} From 9702bb475c480d8f61afc986ed31d945f91a7f39 Mon Sep 17 00:00:00 2001 From: Matt Button Date: Fri, 29 Jan 2021 11:56:31 +0000 Subject: [PATCH 005/351] Make compression of peer forwarding configurable, rather than hard coded (#208) We're seeing substantial data transfer between peers in our refinery cluster, far more than we saw going to honeycomb when we were using hosted refinery. 
We're currently running 3 c5.2xlarge instances of refinery, in separate AZs, and each one seems to be sending ~7MB/s of data to its other 2 peers. Some back of the envelope math suggests this could generate a bill of (7MB/s * 3600 * 24 * 30)/1000MB per GB * $0.02/GB = $362.88 for 1 node to talk to one peer. Given each node has 2 peers, and there are three nodes, it seems plausible that the bandwidth alone could cost in the ballpark of $2,172 - about 3 times the cost of running the cluster, without even considering the cost of transmitting the sampled-in data to Honeycomb. While looking through the source code I noticed that refinery explicitly opts out of using gzip compression when communicating with peers. https://github.com/honeycombio/refinery/blob/5dcb4af9e13785dfd7980c79d36c649899e3efb8/cmd/refinery/main.go#L150-L153 It seems [this change was made][original-disable] in February 2019. A few months later [libhoney-go was changed to use zstd for compression][libhoney-zstd] by default when sending data to honeycomb's API. The PR that introduces zstd references substantial performance improvements, so I'm assuming Honeycomb had some performance issues with gzip compression, disabled it on several of their services, switched to using zstd, but never got around to enabling it in refinery...? By the looks of it libhoney-go is currently sending zstd compressed data to refinery, and refinery uses it to compress data sent to honeycomb, so I assume we won't run into any substantial performance issues by also using it on peer-to-peer communication. I've introduced a config option to control this in case there are legitimate situations where compression has to be disabled to prevent over-utilization of CPU. 
fixes honeycombio/refinery#206 [original-disable]: https://github.com/honeycombio/refinery/pull/21 [libhoney-zstd]: https://github.com/honeycombio/libhoney-go/pull/57 --- cmd/refinery/main.go | 20 ++++++++---------- config/config.go | 4 ++++ config/file_config.go | 49 +++++++++++++++++++++++++------------------ config/mock.go | 7 +++++++ config_complete.toml | 8 +++++++ 5 files changed, 57 insertions(+), 31 deletions(-) diff --git a/cmd/refinery/main.go b/cmd/refinery/main.go index 6f21ad836d..9738cc2e1a 100644 --- a/cmd/refinery/main.go +++ b/cmd/refinery/main.go @@ -141,17 +141,15 @@ func main() { peerClient, err := libhoney.NewClient(libhoney.ClientConfig{ Transmission: &transmission.Honeycomb{ - MaxBatchSize: 500, - BatchTimeout: libhoney.DefaultBatchTimeout, - MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches, - PendingWorkCapacity: uint(c.GetPeerBufferSize()), - UserAgentAddition: userAgentAddition, - Transport: peerTransport, - // gzip compression is expensive, and peers are most likely close to each other - // so we can turn off gzip when forwarding to peers - DisableGzipCompression: true, - EnableMsgpackEncoding: true, - Metrics: sdPeer, + MaxBatchSize: 500, + BatchTimeout: libhoney.DefaultBatchTimeout, + MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches, + PendingWorkCapacity: uint(c.GetPeerBufferSize()), + UserAgentAddition: userAgentAddition, + Transport: peerTransport, + DisableCompression: !c.GetCompressPeerCommunication(), + EnableMsgpackEncoding: true, + Metrics: sdPeer, }, }) if err != nil { diff --git a/config/config.go b/config/config.go index 09d341c0e4..b1ae39df67 100644 --- a/config/config.go +++ b/config/config.go @@ -25,6 +25,10 @@ type Config interface { // peer traffic GetPeerListenAddr() (string, error) + // GetCompressPeerCommunication will be true if refinery should compress + // data before forwarding it to a peer. 
+ GetCompressPeerCommunication() bool + // GetGRPCListenAddr returns the address and port on which to listen for // incoming events over gRPC GetGRPCListenAddr() (string, error) diff --git a/config/file_config.go b/config/file_config.go index dc18a6fce5..eff5daf4aa 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -53,26 +53,27 @@ func (r *RulesBasedSamplerConfig) String() string { } type configContents struct { - ListenAddr string `validate:"required"` - PeerListenAddr string `validate:"required"` - GRPCListenAddr string - APIKeys []string `validate:"required"` - HoneycombAPI string `validate:"required,url"` - Logger string `validate:"required,oneof= logrus honeycomb"` - LoggingLevel string `validate:"required"` - Collector string `validate:"required,oneof= InMemCollector"` - Sampler string `validate:"required,oneof= DeterministicSampler DynamicSampler EMADynamicSampler RulesBasedSampler TotalThroughputSampler"` - Metrics string `validate:"required,oneof= prometheus honeycomb"` - SendDelay time.Duration `validate:"required"` - TraceTimeout time.Duration `validate:"required"` - SendTicker time.Duration `validate:"required"` - UpstreamBufferSize int `validate:"required"` - PeerBufferSize int `validate:"required"` - DebugServiceAddr string - DryRun bool - DryRunFieldName string - PeerManagement PeerManagementConfig `validate:"required"` - InMemCollector InMemoryCollectorCacheCapacity `validate:"required"` + ListenAddr string `validate:"required"` + PeerListenAddr string `validate:"required"` + CompressPeerCommunication bool + GRPCListenAddr string + APIKeys []string `validate:"required"` + HoneycombAPI string `validate:"required,url"` + Logger string `validate:"required,oneof= logrus honeycomb"` + LoggingLevel string `validate:"required"` + Collector string `validate:"required,oneof= InMemCollector"` + Sampler string `validate:"required,oneof= DeterministicSampler DynamicSampler EMADynamicSampler RulesBasedSampler TotalThroughputSampler"` + Metrics 
string `validate:"required,oneof= prometheus honeycomb"` + SendDelay time.Duration `validate:"required"` + TraceTimeout time.Duration `validate:"required"` + SendTicker time.Duration `validate:"required"` + UpstreamBufferSize int `validate:"required"` + PeerBufferSize int `validate:"required"` + DebugServiceAddr string + DryRun bool + DryRunFieldName string + PeerManagement PeerManagementConfig `validate:"required"` + InMemCollector InMemoryCollectorCacheCapacity `validate:"required"` } type InMemoryCollectorCacheCapacity struct { @@ -122,6 +123,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.BindEnv("PeerManagement.RedisPassword", "REFINERY_REDIS_PASSWORD") c.SetDefault("ListenAddr", "0.0.0.0:8080") c.SetDefault("PeerListenAddr", "0.0.0.0:8081") + c.SetDefault("CompressPeerCommunication", true) c.SetDefault("APIKeys", []string{"*"}) c.SetDefault("PeerManagement.Peers", []string{"http://127.0.0.1:8081"}) c.SetDefault("PeerManagement.Type", "file") @@ -380,6 +382,13 @@ func (f *fileConfig) GetPeerListenAddr() (string, error) { return f.conf.PeerListenAddr, nil } +func (f *fileConfig) GetCompressPeerCommunication() bool { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.CompressPeerCommunication +} + func (f *fileConfig) GetGRPCListenAddr() (string, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config/mock.go b/config/mock.go index 16a41747db..5da76a4280 100644 --- a/config/mock.go +++ b/config/mock.go @@ -22,6 +22,7 @@ type MockConfig struct { GetListenAddrVal string GetPeerListenAddrErr error GetPeerListenAddrVal string + GetCompressPeerCommunicationsVal bool GetGRPCListenAddrErr error GetGRPCListenAddrVal string GetLoggerTypeErr error @@ -116,6 +117,12 @@ func (m *MockConfig) GetPeerListenAddr() (string, error) { return m.GetPeerListenAddrVal, m.GetPeerListenAddrErr } +func (m *MockConfig) GetCompressPeerCommunication() bool { + m.Mux.RLock() + defer m.Mux.RUnlock() + + return 
m.GetCompressPeerCommunicationsVal +} func (m *MockConfig) GetGRPCListenAddr() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index 7ff5ae0f69..983d3340df 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -24,6 +24,14 @@ GRPCListenAddr = "0.0.0.0:9090" # Not eligible for live reload. PeerListenAddr = "0.0.0.0:8081" +# CompressPeerCommunication determines whether refinery will compress span data +# it forwards to peers. If it costs money to transmit data between refinery +# instances (e.g. they're spread across AWS availability zones), then you +# almost certainly want compression enabled to reduce your bill. The option to +# disable it is provided as an escape hatch for deployments that value lower CPU +# utilization over data transfer costs. +CompressPeerCommunication = true + # APIKeys is a list of Honeycomb API keys that the proxy will accept. This list # only applies to events - other Honeycomb API actions will fall through to the # upstream API directly. From 5133ba621e2c7c6e731bfba6839cbcaa43b25e66 Mon Sep 17 00:00:00 2001 From: Paul Osman Date: Tue, 23 Feb 2021 10:47:06 -0600 Subject: [PATCH 006/351] detect int64 sample rate (#209) Detect int64 / int32 sample rates. Some OTLP exporters will send them as these types - so handle appropriately. 
--- route/route.go | 48 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 35 insertions(+), 13 deletions(-) diff --git a/route/route.go b/route/route.go index eca2635ac1..1616ccb4bc 100644 --- a/route/route.go +++ b/route/route.go @@ -48,6 +48,7 @@ const ( traceIDLongLength = 16 GRPCMessageSizeMax int = 5000000 // 5MB defaultSampleRate = 1 + int32MaxValue = 2147483647 // (2**31)-1 ) type Router struct { @@ -804,21 +805,42 @@ func (r *Router) getSpanStatusCode(status *trace.Status) trace.Status_StatusCode return status.Code } -func getSampleRateFromAttributes(attributes map[string]interface{}) (int, error) { +func getSampleRateFromAttributes(attrs map[string]interface{}) (int, error) { + var sampleRateKey string + if attrs["sampleRate"] != nil { + sampleRateKey = "sampleRate" + } else if attrs["SampleRate"] != nil { + sampleRateKey = "SampleRate" + } + if len(sampleRateKey) == 0 || attrs[sampleRateKey] == nil { + return defaultSampleRate, nil + } + var sampleRate int var err error - sampleRate := defaultSampleRate - if attributes["sampleRate"] != nil { - switch attributes["sampleRate"].(type) { - case string: - sampleRate, err = strconv.Atoi(attributes["sampleRate"].(string)) - case int: - sampleRate = attributes["sampleRate"].(int) - default: - err = fmt.Errorf("Unrecognised sampleRate datatype - %T", attributes["sampleRate"]) + switch v := attrs[sampleRateKey].(type) { + case string: + var i int64 + i, err = strconv.ParseInt(v, 10, 32) + sampleRate = int(i) + case int: + if v > int32MaxValue { + sampleRate = int32MaxValue + } else { + sampleRate = v } - // remove sampleRate from event fields - delete(attributes, "sampleRate") + case int32: + sampleRate = int(v) + case int64: + if v > int32MaxValue { + sampleRate = int32MaxValue + } else { + sampleRate = int(v) + } + default: + err = fmt.Errorf("Unrecognised sampleRate datatype - %T", sampleRate) + sampleRate = defaultSampleRate } - + // remove sampleRate from event fields + delete(attrs, 
sampleRateKey) return sampleRate, err } From d67f16dc0b56ee8ecc75528e3e34b767eb3af107 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 24 Feb 2021 16:54:01 +0000 Subject: [PATCH 007/351] Add tests for int64 sample rate values (#213) Resolves #211. Adds testing for parsing int64 sample rate values. Test cases include parsing a value that can be parsed into an int32 and a value greater than int32 max where the int32 max value is used instead. Additionally, replace the custom max int32 value with the math package const math.MaxInt32. --- route/route.go | 9 ++++----- route/route_test.go | 13 +++++++++++++ 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/route/route.go b/route/route.go index 1616ccb4bc..5a7ea40a47 100644 --- a/route/route.go +++ b/route/route.go @@ -48,7 +48,6 @@ const ( traceIDLongLength = 16 GRPCMessageSizeMax int = 5000000 // 5MB defaultSampleRate = 1 - int32MaxValue = 2147483647 // (2**31)-1 ) type Router struct { @@ -823,16 +822,16 @@ func getSampleRateFromAttributes(attrs map[string]interface{}) (int, error) { i, err = strconv.ParseInt(v, 10, 32) sampleRate = int(i) case int: - if v > int32MaxValue { - sampleRate = int32MaxValue + if v > math.MaxInt32 { + sampleRate = math.MaxInt32 } else { sampleRate = v } case int32: sampleRate = int(v) case int64: - if v > int32MaxValue { - sampleRate = int32MaxValue + if v > math.MaxInt32 { + sampleRate = math.MaxInt32 } else { sampleRate = int(v) } diff --git a/route/route_test.go b/route/route_test.go index 8a5a5f754b..e2b488046f 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "io/ioutil" + "math" "net/http" "net/http/httptest" "strings" @@ -326,6 +327,18 @@ func TestGetSampleRateFromAttributes(t *testing.T) { attrValue: "5", expectedValue: 5, }, + { + name: "can parse int64 value (less than int32 max)", + attrKey: "sampleRate", + attrValue: int64(100), + expectedValue: 100, + }, + { + name: "can parse int64 value (greater than int32 max)", 
+ attrKey: "sampleRate", + attrValue: int64(math.MaxInt32 + 100), + expectedValue: math.MaxInt32, + }, { name: "does not parse float, gets default value", attrKey: "sampleRate", From a4a33e49644b1b7b4c3e429d6e239bbacaa6af41 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 24 Feb 2021 16:58:44 +0000 Subject: [PATCH 008/351] Add support for gzip compressed OTLP requests (#212) Summary: Many different clients support compressing OTLP requests using gzip. Currently refinery does not support any form of compression and the request will fail. This change adds support for gzip compressed OTLP requests by importing the google.golang.org/grpc/encoding/gzip package. The compressor is automatically registered with gRPC servers during import. Asana Fixes: https://app.asana.com/0/1199604629328245/1199952740433234 Test Plan: I tested by creating a test branch of the golang-otlp example that configured and uses gzip compression. Before applying this change to a local refinery instance, I would receive the following error when it tried to send the spans: 2021/02/22 13:34:04 rpc error: code = Unimplemented desc = grpc: Decompressor is not installed for grpc-encoding "gzip" After the change was applied, the request was received and processed as expected. Observability Plan: A new field is added to the root span that handles incoming OTLP requests named grpc_request_encoding which takes the value from the "grpc-accept-encoding" metadata field. This new field can be used to query for requests that were gzip compressed. 
--- route/route.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/route/route.go b/route/route.go index 5a7ea40a47..e1f38e1710 100644 --- a/route/route.go +++ b/route/route.go @@ -15,6 +15,7 @@ import ( "net" "net/http" "strconv" + "strings" "sync" "time" @@ -26,6 +27,9 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + // grpc/gzip compressor, auto registers on import + _ "google.golang.org/grpc/encoding/gzip" + "github.com/honeycombio/refinery/collect" "github.com/honeycombio/refinery/config" "github.com/honeycombio/refinery/logger" @@ -411,6 +415,11 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ return &collectortrace.ExportTraceServiceResponse{}, nil } + var grpcRequestEncoding string + if val := md.Get("grpc-accept-encoding"); val != nil { + grpcRequestEncoding = strings.Join(val, ",") + } + for _, resourceSpan := range req.ResourceSpans { resourceAttrs := make(map[string]interface{}) @@ -454,6 +463,9 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ if span.Attributes != nil { addAttributesToMap(eventAttrs, span.Attributes) } + if grpcRequestEncoding != "" { + eventAttrs["grpc_request_encoding"] = grpcRequestEncoding + } sampleRate, err := getSampleRateFromAttributes(eventAttrs) if err != nil { From b8677d26c26c6a9958a74fb212159743e38ade12 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Wed, 24 Feb 2021 09:59:13 -0700 Subject: [PATCH 009/351] Error response from libhoney does not always mean a non-20x response (#210) * it could be a 200 API response that got an error on parsing the body --- transmit/transmit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/transmit/transmit.go b/transmit/transmit.go index 280009656d..8bf87ae29f 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -160,7 +160,7 @@ func (d *DefaultTransmission) processResponses( if r.Err != nil { log = 
log.WithField("error", r.Err.Error()) } - log.Logf("non-20x response when sending event") + log.Logf("error when sending event") if honeycombAPI == apiHost { // if the API host matches the configured honeycomb API, // count it as an API error From 68eb0ea03738a80f0b3d1d153190d00e42344cee Mon Sep 17 00:00:00 2001 From: Robb Kidd Date: Wed, 3 Mar 2021 10:24:17 -0500 Subject: [PATCH 010/351] add more about Redis peering and bad DNS to the README (#217) --- README.md | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 7c50297ee7..252d505501 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,9 @@ Refinery is a trace-aware sampling proxy. It collects spans emitted by your appl ## Setting up Refinery -Refinery is designed to sit within your infrastructure where all sources of Honeycomb events (aka spans if you're doing tracing) can reach it. A standard deployment will have a cluster of servers running Refinery accessible via a load balancer. Refinery instances must be able to communicate with each other to concentrate traces on single servers. +Refinery is designed to sit within your infrastructure where all sources of Honeycomb events (aka spans if you're doing tracing) can reach it. +A standard deployment will have a cluster of two or more Refinery processes accessible via a separate load balancer. +Refinery processes must be able to communicate with each other to concentrate traces on single servers. Within your application (or other Honeycomb event sources) you would configure the `API Host` to be http(s)://load-balancer/. Everything else remains the same (api key, dataset name, etc. - all that lives with the originating client). @@ -26,7 +28,8 @@ Refinery is built by [CircleCI](https://circleci.com/gh/honeycombio/refinery). 
R ## Configuration -Configuration is done in one of two ways, either entirely by the config file or a combination of the config file and a Redis backend for managing the list of peers in the cluster. When using Redis, it only manages peers - everything else is managed by the config file. +Configuration is done in one of two ways, either entirely by the config file or a combination of the config file and a Redis service for managing the list of peers in the cluster. +When using Redis, it only manages peers; all other configuration remains managed by the config file. There are a few vital configuration options; read through this list and make sure all the variables are set. @@ -46,9 +49,10 @@ There are a few components of Refinery with multiple implementations; the config When configuration changes, send Refinery a USR1 signal and it will re-read the configuration. -### Redis-based Config +### Redis-based Peer Management -In the Redis-based config mode, all config options _except_ peer management are still handled by the config file. Only coordinating the list of peers in the Refinery cluster is managed with Redis. +With peer management in Redis, all config options _except_ peer management are still handled by the config file. +Only coordinating the list of peers in the Refinery cluster is managed with Redis. To enable the redis-based config: @@ -61,6 +65,10 @@ When launched in redis-config mode, Refinery needs a redis host to use for manag The redis host should be a hostname and a port, for example `redis.mydomain.com:6379`. The example config file has `localhost:6379` which obviously will not work with more than one host. When TLS is required to connect to the redis instance set the `UseTLS` config to `true`. +By default, a Refinery process will register itself in Redis using its local hostname as its identifier for peer communications. 
+In environments where domain name resolution is slow or unreliable, override the reliance on name lookups by specifying the name of the peering network interface with the `IdentifierInterfaceName` configuration option. +See the [Refinery documentation](https://docs.honeycomb.io/manage-data-volume/refinery/) for more details on tuning a cluster. + ## How sampling decisions are made In the configuration file, you can choose from a few sampling methods and specify options for each. The `DynamicSampler` is the most interesting and most commonly used. It uses the `AvgSampleRate` algorithm from the [`dynsampler-go`](https://github.com/honeycombio/dynsampler-go) package. Briefly described, you configure Refinery to examine the trace for a set of fields (for example, `request.status_code` and `request.method`). It collects all the values found in those fields anywhere in the trace (eg "200" and "GET") together into a key it hands to the dynsampler. The dynsampler code will look at the frequency that key appears during the previous 30 seconds (or other value set by the `ClearFrequencySec` setting) and use that to hand back a desired sample rate. More frequent keys are sampled more heavily, so that an even distribution of traffic across the keyspace is represented in Honeycomb. From 2060250259a8b1f314f8d7ac4ced7c00c9c3b51d Mon Sep 17 00:00:00 2001 From: shelby spees Date: Thu, 4 Mar 2021 05:48:34 -0800 Subject: [PATCH 011/351] Remove Beta label from README (#219) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 252d505501..326978e17a 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ![refinery](https://user-images.githubusercontent.com/6510988/94976958-8cadba80-04cb-11eb-9883-6e8ea554a081.png) -**Beta Release** This is the initial draft. Please expect and help find bugs! 
:) Refinery [![Build Status](https://circleci.com/gh/honeycombio/refinery.svg?style=shield)](https://circleci.com/gh/honeycombio/refinery) +[![Build Status](https://circleci.com/gh/honeycombio/refinery.svg?style=shield)](https://circleci.com/gh/honeycombio/refinery) ## Purpose From fb17ac9e751be95fe8df1d3384239303b2b63c68 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Thu, 11 Mar 2021 13:11:37 -0500 Subject: [PATCH 012/351] Fix YAML config issues (#220) * yaml config: add int as a type for rules compare * test case for int compare --- sample/rules.go | 25 +++++++++++++++++++++++++ sample/rules_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/sample/rules.go b/sample/rules.go index 01fa8b4172..90000f03f7 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -146,6 +146,11 @@ const ( ) func compare(a, b interface{}) (int, bool) { + // a is the tracing data field value. This can be: float64, int64, bool, or string + // b is the Rule condition value. 
This can be: float64, int64, int, bool, or string + // Note: in YAML config parsing, the Value may be returned as int + // When comparing numeric values, we need to check across the 3 types: float64, int64, and int + if a == nil { if b == nil { return equal, true @@ -161,6 +166,16 @@ func compare(a, b interface{}) (int, bool) { switch at := a.(type) { case int64: switch bt := b.(type) { + case int: + i := int(at) + switch { + case i < bt: + return less, true + case i > bt: + return more, true + default: + return equal, true + } case int64: switch { case at < bt: @@ -183,6 +198,16 @@ func compare(a, b interface{}) (int, bool) { } case float64: switch bt := b.(type) { + case int: + f := float64(bt) + switch { + case at < f: + return less, true + case at > f: + return more, true + default: + return equal, true + } case int64: f := float64(bt) switch { diff --git a/sample/rules_test.go b/sample/rules_test.go index 4531f2a69a..7add1ae669 100644 --- a/sample/rules_test.go +++ b/sample/rules_test.go @@ -439,6 +439,34 @@ func TestRules(t *testing.T) { ExpectedKeep: true, ExpectedRate: 4, }, + { + Rules: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "YAMLintgeaterthan", + SampleRate: 10, + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "test", + Operator: ">", + Value: int(1), + }, + }, + }, + }, + }, + Spans: []*types.Span{ + { + Event: types.Event{ + Data: map[string]interface{}{ + "test": int64(2), + }, + }, + }, + }, + ExpectedKeep: true, + ExpectedRate: 10, + }, } for _, d := range data { From dcb7fb2270c05e222cd87ad48761c46d5464f9ff Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Fri, 12 Mar 2021 13:34:07 -0500 Subject: [PATCH 013/351] add env var for api keys (#221) --- config/file_config.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/config/file_config.go b/config/file_config.go index eff5daf4aa..af9d1054a5 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -121,6 +121,8 @@ func 
NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.BindEnv("PeerManagement.RedisHost", "REFINERY_REDIS_HOST") c.BindEnv("PeerManagement.RedisPassword", "REFINERY_REDIS_PASSWORD") + c.BindEnv("HoneycombLogger.LoggerAPIKey", "REFINERY_HONEYCOMB_API_KEY") + c.BindEnv("HoneycombMetrics.MetricsAPIKey", "REFINERY_HONEYCOMB_API_KEY") c.SetDefault("ListenAddr", "0.0.0.0:8080") c.SetDefault("PeerListenAddr", "0.0.0.0:8081") c.SetDefault("CompressPeerCommunication", true) @@ -498,6 +500,8 @@ func (f *fileConfig) GetHoneycombLoggerConfig() (HoneycombLoggerConfig, error) { return *hlConfig, err } + hlConfig.LoggerAPIKey = f.config.GetString("HoneycombLogger.LoggerAPIKey") + // https://github.com/spf13/viper/issues/747 hlConfig.LoggerSamplerEnabled = f.config.GetBool("HoneycombLogger.LoggerSamplerEnabled") hlConfig.LoggerSamplerThroughput = f.config.GetInt("HoneycombLogger.LoggerSamplerThroughput") @@ -606,6 +610,8 @@ func (f *fileConfig) GetHoneycombMetricsConfig() (HoneycombMetricsConfig, error) return *hmConfig, err } + hmConfig.MetricsAPIKey = f.config.GetString("HoneycombMetrics.MetricsAPIKey") + v := validator.New() err = v.Struct(hmConfig) if err != nil { From c8201a6bca6b122481757e91b27a4bfa3848bb78 Mon Sep 17 00:00:00 2001 From: Ben Darfler Date: Mon, 15 Mar 2021 16:09:49 -0400 Subject: [PATCH 014/351] Removes whitelist terminology (#222) --- route/middleware.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/route/middleware.go b/route/middleware.go index e28eae0a29..8264bc7608 100644 --- a/route/middleware.go +++ b/route/middleware.go @@ -40,7 +40,7 @@ func (r *Router) apiKeyChecker(next http.Handler) http.Handler { return } if apiKey == key { - // we're in the whitelist, it's all good + // we're in the allowlist, it's all good next.ServeHTTP(w, req) return } From 8303c7c5a0153f675baa579d4583b8959dfa930e Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Tue, 16 Mar 2021 13:53:26 -0400 Subject: [PATCH 
015/351] add env var for api keys (#221) From a802786d90e314575178ef07568aaeb42948e6b7 Mon Sep 17 00:00:00 2001 From: owayss Date: Tue, 16 Mar 2021 22:48:27 +0100 Subject: [PATCH 016/351] guard against nil pointer dereference (#223) The Status field on a Span is declared as optiona in the OTLP spec, and is therefore represented as a pointer in the type generated by protoc. --- route/route.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/route/route.go b/route/route.go index e1f38e1710..7885d4a17f 100644 --- a/route/route.go +++ b/route/route.go @@ -449,7 +449,7 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ "type": getSpanKind(span.Kind), "name": span.Name, "duration_ms": float64(span.EndTimeUnixNano-span.StartTimeUnixNano) / float64(time.Millisecond), - "status_code": int32(span.Status.Code), + "status_code": int32(r.getSpanStatusCode(span.Status)), } if span.ParentSpanId != nil { eventAttrs["trace.parent_id"] = hex.EncodeToString(span.ParentSpanId) @@ -457,7 +457,7 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ if r.getSpanStatusCode(span.Status) == trace.Status_STATUS_CODE_ERROR { eventAttrs["error"] = true } - if len(span.Status.Message) > 0 { + if span.Status != nil && len(span.Status.Message) > 0 { eventAttrs["status_message"] = span.Status.Message } if span.Attributes != nil { @@ -807,6 +807,9 @@ func bytesToTraceID(traceID []byte) string { // // https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L230 func (r *Router) getSpanStatusCode(status *trace.Status) trace.Status_StatusCode { + if status == nil { + return trace.Status_STATUS_CODE_UNSET + } if status.Code == trace.Status_STATUS_CODE_UNSET { if status.DeprecatedCode == trace.Status_DEPRECATED_STATUS_CODE_OK { return trace.Status_STATUS_CODE_UNSET From 4edc6714e4716f8a3a66dfa540be146a423157a0 Mon Sep 17 
00:00:00 2001 From: Vera Reynolds Date: Thu, 18 Mar 2021 12:50:29 -0600 Subject: [PATCH 017/351] only publish to s3 when releasing (#218) --- .circleci/config.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6331986054..91e79458a8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -152,10 +152,9 @@ workflows: - build filters: tags: - only: /.*/ + only: /^v.*/ branches: - # Forked pull requests have CIRCLE_BRANCH set to pull/XXX - ignore: /pull\/[0-9]+/ + ignore: /.*/ - docker/publish: tag: latest extra_build_args: --build-arg BUILD_ID=${CIRCLE_SHA1:0:7} From 7882b6cb6fe33e8267432fbe7cfeb2fa4a089579 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Thu, 18 Mar 2021 12:50:44 -0600 Subject: [PATCH 018/351] Add test for OTLP handler, including spans with no status (#225) --- route/route_test.go | 98 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/route/route_test.go b/route/route_test.go index e2b488046f..47c43bf6ff 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -5,7 +5,15 @@ package route import ( "bytes" "compress/gzip" + "context" "fmt" + "github.com/honeycombio/refinery/config" + collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1" + common "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" + trace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" + "github.com/honeycombio/refinery/logger" + "github.com/honeycombio/refinery/metrics" + "github.com/honeycombio/refinery/transmit" "io" "io/ioutil" "math" @@ -388,6 +396,96 @@ func TestDebugTrace(t *testing.T) { } } +func TestOTLPHandler(t *testing.T) { + md := metadata.New(map[string]string{"x-honeycomb-team": "meow", "x-honeycomb-dataset": "ds"}) + ctx := 
metadata.NewIncomingContext(context.Background(), md) + + mockMetrics := metrics.MockMetrics{} + mockMetrics.Start() + router := &Router{ + Config: &config.MockConfig{}, + Metrics: &mockMetrics, + UpstreamTransmission: &transmit.MockTransmission{}, + iopLogger: iopLogger{ + Logger: &logger.MockLogger{}, + incomingOrPeer: "incoming", + }, + } + + t.Run("span with status", func(t *testing.T) { + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: helperOTLPRequestSpansWithStatus(), + }}, + }}, + } + _, err := router.Export(ctx, req) + if err != nil { + t.Errorf(`Unexpected error: %s`, err) + } + }) + + t.Run("span without status", func(t *testing.T) { + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: helperOTLPRequestSpansWithoutStatus(), + }}, + }}, + } + _, err := router.Export(ctx, req) + if err != nil { + t.Errorf(`Unexpected error: %s`, err) + } + }) +} + +func helperOTLPRequestSpansWithoutStatus() []*trace.Span { + now := time.Now() + return []*trace.Span{ + { + StartTimeUnixNano: uint64(now.UnixNano()), + Events: []*trace.Span_Event{ + { + TimeUnixNano: uint64(now.UnixNano()), + Attributes: []*common.KeyValue{ + { + Key: "attribute_key", + Value: &common.AnyValue{ + Value: &common.AnyValue_StringValue{StringValue: "attribute_value"}, + }, + }, + }, + }, + }, + }, + } +} + +func helperOTLPRequestSpansWithStatus() []*trace.Span { + now := time.Now() + return []*trace.Span{ + { + StartTimeUnixNano: uint64(now.UnixNano()), + Events: []*trace.Span_Event{ + { + TimeUnixNano: uint64(now.UnixNano()), + Attributes: []*common.KeyValue{ + { + Key: "attribute_key", + Value: &common.AnyValue{ + Value: &common.AnyValue_StringValue{StringValue: "attribute_value"}, + }, + }, + }, + }, + }, + Status: &trace.Status{Code: 
trace.Status_STATUS_CODE_OK}, + }, + } +} + type TestSharder struct{} func (s *TestSharder) MyShard() sharder.Shard { return nil } From 4a0285b72e011e74669658993282fa6ba25f653f Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 24 Mar 2021 10:50:29 +0000 Subject: [PATCH 019/351] Pass along upstream and peer metrics configs to libhoney (#227) Metrics config in refinery configurations files are not passed onto libhoney properly. This PR uses the metrics configuration found in config files to create upstream and peer metrics instances which are then passed into libhoney. It also consolidates the metrics interface to match what is in libhoney. Co-authored-by: Vera Reynolds --- cmd/refinery/main.go | 20 +++++++-------- collect/cache/cache.go | 2 +- collect/collect.go | 12 ++++----- metrics/honeycomb.go | 34 +++++++++++++++----------- metrics/metrics.go | 51 ++++++++++++++++++++++++++++++++++----- metrics/mock.go | 16 ++++++++---- metrics/nullmetrics.go | 7 +++--- metrics/prometheus.go | 22 +++++++++++++---- route/proxy.go | 2 +- route/route.go | 12 ++++----- sample/dynamic.go | 4 +-- sample/dynamic_ema.go | 4 +-- sample/rules.go | 8 +++--- sample/totalthroughput.go | 4 +-- transmit/transmit.go | 8 +++--- 15 files changed, 135 insertions(+), 71 deletions(-) diff --git a/cmd/refinery/main.go b/cmd/refinery/main.go index 9738cc2e1a..effacd60e7 100644 --- a/cmd/refinery/main.go +++ b/cmd/refinery/main.go @@ -9,12 +9,10 @@ import ( "syscall" "time" - libhoney "github.com/honeycombio/libhoney-go" - "github.com/honeycombio/libhoney-go/transmission" - statsd "gopkg.in/alexcesaro/statsd.v2" - "github.com/facebookgo/inject" "github.com/facebookgo/startstop" + libhoney "github.com/honeycombio/libhoney-go" + "github.com/honeycombio/libhoney-go/transmission" flag "github.com/jessevdk/go-flags" "github.com/sirupsen/logrus" @@ -84,7 +82,7 @@ func main() { // get 
desired implementation for each dependency to inject lgr := logger.GetLoggerImplementation(c) collector := collect.GetCollectorImplementation(c) - metricsr := metrics.GetMetricsImplementation(c) + metricsConfig := metrics.GetMetricsImplementation(c, "") shrdr := sharder.GetSharderImplementation(c) samplerFactory := &sample.SamplerFactory{} @@ -117,8 +115,8 @@ func main() { TLSHandshakeTimeout: 1200 * time.Millisecond, } - sdUpstream, _ := statsd.New(statsd.Prefix("refinery.upstream")) - sdPeer, _ := statsd.New(statsd.Prefix("refinery.peer")) + upstreamMetricsConfig := metrics.GetMetricsImplementation(c, "libhoney_upstream") + peerMetricsConfig := metrics.GetMetricsImplementation(c, "libhoney_peer") userAgentAddition := "refinery/" + version upstreamClient, err := libhoney.NewClient(libhoney.ClientConfig{ @@ -131,7 +129,7 @@ func main() { Transport: upstreamTransport, BlockOnSend: true, EnableMsgpackEncoding: true, - Metrics: sdUpstream, + Metrics: upstreamMetricsConfig, }, }) if err != nil { @@ -149,7 +147,7 @@ func main() { Transport: peerTransport, DisableCompression: !c.GetCompressPeerCommunication(), EnableMsgpackEncoding: true, - Metrics: sdPeer, + Metrics: peerMetricsConfig, }, }) if err != nil { @@ -168,7 +166,9 @@ func main() { &inject.Object{Value: &transmit.DefaultTransmission{LibhClient: peerClient, Name: "peer_"}, Name: "peerTransmission"}, &inject.Object{Value: shrdr}, &inject.Object{Value: collector}, - &inject.Object{Value: metricsr}, + &inject.Object{Value: metricsConfig, Name: "metrics"}, + &inject.Object{Value: upstreamMetricsConfig, Name: "upstreamMetrics"}, + &inject.Object{Value: peerMetricsConfig, Name: "peerMetrics"}, &inject.Object{Value: version, Name: "version"}, &inject.Object{Value: samplerFactory}, &inject.Object{Value: &a}, diff --git a/collect/cache/cache.go b/collect/cache/cache.go index 91a457e0ae..69bf691b12 100644 --- a/collect/cache/cache.go +++ b/collect/cache/cache.go @@ -102,7 +102,7 @@ func (d *DefaultInMemCache) Set(trace 
*types.Trace) *types.Trace { if !oldTrace.Sent { // if it hasn't already been sent, // record that we're overrunning the buffer - d.Metrics.IncrementCounter("collect_cache_buffer_overrun") + d.Metrics.Increment("collect_cache_buffer_overrun") // and return the trace so it can be sent. retTrace = oldTrace } diff --git a/collect/collect.go b/collect/collect.go index c505bbe711..503fbc4401 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -318,14 +318,14 @@ func (i *InMemCollector) processSpan(sp *types.Span) { // if the trace has already been sent, just pass along the span if sentRecord, found := i.sentTraceCache.Get(sp.TraceID); found { if sr, ok := sentRecord.(*traceSentRecord); ok { - i.Metrics.IncrementCounter("trace_sent_cache_hit") + i.Metrics.Increment("trace_sent_cache_hit") i.dealWithSentTrace(sr.keep, sr.rate, sp) return } } // trace hasn't already been sent (or this span is really old); let's // create a new trace to hold it - i.Metrics.IncrementCounter("trace_accepted") + i.Metrics.Increment("trace_accepted") timeout, err := i.Config.GetTraceTimeout() if err != nil { @@ -420,9 +420,9 @@ func (i *InMemCollector) send(trace *types.Trace) { i.Metrics.Histogram("trace_duration_ms", float64(traceDur.Milliseconds())) i.Metrics.Histogram("trace_span_count", float64(len(trace.GetSpans()))) if trace.HasRootSpan { - i.Metrics.IncrementCounter("trace_send_has_root") + i.Metrics.Increment("trace_send_has_root") } else { - i.Metrics.IncrementCounter("trace_send_no_root") + i.Metrics.Increment("trace_send_no_root") } var sampler sample.Sampler @@ -448,11 +448,11 @@ func (i *InMemCollector) send(trace *types.Trace) { // if we're supposed to drop this trace, and dry run mode is not enabled, then we're done. 
if !shouldSend && !i.Config.GetIsDryRun() { - i.Metrics.IncrementCounter("trace_send_dropped") + i.Metrics.Increment("trace_send_dropped") i.Logger.Info().WithString("trace_id", trace.TraceID).WithString("dataset", trace.Dataset).Logf("Dropping trace because of sampling, trace to dataset") return } - i.Metrics.IncrementCounter("trace_send_kept") + i.Metrics.Increment("trace_send_kept") // ok, we're not dropping this trace; send all the spans if i.Config.GetIsDryRun() && !shouldSend { diff --git a/metrics/honeycomb.go b/metrics/honeycomb.go index 3e89d635ab..de226a0f7a 100644 --- a/metrics/honeycomb.go +++ b/metrics/honeycomb.go @@ -38,6 +38,8 @@ type HoneycombMetrics struct { //reportingFreq is the interval with which to report statistics reportingFreq int64 reportingCancelFunc func() + + prefix string } type counter struct { @@ -229,7 +231,7 @@ func (h *HoneycombMetrics) reportToHoneycommb(ctx context.Context) { h.countersLock.Lock() for _, count := range h.counters { count.lock.Lock() - ev.AddField(count.name, count.val) + ev.AddField(PrefixMetricName(h.prefix, count.name), count.val) count.val = 0 count.lock.Unlock() } @@ -238,7 +240,7 @@ func (h *HoneycombMetrics) reportToHoneycommb(ctx context.Context) { h.gaugesLock.Lock() for _, gauge := range h.gauges { gauge.lock.Lock() - ev.AddField(gauge.name, gauge.val) + ev.AddField(PrefixMetricName(h.prefix, gauge.name), gauge.val) // gauges should remain where they are until changed // gauge.val = 0 gauge.lock.Unlock() @@ -253,12 +255,12 @@ func (h *HoneycombMetrics) reportToHoneycommb(ctx context.Context) { p50Index := int(math.Floor(float64(len(histogram.vals)) * 0.5)) p95Index := int(math.Floor(float64(len(histogram.vals)) * 0.95)) p99Index := int(math.Floor(float64(len(histogram.vals)) * 0.99)) - ev.AddField(histogram.name+"_p50", histogram.vals[p50Index]) - ev.AddField(histogram.name+"_p95", histogram.vals[p95Index]) - ev.AddField(histogram.name+"_p99", histogram.vals[p99Index]) - 
ev.AddField(histogram.name+"_min", histogram.vals[0]) - ev.AddField(histogram.name+"_max", histogram.vals[len(histogram.vals)-1]) - ev.AddField(histogram.name+"_avg", average(histogram.vals)) + ev.AddField(PrefixMetricName(h.prefix, histogram.name)+"_p50", histogram.vals[p50Index]) + ev.AddField(PrefixMetricName(h.prefix, histogram.name)+"_p95", histogram.vals[p95Index]) + ev.AddField(PrefixMetricName(h.prefix, histogram.name)+"_p99", histogram.vals[p99Index]) + ev.AddField(PrefixMetricName(h.prefix, histogram.name)+"_min", histogram.vals[0]) + ev.AddField(PrefixMetricName(h.prefix, histogram.name)+"_max", histogram.vals[len(histogram.vals)-1]) + ev.AddField(PrefixMetricName(h.prefix, histogram.name)+"_avg", average(histogram.vals)) histogram.vals = histogram.vals[:0] } histogram.lock.Unlock() @@ -317,7 +319,7 @@ func (h *HoneycombMetrics) Register(name string, metricType string) { } } -func (h *HoneycombMetrics) IncrementCounter(name string) { +func (h *HoneycombMetrics) Count(name string, n interface{}) { count, ok := h.counters[name] if !ok { h.Register(name, "counter") @@ -325,10 +327,14 @@ func (h *HoneycombMetrics) IncrementCounter(name string) { } count.lock.Lock() defer count.lock.Unlock() - count.val++ + count.val = count.val + int(ConvertNumeric(n)) +} + +func (h *HoneycombMetrics) Increment(name string) { + h.Count(name, 1) } -func (h *HoneycombMetrics) Gauge(name string, val float64) { +func (h *HoneycombMetrics) Gauge(name string, val interface{}) { gauge, ok := h.gauges[name] if !ok { h.Register(name, "gauge") @@ -336,10 +342,10 @@ func (h *HoneycombMetrics) Gauge(name string, val float64) { } gauge.lock.Lock() defer gauge.lock.Unlock() - gauge.val = val + gauge.val = ConvertNumeric(val) } -func (h *HoneycombMetrics) Histogram(name string, obs float64) { +func (h *HoneycombMetrics) Histogram(name string, obs interface{}) { histogram, ok := h.histograms[name] if !ok { h.Register(name, "histogram") @@ -347,5 +353,5 @@ func (h *HoneycombMetrics) 
Histogram(name string, obs float64) { } histogram.lock.Lock() defer histogram.lock.Unlock() - histogram.vals = append(histogram.vals, obs) + histogram.vals = append(histogram.vals, ConvertNumeric(obs)) } diff --git a/metrics/metrics.go b/metrics/metrics.go index e40d02837b..4d41d63807 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -10,12 +10,13 @@ import ( type Metrics interface { // Register declares a metric; metricType should be one of counter, gauge, histogram Register(name string, metricType string) - IncrementCounter(name string) - Gauge(name string, val float64) - Histogram(name string, obs float64) + Increment(name string) + Gauge(name string, val interface{}) + Count(name string, n interface{}) + Histogram(name string, obs interface{}) } -func GetMetricsImplementation(c config.Config) Metrics { +func GetMetricsImplementation(c config.Config, prefix string) Metrics { var metricsr Metrics metricsType, err := c.GetMetricsType() if err != nil { @@ -24,12 +25,50 @@ func GetMetricsImplementation(c config.Config) Metrics { } switch metricsType { case "honeycomb": - metricsr = &HoneycombMetrics{} + metricsr = &HoneycombMetrics{prefix: prefix} case "prometheus": - metricsr = &PromMetrics{} + metricsr = &PromMetrics{prefix: prefix} default: fmt.Printf("unknown metrics type %s. 
Exiting.\n", metricsType) os.Exit(1) } return metricsr } + +func ConvertNumeric(val interface{}) float64 { + switch n := val.(type) { + case int: + return float64(n) + case uint: + return float64(n) + case int64: + return float64(n) + case uint64: + return float64(n) + case int32: + return float64(n) + case uint32: + return float64(n) + case int16: + return float64(n) + case uint16: + return float64(n) + case int8: + return float64(n) + case uint8: + return float64(n) + case float64: + return n + case float32: + return float64(n) + default: + return 0 + } +} + +func PrefixMetricName(prefix string, name string) string { + if prefix != "" { + return fmt.Sprintf(`%s_%s`, prefix, name) + } + return name +} diff --git a/metrics/mock.go b/metrics/mock.go index 496a319585..a2d6e9dbb6 100644 --- a/metrics/mock.go +++ b/metrics/mock.go @@ -27,19 +27,25 @@ func (m *MockMetrics) Register(name string, metricType string) { m.Registrations[name] = metricType } -func (m *MockMetrics) IncrementCounter(name string) { +func (m *MockMetrics) Increment(name string) { m.lock.Lock() defer m.lock.Unlock() m.CounterIncrements[name] += 1 } -func (m *MockMetrics) Gauge(name string, val float64) { +func (m *MockMetrics) Gauge(name string, val interface{}) { m.lock.Lock() defer m.lock.Unlock() - m.GaugeRecords[name] = val + m.GaugeRecords[name] = ConvertNumeric(val) } -func (m *MockMetrics) Histogram(name string, obs float64) { +func (m *MockMetrics) Count(name string, val interface{}) { + m.lock.Lock() + defer m.lock.Unlock() + + m.CounterIncrements[name] += int(ConvertNumeric(val)) +} +func (m *MockMetrics) Histogram(name string, val interface{}) { m.lock.Lock() defer m.lock.Unlock() @@ -47,5 +53,5 @@ func (m *MockMetrics) Histogram(name string, obs float64) { if !ok { m.Histograms[name] = make([]float64, 0) } - m.Histograms[name] = append(m.Histograms[name], obs) + m.Histograms[name] = append(m.Histograms[name], ConvertNumeric(val)) } diff --git a/metrics/nullmetrics.go 
b/metrics/nullmetrics.go index cc0985076a..18d7eb5099 100644 --- a/metrics/nullmetrics.go +++ b/metrics/nullmetrics.go @@ -7,6 +7,7 @@ type NullMetrics struct{} func (n *NullMetrics) Start() {} func (n *NullMetrics) Register(name string, metricType string) {} -func (n *NullMetrics) IncrementCounter(name string) {} -func (n *NullMetrics) Gauge(name string, val float64) {} -func (n *NullMetrics) Histogram(name string, obs float64) {} +func (n *NullMetrics) Increment(name string) {} +func (n *NullMetrics) Gauge(name string, val interface{}) {} +func (n *NullMetrics) Count(name string, val interface{}) {} +func (n *NullMetrics) Histogram(name string, obs interface{}) {} diff --git a/metrics/prometheus.go b/metrics/prometheus.go index 88fff63425..24c3df836f 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -20,6 +20,8 @@ type PromMetrics struct { // them by name metrics map[string]interface{} lock sync.Mutex + + prefix string } func (p *PromMetrics) Start() error { @@ -56,16 +58,19 @@ func (p *PromMetrics) Register(name string, metricType string) { case "counter": newmet = promauto.NewCounter(prometheus.CounterOpts{ Name: name, + Namespace: p.prefix, Help: name, }) case "gauge": newmet = promauto.NewGauge(prometheus.GaugeOpts{ Name: name, + Namespace: p.prefix, Help: name, }) case "histogram": newmet = promauto.NewHistogram(prometheus.HistogramOpts{ Name: name, + Namespace: p.prefix, Help: name, }) } @@ -73,24 +78,31 @@ func (p *PromMetrics) Register(name string, metricType string) { p.metrics[name] = newmet } -func (p *PromMetrics) IncrementCounter(name string) { +func (p *PromMetrics) Increment(name string) { if counterIface, ok := p.metrics[name]; ok { if counter, ok := counterIface.(prometheus.Counter); ok { counter.Inc() } } } -func (p *PromMetrics) Gauge(name string, val float64) { +func (p *PromMetrics) Count(name string, n interface{}) { + if counterIface, ok := p.metrics[name]; ok { + if counter, ok := counterIface.(prometheus.Counter); ok { + 
counter.Add(ConvertNumeric(n)) + } + } +} +func (p *PromMetrics) Gauge(name string, val interface{}) { if gaugeIface, ok := p.metrics[name]; ok { if gauge, ok := gaugeIface.(prometheus.Gauge); ok { - gauge.Set(val) + gauge.Set(ConvertNumeric(val)) } } } -func (p *PromMetrics) Histogram(name string, obs float64) { +func (p *PromMetrics) Histogram(name string, obs interface{}) { if histIface, ok := p.metrics[name]; ok { if hist, ok := histIface.(prometheus.Histogram); ok { - hist.Observe(obs) + hist.Observe(ConvertNumeric(obs)) } } } diff --git a/route/proxy.go b/route/proxy.go index 7916a4f771..354acd2571 100644 --- a/route/proxy.go +++ b/route/proxy.go @@ -12,7 +12,7 @@ import ( // response, blocking until it gets one. This is used for all non-event traffic // (eg team api key verification, markers, etc.) func (r *Router) proxy(w http.ResponseWriter, req *http.Request) { - r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_proxied") + r.Metrics.Increment(r.incomingOrPeer + "_router_proxied") r.Logger.Debug().Logf("proxying request for %s", req.URL.Path) upstreamTarget, err := r.Config.GetHoneycombAPI() if err != nil { diff --git a/route/route.go b/route/route.go index 7885d4a17f..df79743fd8 100644 --- a/route/route.go +++ b/route/route.go @@ -257,7 +257,7 @@ func (r *Router) debugTrace(w http.ResponseWriter, req *http.Request) { // event is handler for /1/event/ func (r *Router) event(w http.ResponseWriter, req *http.Request) { - r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_event") + r.Metrics.Increment(r.incomingOrPeer + "_router_event") defer req.Body.Close() bodyReader, err := r.getMaybeCompressedBody(req) @@ -322,7 +322,7 @@ func (r *Router) requestToEvent(req *http.Request, reqBod []byte) (*types.Event, } func (r *Router) batch(w http.ResponseWriter, req *http.Request) { - r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_batch") + r.Metrics.Increment(r.incomingOrPeer + "_router_batch") defer req.Body.Close() reqID := 
req.Context().Value(types.RequestIDContextKey{}) @@ -514,7 +514,7 @@ func (r *Router) processEvent(ev *types.Event, reqID interface{}) error { } if traceID == "" { // not part of a trace. send along upstream - r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_nonspan") + r.Metrics.Increment(r.incomingOrPeer + "_router_nonspan") debugLog.WithString("api_host", ev.APIHost). WithString("dataset", ev.Dataset). Logf("sending non-trace event from batch") @@ -526,7 +526,7 @@ func (r *Router) processEvent(ev *types.Event, reqID interface{}) error { // ok, we're a span. Figure out if we should handle locally or pass on to a peer targetShard := r.Sharder.WhichShard(traceID) if r.incomingOrPeer == "incoming" && !targetShard.Equals(r.Sharder.MyShard()) { - r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_peer") + r.Metrics.Increment(r.incomingOrPeer + "_router_peer") debugLog.WithString("peer", targetShard.GetAddress()). Logf("Sending span from batch to my peer") ev.APIHost = targetShard.GetAddress() @@ -550,12 +550,12 @@ func (r *Router) processEvent(ev *types.Event, reqID interface{}) error { err = r.Collector.AddSpanFromPeer(span) } if err != nil { - r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_dropped") + r.Metrics.Increment(r.incomingOrPeer + "_router_dropped") debugLog.Logf("Dropping span from batch, channel full") return err } - r.Metrics.IncrementCounter(r.incomingOrPeer + "_router_span") + r.Metrics.Increment(r.incomingOrPeer + "_router_span") debugLog.Logf("Accepting span from batch for collection into a trace") return nil } diff --git a/sample/dynamic.go b/sample/dynamic.go index 1192ae3132..eda9e3d3ec 100644 --- a/sample/dynamic.go +++ b/sample/dynamic.go @@ -64,9 +64,9 @@ func (d *DynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool) { "trace_id": trace.TraceID, }).Logf("got sample rate and decision") if shouldKeep { - d.Metrics.IncrementCounter("dynsampler_num_kept") + d.Metrics.Increment("dynsampler_num_kept") } else { - 
d.Metrics.IncrementCounter("dynsampler_num_dropped") + d.Metrics.Increment("dynsampler_num_dropped") } d.Metrics.Histogram("dynsampler_sample_rate", float64(rate)) return uint(rate), shouldKeep diff --git a/sample/dynamic_ema.go b/sample/dynamic_ema.go index f36ac5bcd3..1fd44d8b21 100644 --- a/sample/dynamic_ema.go +++ b/sample/dynamic_ema.go @@ -76,9 +76,9 @@ func (d *EMADynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool) { "trace_id": trace.TraceID, }).Logf("got sample rate and decision") if shouldKeep { - d.Metrics.IncrementCounter("dynsampler_num_kept") + d.Metrics.Increment("dynsampler_num_kept") } else { - d.Metrics.IncrementCounter("dynsampler_num_dropped") + d.Metrics.Increment("dynsampler_num_dropped") } d.Metrics.Histogram("dynsampler_sample_rate", float64(rate)) return uint(rate), shouldKeep diff --git a/sample/rules.go b/sample/rules.go index 90000f03f7..265565be4b 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -41,9 +41,9 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b if rule.Condition == nil { s.Metrics.Histogram("rulessampler_sample_rate", float64(rule.SampleRate)) if keep { - s.Metrics.IncrementCounter("rulessampler_num_kept") + s.Metrics.Increment("rulessampler_num_kept") } else { - s.Metrics.IncrementCounter("dynsampler_num_dropped") + s.Metrics.Increment("dynsampler_num_dropped") } logger.WithFields(map[string]interface{}{ "rate": rate, @@ -122,9 +122,9 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b if matched == len(rule.Condition) { s.Metrics.Histogram("rulessampler_sample_rate", float64(rule.SampleRate)) if keep { - s.Metrics.IncrementCounter("rulessampler_num_kept") + s.Metrics.Increment("rulessampler_num_kept") } else { - s.Metrics.IncrementCounter("dynsampler_num_dropped") + s.Metrics.Increment("dynsampler_num_dropped") } logger.WithFields(map[string]interface{}{ "rate": rate, diff --git a/sample/totalthroughput.go b/sample/totalthroughput.go index 
3752f34d25..d31ca68b81 100644 --- a/sample/totalthroughput.go +++ b/sample/totalthroughput.go @@ -67,9 +67,9 @@ func (d *TotalThroughputSampler) GetSampleRate(trace *types.Trace) (uint, bool) "trace_id": trace.TraceID, }).Logf("got sample rate and decision") if shouldKeep { - d.Metrics.IncrementCounter("dynsampler_num_kept") + d.Metrics.Increment("dynsampler_num_kept") } else { - d.Metrics.IncrementCounter("dynsampler_num_dropped") + d.Metrics.Increment("dynsampler_num_dropped") } d.Metrics.Histogram("dynsampler_sample_rate", float64(rate)) return uint(rate), shouldKeep diff --git a/transmit/transmit.go b/transmit/transmit.go index 8bf87ae29f..ab811ed1e0 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -105,7 +105,7 @@ func (d *DefaultTransmission) EnqueueEvent(ev *types.Event) { err := libhEv.SendPresampled() if err != nil { - d.Metrics.IncrementCounter(d.Name + counterEnqueueErrors) + d.Metrics.Increment(d.Name + counterEnqueueErrors) d.Logger.Error(). WithString("error", err.Error()). WithField("request_id", ev.Context.Value(types.RequestIDContextKey{})). 
@@ -164,13 +164,13 @@ func (d *DefaultTransmission) processResponses( if honeycombAPI == apiHost { // if the API host matches the configured honeycomb API, // count it as an API error - d.Metrics.IncrementCounter(d.Name + counterResponseErrorsAPI) + d.Metrics.Increment(d.Name + counterResponseErrorsAPI) } else { // otherwise, it's probably a peer error - d.Metrics.IncrementCounter(d.Name + counterResponseErrorsPeer) + d.Metrics.Increment(d.Name + counterResponseErrorsPeer) } } else { - d.Metrics.IncrementCounter(d.Name + counterResponse20x) + d.Metrics.Increment(d.Name + counterResponse20x) } case <-ctx.Done(): return From 129ab32b2641e22ec1a4d29f01da12f46f10d9bc Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Wed, 24 Mar 2021 06:07:28 -0600 Subject: [PATCH 020/351] Add dependabot (#226) --- .github/dependabot.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..caf9e4d020 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,15 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. 
+# Please see the documentation for all configuration options: +# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "gomod" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" + labels: + - "type: maintenance" + reviewers: + - "honeycombio/integrations-team" From 8b1595de4ead60d268d2a8e1795aa3354d462144 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 24 Mar 2021 13:19:01 +0000 Subject: [PATCH 021/351] Log sampler config and validation errors (#228) When loading sampler rules from file based config, it can be difficult to see what rules have been set or any validation errors. This PR logs the sampler config in Debug level and also any unmarshalling / validation errors in Warn level. --- config/file_config.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/config/file_config.go b/config/file_config.go index af9d1054a5..1bffdebbb4 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -11,6 +11,7 @@ import ( "github.com/fsnotify/fsnotify" "github.com/go-playground/validator" libhoney "github.com/honeycombio/libhoney-go" + "github.com/sirupsen/logrus" viper "github.com/spf13/viper" ) @@ -287,6 +288,8 @@ func (f *fileConfig) validateConditionalConfigs() error { } func (f *fileConfig) validateSamplerConfigs() error { + logrus.Debugf("Sampler rules config: %+v", f.rules) + keys := f.rules.AllKeys() for _, key := range keys { parts := strings.Split(key, ".") @@ -307,16 +310,16 @@ func (f *fileConfig) validateSamplerConfigs() error { case "TotalThroughputSampler": i = &TotalThroughputSamplerConfig{} default: - return errors.New("Invalid or missing default sampler type") + return fmt.Errorf("Invalid or missing default sampler type: %s", t) } err := f.rules.Unmarshal(i) if 
err != nil { - return err + return fmt.Errorf("Failed to unmarshal sampler rule: %w", err) } v := validator.New() err = v.Struct(i) if err != nil { - return err + return fmt.Errorf("Failed to validate sampler rule: %w", err) } } @@ -336,18 +339,18 @@ func (f *fileConfig) validateSamplerConfigs() error { case "TotalThroughputSampler": i = &TotalThroughputSamplerConfig{} default: - return errors.New("Invalid or missing dataset sampler type") + return fmt.Errorf("Invalid or missing dataset sampler type: %s", t) } datasetName := parts[0] if sub := f.rules.Sub(datasetName); sub != nil { err := sub.Unmarshal(i) if err != nil { - return err + return fmt.Errorf("Failed to unmarshal dataset sampler rule: %w", err) } v := validator.New() err = v.Struct(i) if err != nil { - return err + return fmt.Errorf("Failed to validate dataset sampler rule: %w", err) } } } From f877519f677a98384fe1bbc8f473fecd11a7c776 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 24 Mar 2021 15:47:17 +0000 Subject: [PATCH 022/351] Prepare v1.1.0 release (#234) Adds changelog for v1.10 and v1.1.0 --- CHANGELOG.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..34ab63e824 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,23 @@ +# Refinery Changelog + +## 1.1.0 + +### Improvements + +- Add support environment variables for API keys (#221) +- Removes whitelist terminology (#222) +- Log sampler config and validation errors (#228) + +### Fixes + +- Pass along upstream and peer metrics configs to libhoney (#227) +- Guard against nil pointer dereference when processing OTLP span.Status (#223) +- Fix YAML config parsing (#220) + +### Maintenance + +- Add test for OTLP handler, including spans with no status (#225) + +## 1.0.0 + +Initial GA release of Refinery From 900035343ff4690ceb053e2f0e9a18b34b11fb77 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Thu, 25 Mar 2021 17:48:17 
+0000 Subject: [PATCH 023/351] Add metrics qualifiers for structs that inject metrics (#237) * add metrics qualifiers for structs that inject metrics * add tests for verifying named metrics dependencies --- app/app.go | 2 +- app/app_test.go | 2 +- collect/collect.go | 2 +- collect/collect_test.go | 19 +++++++++++++++++++ route/route.go | 2 +- route/route_test.go | 39 ++++++++++++++++++++++++++++++++------- sample/sample.go | 2 +- sample/sample_test.go | 26 ++++++++++++++++++++++++++ transmit/transmit.go | 2 +- transmit/transmit_test.go | 19 +++++++++++++++++++ 10 files changed, 102 insertions(+), 13 deletions(-) diff --git a/app/app.go b/app/app.go index d6731d14e3..3109a1ba60 100644 --- a/app/app.go +++ b/app/app.go @@ -14,7 +14,7 @@ type App struct { IncomingRouter route.Router `inject:"inline"` PeerRouter route.Router `inject:"inline"` Collector collect.Collector `inject:""` - Metrics metrics.Metrics `inject:""` + Metrics metrics.Metrics `inject:"metrics"` // Version is the build ID for Refinery so that the running process may answer // requests for the version diff --git a/app/app_test.go b/app/app_test.go index eb89bb6ac2..e6cd45ae51 100644 --- a/app/app_test.go +++ b/app/app_test.go @@ -185,7 +185,7 @@ func newStartedApp( &inject.Object{Value: &transmit.DefaultTransmission{LibhClient: peerClient, Name: "peer_"}, Name: "peerTransmission"}, &inject.Object{Value: shrdr}, &inject.Object{Value: collector}, - &inject.Object{Value: metricsr}, + &inject.Object{Value: metricsr, Name: "metrics"}, &inject.Object{Value: "test", Name: "version"}, &inject.Object{Value: samplerFactory}, &inject.Object{Value: &a}, diff --git a/collect/collect.go b/collect/collect.go index 503fbc4401..b4cffd3e52 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -51,7 +51,7 @@ type InMemCollector struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` Transmission transmit.Transmission `inject:"upstreamTransmission"` - Metrics metrics.Metrics `inject:""` + Metrics 
metrics.Metrics `inject:"metrics"` SamplerFactory *sample.SamplerFactory `inject:""` // For test use only diff --git a/collect/collect_test.go b/collect/collect_test.go index bda6a84313..2d912d1056 100644 --- a/collect/collect_test.go +++ b/collect/collect_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/facebookgo/inject" lru "github.com/hashicorp/golang-lru" "github.com/stretchr/testify/assert" @@ -569,3 +570,21 @@ func TestAddSpanNoBlock(t *testing.T) { err = coll.AddSpanFromPeer(span) assert.Error(t, err) } + +func TestDependencyInjection(t *testing.T) { + var g inject.Graph + err := g.Provide( + &inject.Object{Value: &InMemCollector{}}, + &inject.Object{Value: &config.MockConfig{}}, + &inject.Object{Value: &logger.NullLogger{}}, + &inject.Object{Value: &transmit.MockTransmission{}, Name: "upstreamTransmission"}, + &inject.Object{Value: &metrics.NullMetrics{}, Name: "metrics"}, + &inject.Object{Value: &sample.SamplerFactory{}}, + ) + if err != nil { + t.Error(err) + } + if err := g.Populate(); err != nil { + t.Error(err) + } +} diff --git a/route/route.go b/route/route.go index df79743fd8..05630cc201 100644 --- a/route/route.go +++ b/route/route.go @@ -62,7 +62,7 @@ type Router struct { PeerTransmission transmit.Transmission `inject:"peerTransmission"` Sharder sharder.Sharder `inject:""` Collector collect.Collector `inject:""` - Metrics metrics.Metrics `inject:""` + Metrics metrics.Metrics `inject:"metrics"` // version is set on startup so that the router may answer HTTP requests for // the version diff --git a/route/route_test.go b/route/route_test.go index 47c43bf6ff..8cb5bebf1f 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -7,13 +7,6 @@ import ( "compress/gzip" "context" "fmt" - "github.com/honeycombio/refinery/config" - collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1" - common 
"github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" - trace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/transmit" "io" "io/ioutil" "math" @@ -23,6 +16,16 @@ import ( "testing" "time" + "github.com/facebookgo/inject" + "github.com/honeycombio/refinery/collect" + "github.com/honeycombio/refinery/config" + collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1" + common "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" + trace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" + "github.com/honeycombio/refinery/logger" + "github.com/honeycombio/refinery/metrics" + "github.com/honeycombio/refinery/transmit" + "github.com/gorilla/mux" "github.com/honeycombio/refinery/sharder" "github.com/klauspost/compress/zstd" @@ -441,6 +444,28 @@ func TestOTLPHandler(t *testing.T) { }) } +func TestDependencyInjection(t *testing.T) { + var g inject.Graph + err := g.Provide( + &inject.Object{Value: &Router{}}, + + &inject.Object{Value: &config.MockConfig{}}, + &inject.Object{Value: &logger.NullLogger{}}, + &inject.Object{Value: http.DefaultTransport, Name: "upstreamTransport"}, + &inject.Object{Value: &transmit.MockTransmission{}, Name: "upstreamTransmission"}, + &inject.Object{Value: &transmit.MockTransmission{}, Name: "peerTransmission"}, + &inject.Object{Value: &TestSharder{}}, + &inject.Object{Value: &collect.InMemCollector{}}, + &inject.Object{Value: &metrics.NullMetrics{}, Name: "metrics"}, + ) + if err != nil { + t.Error(err) + } + if err := g.Populate(); err != nil { + 
t.Error(err) + } +} + func helperOTLPRequestSpansWithoutStatus() []*trace.Span { now := time.Now() return []*trace.Span{ diff --git a/sample/sample.go b/sample/sample.go index f312162f4c..9fb4539cfb 100644 --- a/sample/sample.go +++ b/sample/sample.go @@ -17,7 +17,7 @@ type Sampler interface { type SamplerFactory struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` - Metrics metrics.Metrics `inject:""` + Metrics metrics.Metrics `inject:"metrics"` } // GetSamplerImplementationForDataset returns the sampler implementation for the dataset, diff --git a/sample/sample_test.go b/sample/sample_test.go index 184c9dc239..53fad51b40 100644 --- a/sample/sample_test.go +++ b/sample/sample_test.go @@ -1,3 +1,29 @@ // +build all race package sample + +import ( + "testing" + + "github.com/facebookgo/inject" + "github.com/honeycombio/refinery/config" + "github.com/honeycombio/refinery/logger" + "github.com/honeycombio/refinery/metrics" +) + +func TestDependencyInjection(t *testing.T) { + var g inject.Graph + err := g.Provide( + &inject.Object{Value: &SamplerFactory{}}, + + &inject.Object{Value: &config.MockConfig{}}, + &inject.Object{Value: &logger.NullLogger{}}, + &inject.Object{Value: &metrics.NullMetrics{}, Name: "metrics"}, + ) + if err != nil { + t.Error(err) + } + if err := g.Populate(); err != nil { + t.Error(err) + } +} diff --git a/transmit/transmit.go b/transmit/transmit.go index ab811ed1e0..aa4f7667f7 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -31,7 +31,7 @@ const ( type DefaultTransmission struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` - Metrics metrics.Metrics `inject:""` + Metrics metrics.Metrics `inject:"metrics"` Version string `inject:"version"` LibhClient *libhoney.Client diff --git a/transmit/transmit_test.go b/transmit/transmit_test.go index 3d35fc2303..efb0a954b0 100644 --- a/transmit/transmit_test.go +++ b/transmit/transmit_test.go @@ 
-5,6 +5,7 @@ package transmit import ( "testing" + "github.com/facebookgo/inject" "github.com/honeycombio/refinery/config" "github.com/honeycombio/refinery/logger" "github.com/honeycombio/refinery/metrics" @@ -27,3 +28,21 @@ func TestDefaultTransmissionUpdatesUserAgentAdditionAfterStart(t *testing.T) { assert.Nil(t, err) assert.Equal(t, libhoney.UserAgentAddition, "refinery/test") } + +func TestDependencyInjection(t *testing.T) { + var g inject.Graph + err := g.Provide( + &inject.Object{Value: &DefaultTransmission{}}, + + &inject.Object{Value: &config.MockConfig{}}, + &inject.Object{Value: &logger.NullLogger{}}, + &inject.Object{Value: &metrics.NullMetrics{}, Name: "metrics"}, + &inject.Object{Value: "test", Name: "version"}, + ) + if err != nil { + t.Error(err) + } + if err := g.Populate(); err != nil { + t.Error(err) + } +} From 1f5d27cc29d858f79c8ace37203b74f4189586a7 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Thu, 25 Mar 2021 12:01:51 -0600 Subject: [PATCH 024/351] Update changelog (#238) --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 34ab63e824..a9e889d834 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Refinery Changelog +## 1.1.1 + +### Fixes + +- Refinery startup issues in v1.1.0 + ## 1.1.0 ### Improvements From 2e237c50942796e84f5529938a26bb7347afae8c Mon Sep 17 00:00:00 2001 From: Joe Date: Mon, 5 Apr 2021 17:04:17 -0400 Subject: [PATCH 025/351] Update README content (#239) Just some minor grammatical corrections from getting familiar with the project. 
Co-authored-by: Joe Zatkovich --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 326978e17a..394ec9de6c 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ When launched in redis-config mode, Refinery needs a redis host to use for manag - set the `REFINERY_REDIS_HOST` environment variable (and optionally the `REFINERY_REDIS_PASSWORD` environment variable) - set the `RedisHost` field in the config file (and optionally the `RedisPassword` field in the config file) -The redis host should be a hostname and a port, for example `redis.mydomain.com:6379`. The example config file has `localhost:6379` which obviously will not work with more than one host. When TLS is required to connect to the redis instance set the `UseTLS` config to `true`. +The Redis host should be a hostname and a port, for example `redis.mydomain.com:6379`. The example config file has `localhost:6379` which obviously will not work with more than one host. When TLS is required to connect to the Redis instance, set the `UseTLS` config to `true`. By default, a Refinery process will register itself in Redis using its local hostname as its identifier for peer communications. In environments where domain name resolution is slow or unreliable, override the reliance on name lookups by specifying the name of the peering network interface with the `IdentifierInterfaceName` configuration option. @@ -73,7 +73,7 @@ See the [Refinery documentation](https://docs.honeycomb.io/manage-data-volume/re In the configuration file, you can choose from a few sampling methods and specify options for each. The `DynamicSampler` is the most interesting and most commonly used. It uses the `AvgSampleRate` algorithm from the [`dynsampler-go`](https://github.com/honeycombio/dynsampler-go) package. Briefly described, you configure Refinery to examine the trace for a set of fields (for example, `request.status_code` and `request.method`). 
It collects all the values found in those fields anywhere in the trace (eg "200" and "GET") together into a key it hands to the dynsampler. The dynsampler code will look at the frequency that key appears during the previous 30 seconds (or other value set by the `ClearFrequencySec` setting) and use that to hand back a desired sample rate. More frequent keys are sampled more heavily, so that an even distribution of traffic across the keyspace is represented in Honeycomb. -By selecting fields well, you can drop significant amounts of traffic while still retaining good visibility into the areas of traffic that interest you. For example, if you want to make sure you have a complete list of all URL handlers invoked, you would add the URL (or a normalized form) as one of the fields to include. Be careful in your selection though, because if the combination of fields cretes a unique key each time, you won't sample out any traffic. Because of this it is not effective to use fields that have unique values (like a UUID) as one of the sampling fields. Each field included should ideally have values that appear many times within any given 30 second window in order to effectively turn in to a sample rate. +By selecting fields well, you can drop significant amounts of traffic while still retaining good visibility into the areas of traffic that interest you. For example, if you want to make sure you have a complete list of all URL handlers invoked, you would add the URL (or a normalized form) as one of the fields to include. Be careful in your selection though, because if the combination of fields creates a unique key each time, you won't sample out any traffic. Because of this it is not effective to use fields that have unique values (like a UUID) as one of the sampling fields. Each field included should ideally have values that appear many times within any given 30 second window in order to effectively turn in to a sample rate. 
For more detail on how this algorithm works, please refer to the `dynsampler` package itself. @@ -85,7 +85,7 @@ When dry run mode is enabled, the metric `trace_send_kept` will increment for ea ## Scaling Up -Refinery uses bounded queues and circular buffers to manage allocating traces, so even under high volume memory use shouldn't expand dramatically. However, given that traces are stored in a circular buffer, when the throughput of traces exceeds the size of the buffer, things will start to go wrong. If you have stastics configured, a counter named `collect_cache_buffer_overrun` will be incremented each time this happens. The symptoms of this will be that traces will stop getting accumulated together, and instead spans that should be part of the same trace will be treated as two separate traces. All traces will continue to be sent (and sampled) but the sampling decisions will be inconsistent so you'll wind up with partial traces making it through the sampler and it will be very confusing. The size of the circular buffer is a configuration option named `CacheCapacity`. To choose a good value, you should consider the throughput of traces (eg traces / second started) and multiply that by the maximum duration of a trace (say, 3 seconds), then multiply that by some large buffer (maybe 10x). This will give you good headroom. +Refinery uses bounded queues and circular buffers to manage allocating traces, so even under high volume memory use shouldn't expand dramatically. However, given that traces are stored in a circular buffer, when the throughput of traces exceeds the size of the buffer, things will start to go wrong. If you have statistics configured, a counter named `collect_cache_buffer_overrun` will be incremented each time this happens. The symptoms of this will be that traces will stop getting accumulated together, and instead spans that should be part of the same trace will be treated as two separate traces. 
All traces will continue to be sent (and sampled) but the sampling decisions will be inconsistent so you'll wind up with partial traces making it through the sampler and it will be very confusing. The size of the circular buffer is a configuration option named `CacheCapacity`. To choose a good value, you should consider the throughput of traces (e.g. traces / second started) and multiply that by the maximum duration of a trace (say, 3 seconds), then multiply that by some large buffer (maybe 10x). This will give you good headroom. Determining the number of machines necessary in the cluster is not an exact science, and is best influenced by watching for buffer overruns. But for a rough heuristic, count on a single machine using about 2G of memory to handle 5000 incoming events and tracking 500 sub-second traces per second (for each full trace lasting less than a second and an average size of 10 spans per trace). @@ -100,7 +100,7 @@ Refinery emits a number of metrics to give some indication about the health of t ## Troubleshooting -The default logging level of `warn` is almost entirely silent. The `debug` level emits too much data to be used in production, but contains excellent information in a pre-production enviromnent. Setting the logging level to `debug` during initial configuration will help understand what's working and what's not, but when traffic volumes increase it should be set to `warn`. +The default logging level of `warn` is almost entirely silent. The `debug` level emits too much data to be used in production, but contains excellent information in a pre-production environment. Setting the logging level to `debug` during initial configuration will help understand what's working and what's not, but when traffic volumes increase it should be set to `warn`. ## Restarts @@ -124,6 +124,6 @@ Within each directory, the interface the dependency exports is in the file with `sampler` contains algorithms to compute sample rates based on the traces provided. 
-`sharder` determines which peer in a clustered Refinery config is supposed to handle and individual trace. +`sharder` determines which peer in a clustered Refinery config is supposed to handle an individual trace. `types` contains a few type definitions that are used to hand data in between packages. From 128a715523bf342044fb277512d1bd157c5a2801 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Apr 2021 15:21:08 -0600 Subject: [PATCH 026/351] Bump github.com/hashicorp/golang-lru from 0.5.1 to 0.5.4 (#229) Bumps [github.com/hashicorp/golang-lru](https://github.com/hashicorp/golang-lru) from 0.5.1 to 0.5.4. - [Release notes](https://github.com/hashicorp/golang-lru/releases) - [Commits](https://github.com/hashicorp/golang-lru/compare/v0.5.1...v0.5.4) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 34 ++++------------------------------ 2 files changed, 5 insertions(+), 31 deletions(-) diff --git a/go.mod b/go.mod index e0f94849c6..2db3dc0973 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/golang/protobuf v1.4.3 github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d github.com/grpc-ecosystem/grpc-gateway v1.12.1 - github.com/hashicorp/golang-lru v0.5.1 + github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 github.com/honeycombio/libhoney-go v1.12.4 github.com/jessevdk/go-flags v1.4.0 diff --git a/go.sum b/go.sum index 40ad21cbe9..5daa9b468f 100644 --- a/go.sum +++ b/go.sum @@ -24,7 +24,6 @@ github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQY github.com/armon/circbuf 
v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -66,7 +65,6 @@ github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691/go.mod h1:sKL github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -94,13 +92,10 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= 
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -114,6 +109,7 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -144,10 +140,10 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -196,7 +192,6 @@ github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eI github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= 
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -210,7 +205,6 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw= github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -221,16 +215,13 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= 
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -250,19 +241,15 @@ github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIK github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.3.2 
h1:GDarE4TJQI52kYSbSAmLiId1Elfj+xgSDqrUZxFhxlU= github.com/spf13/afero v1.3.2/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -270,11 +257,8 @@ github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify 
v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= @@ -292,13 +276,10 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -332,9 +313,7 
@@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -350,19 +329,16 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -370,7 +346,6 @@ golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6 h1:X9xIZ1YU8bLZA3l6gqDUHSFiD golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -396,6 +371,7 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools 
v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -437,14 +413,12 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= gopkg.in/ini.v1 
v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= From b956ff321d5513316bc7c34a0b3b76ec62e9ba86 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Apr 2021 16:17:24 -0600 Subject: [PATCH 027/351] Bump github.com/jessevdk/go-flags from 1.4.0 to 1.5.0 (#230) Bumps [github.com/jessevdk/go-flags](https://github.com/jessevdk/go-flags) from 1.4.0 to 1.5.0. - [Release notes](https://github.com/jessevdk/go-flags/releases) - [Commits](https://github.com/jessevdk/go-flags/compare/v1.4.0...v1.5.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 3 +-- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 2db3dc0973..feaed3d48b 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 github.com/honeycombio/libhoney-go v1.12.4 - github.com/jessevdk/go-flags v1.4.0 + github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.6 github.com/klauspost/compress v1.10.3 github.com/leodido/go-urn v1.2.0 // indirect @@ -36,7 +36,6 @@ require ( github.com/spf13/viper v1.7.0 github.com/stretchr/testify v1.5.1 github.com/vmihailenco/msgpack/v4 v4.3.11 - golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6 // indirect golang.org/x/text v0.3.3 // indirect google.golang.org/grpc v1.32.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 diff --git a/go.sum b/go.sum index 5daa9b468f..557f7fb510 100644 --- a/go.sum +++ b/go.sum @@ -154,8 +154,8 @@ github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+D 
github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= github.com/honeycombio/libhoney-go v1.12.4 h1:rWAoxhpvu2briq85wZc04osHgKtueCLAk/3igqTX3+Q= github.com/honeycombio/libhoney-go v1.12.4/go.mod h1:tp2qtK0xMZyG/ZfykkebQESKFS78xpyPr2wEswZ1j6U= -github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -342,8 +342,8 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6 h1:X9xIZ1YU8bLZA3l6gqDUHSFiD0GFI9S548h6C8nDtOY= -golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= From ff487f063fbd63879975b50bfce556b5b4d37109 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Apr 2021 16:39:40 -0600 Subject: [PATCH 028/351] Bump github.com/stretchr/testify from 1.5.1 to 1.7.0 (#231) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.5.1 to 1.7.0. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.5.1...v1.7.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index feaed3d48b..01dcf72520 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.7.0 - github.com/stretchr/testify v1.5.1 + github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 golang.org/x/text v0.3.3 // indirect google.golang.org/grpc v1.32.0 diff --git a/go.sum b/go.sum index 557f7fb510..2d6bc90789 100644 --- a/go.sum +++ b/go.sum @@ -260,8 +260,9 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -430,6 +431,8 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 3bc828bb35470d3bcae9dde27fc9180a498e0ffc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Apr 2021 16:53:56 -0600 Subject: [PATCH 029/351] Bump github.com/pkg/errors from 0.8.1 to 0.9.1 (#232) Bumps [github.com/pkg/errors](https://github.com/pkg/errors) from 0.8.1 
to 0.9.1. - [Release notes](https://github.com/pkg/errors/releases) - [Commits](https://github.com/pkg/errors/compare/v0.8.1...v0.9.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 01dcf72520..9b390b457d 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/leodido/go-urn v1.2.0 // indirect github.com/mitchellh/mapstructure v1.3.3 // indirect github.com/pelletier/go-toml v1.8.0 // indirect - github.com/pkg/errors v0.8.1 + github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v0.9.3 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.2.0 diff --git a/go.sum b/go.sum index 2d6bc90789..c7c38f310b 100644 --- a/go.sum +++ b/go.sum @@ -206,8 +206,9 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw= github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= From dd637e090a4f01c9faf1b2422a64321505808563 Mon Sep 17 00:00:00 2001 From: Kevan Carstensen Date: Mon, 12 Apr 2021 10:30:29 -0700 Subject: [PATCH 030/351] sample: change dynsampler metric names to match rulessampler convention (#236) --- sample/rules.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sample/rules.go b/sample/rules.go index 265565be4b..da9d997297 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -43,7 +43,7 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b if keep { s.Metrics.Increment("rulessampler_num_kept") } else { - s.Metrics.Increment("dynsampler_num_dropped") + s.Metrics.Increment("rulessampler_num_dropped") } logger.WithFields(map[string]interface{}{ "rate": rate, @@ -124,7 +124,7 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b if keep { s.Metrics.Increment("rulessampler_num_kept") } else { - s.Metrics.Increment("dynsampler_num_dropped") + s.Metrics.Increment("rulessampler_num_dropped") } logger.WithFields(map[string]interface{}{ "rate": rate, From 8d583e47e79eefe01c4a5491da1fc89fa3ab3883 Mon Sep 17 00:00:00 2001 From: Paul Osman Date: Tue, 4 May 2021 19:48:34 -0500 Subject: [PATCH 031/351] Verify that sample rate trace field key is specified, if needed. 
(#248) * Verify that a sample key field is specified, if needed --- config/config_test.go | 41 ++++++++++++++++++++++++++++++++++++++++ config/sampler_config.go | 6 +++--- 2 files changed, 44 insertions(+), 3 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index a4470d3661..4d9cbeec31 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -255,6 +255,46 @@ func TestPeerManagementType(t *testing.T) { } } +func TestAbsentTraceKeyField(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "") + assert.NoError(t, err) + defer os.RemoveAll(tmpDir) + + configFile, err := ioutil.TempFile(tmpDir, "*.toml") + assert.NoError(t, err) + + rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") + assert.NoError(t, err) + + _, err = configFile.Write([]byte(` + [InMemCollector] + CacheCapacity=1000 + + [HoneycombMetrics] + MetricsHoneycombAPI="http://honeycomb.io" + MetricsAPIKey="1234" + MetricsDataset="testDatasetName" + MetricsReportingInterval=3 + `)) + assert.NoError(t, err) + + _, err = rulesFile.Write([]byte(` + [dataset1] + Sampler = "EMADynamicSampler" + GoalSampleRate = 10 + UseTraceLength = true + AddSampleRateKeyToTrace = true + FieldList = "[request.method]" + Weight = 0.4 + `)) + + rulesFile.Close() + + _, err = NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Error:Field validation for 'AddSampleRateKeyToTraceField'") +} + func TestDebugServiceAddr(t *testing.T) { tmpDir, err := ioutil.TempDir("", "") assert.NoError(t, err) @@ -399,6 +439,7 @@ func TestGetSamplerTypes(t *testing.T) { GoalSampleRate = 10 UseTraceLength = true AddSampleRateKeyToTrace = true + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" FieldList = "[request.method]" Weight = 0.3 diff --git a/config/sampler_config.go b/config/sampler_config.go index 170ed932c8..84c3bc299a 100644 --- a/config/sampler_config.go +++ b/config/sampler_config.go @@ -10,7 +10,7 @@ type 
DynamicSamplerConfig struct { FieldList []string `validate:"required"` UseTraceLength bool AddSampleRateKeyToTrace bool - AddSampleRateKeyToTraceField string + AddSampleRateKeyToTraceField string `validate:"required_with=AddSampleRateKeyToTrace"` } type EMADynamicSamplerConfig struct { @@ -25,7 +25,7 @@ type EMADynamicSamplerConfig struct { FieldList []string `validate:"required"` UseTraceLength bool AddSampleRateKeyToTrace bool - AddSampleRateKeyToTraceField string + AddSampleRateKeyToTraceField string `validate:"required_with=AddSampleRateKeyToTrace"` } type TotalThroughputSamplerConfig struct { @@ -34,5 +34,5 @@ type TotalThroughputSamplerConfig struct { FieldList []string `validate:"required"` UseTraceLength bool AddSampleRateKeyToTrace bool - AddSampleRateKeyToTraceField string + AddSampleRateKeyToTraceField string `validate:"required_with=AddSampleRateKeyToTrace"` } From d2b3c8106ecbc44db93643ebd44429572dd08700 Mon Sep 17 00:00:00 2001 From: Vera Johne Date: Wed, 12 May 2021 12:02:14 -0400 Subject: [PATCH 032/351] Move from garyburd Redigo to supported redigo (#249) * Moving from garyburd to gomodule redigo/redis --- cmd/test_redimem/main.go | 2 +- go.mod | 2 +- go.sum | 4 ++-- internal/peer/redis.go | 2 +- internal/redimem/redimem.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/test_redimem/main.go b/cmd/test_redimem/main.go index 6448d0d196..161f3c153b 100644 --- a/cmd/test_redimem/main.go +++ b/cmd/test_redimem/main.go @@ -10,7 +10,7 @@ import ( "time" "github.com/davecgh/go-spew/spew" - "github.com/garyburd/redigo/redis" + "github.com/gomodule/redigo/redis" "github.com/sirupsen/logrus" "github.com/honeycombio/refinery/internal/redimem" diff --git a/go.mod b/go.mod index 9b390b457d..5a12b39381 100644 --- a/go.mod +++ b/go.mod @@ -9,11 +9,11 @@ require ( github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d 
github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 // indirect github.com/fsnotify/fsnotify v1.4.9 - github.com/garyburd/redigo v1.6.0 github.com/go-playground/universal-translator v0.17.0 // indirect github.com/go-playground/validator v9.31.0+incompatible github.com/gogo/protobuf v1.3.1 github.com/golang/protobuf v1.4.3 + github.com/gomodule/redigo v1.8.4 github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d github.com/grpc-ecosystem/grpc-gateway v1.12.1 github.com/hashicorp/golang-lru v0.5.4 diff --git a/go.sum b/go.sum index c7c38f310b..5058ca81ff 100644 --- a/go.sum +++ b/go.sum @@ -68,8 +68,6 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc= -github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -104,6 +102,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.3 
h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/gomodule/redigo v1.8.4 h1:Z5JUg94HMTR1XpwBaSH4vq3+PNSIykBLxMdglbw10gg= +github.com/gomodule/redigo v1.8.4/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/internal/peer/redis.go b/internal/peer/redis.go index 70ba3c3d3b..370cdf3ea5 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -12,7 +12,7 @@ import ( "sync" "time" - "github.com/garyburd/redigo/redis" + "github.com/gomodule/redigo/redis" "github.com/honeycombio/refinery/config" "github.com/honeycombio/refinery/internal/redimem" "github.com/sirupsen/logrus" diff --git a/internal/redimem/redimem.go b/internal/redimem/redimem.go index 600502d0a3..4def176a5a 100644 --- a/internal/redimem/redimem.go +++ b/internal/redimem/redimem.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/garyburd/redigo/redis" + "github.com/gomodule/redigo/redis" "github.com/sirupsen/logrus" ) From d9848f4e5429cac79f0c4ab67106ba828bd17c81 Mon Sep 17 00:00:00 2001 From: Jason Harley Date: Wed, 12 May 2021 15:16:30 -0400 Subject: [PATCH 033/351] Add 'meta.refinery.local_hostname' to all spans (#250) New config option AddHostMetadataToTrace Currently just adding 'meta.refinery.local_hostname' to all spans. The intention with this (and any additional) field is to assist in troubleshooting issues with a single Refinery instance. 
--- app/app_test.go | 55 +++++++++++++++++++++++++++++++++++++++---- config/config.go | 2 ++ config/config_test.go | 4 ++++ config/file_config.go | 9 +++++++ config/mock.go | 8 +++++++ config_complete.toml | 8 +++++++ transmit/transmit.go | 9 +++++++ 7 files changed, 90 insertions(+), 5 deletions(-) diff --git a/app/app_test.go b/app/app_test.go index e6cd45ae51..22019fcc29 100644 --- a/app/app_test.go +++ b/app/app_test.go @@ -12,6 +12,7 @@ import ( "net" "net/http" "net/http/httptest" + "os" "strconv" "strings" "sync" @@ -99,6 +100,7 @@ func newStartedApp( libhoneyT transmission.Sender, basePort int, peers peer.Peers, + enableHostMetadata bool, ) (*App, inject.Graph) { c := &config.MockConfig{ GetSendDelayVal: 0, @@ -113,6 +115,7 @@ func newStartedApp( GetAPIKeysVal: []string{"KEY"}, GetHoneycombAPIVal: "http://api.honeycomb.io", GetInMemoryCollectorCacheCapacityVal: config.InMemoryCollectorCacheCapacity{CacheCapacity: 10000}, + AddHostMetadataToTrace: enableHostMetadata, } var err error @@ -215,7 +218,7 @@ func TestAppIntegration(t *testing.T) { t.Parallel() var out bytes.Buffer - _, graph := newStartedApp(t, &transmission.WriterSender{W: &out}, 10000, nil) + _, graph := newStartedApp(t, &transmission.WriterSender{W: &out}, 10000, nil, false) // Send a root span, it should be sent in short order. 
req := httptest.NewRequest( @@ -267,7 +270,7 @@ func TestPeerRouting(t *testing.T) { var graph inject.Graph basePort := 11000 + (i * 2) senders[i] = &transmission.MockSender{} - apps[i], graph = newStartedApp(t, senders[i], basePort, peers) + apps[i], graph = newStartedApp(t, senders[i], basePort, peers, false) defer startstop.Stop(graph.Objects(), nil) addrs[i] = "localhost:" + strconv.Itoa(basePort) @@ -337,6 +340,48 @@ func TestPeerRouting(t *testing.T) { assert.Equal(t, expectedEvent, senders[0].Events()[0]) } +func TestHostMetadataSpanAdditions(t *testing.T) { + t.Parallel() + + var out bytes.Buffer + _, graph := newStartedApp(t, &transmission.WriterSender{W: &out}, 14000, nil, true) + hostname, _ := os.Hostname() + + // Send a root span, it should be sent in short order. + req := httptest.NewRequest( + "POST", + "http://localhost:14000/1/batch/dataset", + strings.NewReader(`[{"data":{"foo":"bar","trace.trace_id":"1"}}]`), + ) + req.Header.Set("X-Honeycomb-Team", "KEY") + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultTransport.RoundTrip(req) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + resp.Body.Close() + + err = startstop.Stop(graph.Objects(), nil) + assert.NoError(t, err) + + // Wait for span to be sent. 
+ deadline := time.After(time.Second) + for { + if out.Len() > 62 { + break + } + select { + case <-deadline: + t.Error("timed out waiting for output") + return + case <-time.After(time.Millisecond): + } + } + + expectedSpan := `{"data":{"foo":"bar","meta.refinery.local_hostname":"%s","trace.trace_id":"1"},"dataset":"dataset"}` + "\n" + assert.Equal(t, fmt.Sprintf(expectedSpan, hostname), out.String()) +} + func TestEventsEndpoint(t *testing.T) { t.Parallel() @@ -354,7 +399,7 @@ func TestEventsEndpoint(t *testing.T) { var graph inject.Graph basePort := 13000 + (i * 2) senders[i] = &transmission.MockSender{} - apps[i], graph = newStartedApp(t, senders[i], basePort, peers) + apps[i], graph = newStartedApp(t, senders[i], basePort, peers, false) defer startstop.Stop(graph.Objects(), nil) addrs[i] = "localhost:" + strconv.Itoa(basePort) @@ -499,7 +544,7 @@ func BenchmarkTraces(b *testing.B) { W: ioutil.Discard, }, } - _, graph := newStartedApp(b, sender, 11000, nil) + _, graph := newStartedApp(b, sender, 11000, nil, false) req, err := http.NewRequest( "POST", @@ -598,7 +643,7 @@ func BenchmarkDistributedTraces(b *testing.B) { for i := range apps { var graph inject.Graph basePort := 12000 + (i * 2) - apps[i], graph = newStartedApp(b, sender, basePort, peers) + apps[i], graph = newStartedApp(b, sender, basePort, peers, false) defer startstop.Stop(graph.Objects(), nil) addrs[i] = "localhost:" + strconv.Itoa(basePort) diff --git a/config/config.go b/config/config.go index b1ae39df67..d6ed96879f 100644 --- a/config/config.go +++ b/config/config.go @@ -125,4 +125,6 @@ type Config interface { GetIsDryRun() bool GetDryRunFieldName() string + + GetAddHostMetadataToTrace() bool } diff --git a/config/config_test.go b/config/config_test.go index 4d9cbeec31..8a2ac87b32 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -168,6 +168,10 @@ func TestReadDefaults(t *testing.T) { t.Error("received", d, "expected", "refinery_kept") } + if d := c.GetAddHostMetadataToTrace(); 
d != false { + t.Error("received", d, "expected", false) + } + d, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist") assert.NoError(t, err) assert.IsType(t, &DeterministicSamplerConfig{}, d) diff --git a/config/file_config.go b/config/file_config.go index 1bffdebbb4..60f75de1ca 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -75,6 +75,7 @@ type configContents struct { DryRunFieldName string PeerManagement PeerManagementConfig `validate:"required"` InMemCollector InMemoryCollectorCacheCapacity `validate:"required"` + AddHostMetadataToTrace bool } type InMemoryCollectorCacheCapacity struct { @@ -145,6 +146,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("MaxAlloc", uint64(0)) c.SetDefault("HoneycombLogger.LoggerSamplerEnabled", false) c.SetDefault("HoneycombLogger.LoggerSamplerThroughput", 5) + c.SetDefault("AddHostMetadataToTrace", false) c.SetConfigFile(config) err := c.ReadInConfig() @@ -722,3 +724,10 @@ func (f *fileConfig) GetDryRunFieldName() string { return f.conf.DryRunFieldName } + +func (f *fileConfig) GetAddHostMetadataToTrace() bool { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.AddHostMetadataToTrace +} diff --git a/config/mock.go b/config/mock.go index 5da76a4280..bcd50e086b 100644 --- a/config/mock.go +++ b/config/mock.go @@ -64,6 +64,7 @@ type MockConfig struct { DebugServiceAddr string DryRun bool DryRunFieldName string + AddHostMetadataToTrace bool Mux sync.RWMutex } @@ -288,3 +289,10 @@ func (m *MockConfig) GetDryRunFieldName() string { return m.DryRunFieldName } + +func (m *MockConfig) GetAddHostMetadataToTrace() bool { + m.Mux.RLock() + defer m.Mux.RUnlock() + + return m.AddHostMetadataToTrace +} diff --git a/config_complete.toml b/config_complete.toml index 983d3340df..67be1efeb0 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -87,6 +87,14 @@ PeerBufferSize = 10000 # The debug service runs on the first open port between localhost:6060 and 
:6069 by default # DebugServiceAddr = "localhost:8085" +# AddHostMetadataToTrace determines whether or not to add information about +# the host that Refinery is running on to the spans that it processes. +# If enabled, information about the host will be added to each span with the +# prefix `meta.refinery.`. +# Currently the only value added is 'meta.refinery.local_hostname'. +# Not eligible for live reload +AddHostMetadataToTrace = false + ############################ ## Implementation Choices ## ############################ diff --git a/transmit/transmit.go b/transmit/transmit.go index aa4f7667f7..fc6d7d1ab1 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -2,6 +2,7 @@ package transmit import ( "context" + "os" "sync" libhoney "github.com/honeycombio/libhoney-go" @@ -54,6 +55,14 @@ func (d *DefaultTransmission) Start() error { if err != nil { return err } + + if d.Config.GetAddHostMetadataToTrace() { + if hostname, err := os.Hostname(); err == nil && hostname != "" { + // add hostname to spans + d.LibhClient.AddField("meta.refinery.local_hostname", hostname) + } + } + d.builder = d.LibhClient.NewBuilder() d.builder.APIHost = upstreamAPI From 419ec7353a5635829ede0374116a0b8106daedc4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 May 2021 15:03:31 -0600 Subject: [PATCH 034/351] Bump google.golang.org/grpc from 1.32.0 to 1.37.1 (#253) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.32.0 to 1.37.1. 
- [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.32.0...v1.37.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 25 +++++++++++++++++-------- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 5a12b39381..dfaf6f5d11 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 golang.org/x/text v0.3.3 // indirect - google.golang.org/grpc v1.32.0 + google.golang.org/grpc v1.37.1 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/ini.v1 v1.57.0 // indirect diff --git a/go.sum b/go.sum index 5058ca81ff..0850042d59 100644 --- a/go.sum +++ b/go.sum @@ -32,7 +32,7 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -44,7 +44,8 @@ 
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= @@ -92,7 +93,6 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= 
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= @@ -100,6 +100,8 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/gomodule/redigo v1.8.4 h1:Z5JUg94HMTR1XpwBaSH4vq3+PNSIykBLxMdglbw10gg= @@ -109,12 +111,14 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= @@ -395,23 +399,28 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.37.1 h1:ARnQJNWxGyYJpdf/JXscNlQr/uv607ZPU9Z7ogHi+iI= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 
h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= From fbe2c159979f00cc1f0d9485bbb732dbe4eec64b Mon Sep 17 00:00:00 2001 From: Ben Fritsch Date: Mon, 17 May 2021 23:14:43 +0200 Subject: [PATCH 035/351] make insecure TLS settings a config option (#254) --- config/config.go | 3 +++ config/file_config.go | 9 +++++++++ config/mock.go | 8 ++++++++ config_complete.toml | 4 ++++ internal/peer/redis.go | 7 ++++++- 5 files changed, 30 insertions(+), 1 deletion(-) diff --git a/config/config.go b/config/config.go index d6ed96879f..a24beffd0e 100644 --- a/config/config.go +++ b/config/config.go @@ -53,6 +53,9 @@ type Config interface { // use for peer management. GetUseTLS() (bool, error) + // UseTLSInsecure returns true when certificate checks are disabled + GetUseTLSInsecure() (bool, error) + // GetHoneycombAPI returns the base URL (protocol, hostname, and port) of // the upstream Honeycomb API server GetHoneycombAPI() (string, error) diff --git a/config/file_config.go b/config/file_config.go index 60f75de1ca..536ac5a997 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -112,6 +112,7 @@ type PeerManagementConfig struct { RedisHost string RedisPassword string UseTLS bool + UseTLSInsecure bool IdentifierInterfaceName string UseIPV6Identifier bool RedisIdentifier string @@ -132,6 +133,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("PeerManagement.Peers", []string{"http://127.0.0.1:8081"}) c.SetDefault("PeerManagement.Type", "file") c.SetDefault("PeerManagement.UseTLS", false) + c.SetDefault("PeerManagement.UseTLSInsecure", false) c.SetDefault("PeerManagement.UseIPV6Identifier", false) c.SetDefault("HoneycombAPI", "https://api.honeycomb.io") c.SetDefault("Logger", "logrus") @@ -452,6 +454,13 @@ func (f *fileConfig) GetUseTLS() (bool, error) { return f.config.GetBool("PeerManagement.UseTLS"), nil } +func (f *fileConfig) 
GetUseTLSInsecure() (bool, error) { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.config.GetBool("PeerManagement.UseTLSInsecure"), nil +} + func (f *fileConfig) GetIdentifierInterfaceName() (string, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config/mock.go b/config/mock.go index bcd50e086b..7bf07c2a0a 100644 --- a/config/mock.go +++ b/config/mock.go @@ -42,6 +42,8 @@ type MockConfig struct { GetRedisPasswordVal string GetUseTLSErr error GetUseTLSVal bool + GetUseTLSInsecureErr error + GetUseTLSInsecureVal bool GetSamplerTypeErr error GetSamplerTypeVal interface{} GetMetricsTypeErr error @@ -182,6 +184,12 @@ func (m *MockConfig) GetUseTLS() (bool, error) { return m.GetUseTLSVal, m.GetUseTLSErr } +func (m *MockConfig) GetUseTLSInsecure() (bool, error) { + m.Mux.RLock() + defer m.Mux.RUnlock() + + return m.GetUseTLSInsecureVal, m.GetUseTLSInsecureErr +} func (m *MockConfig) GetMetricsType() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index 67be1efeb0..70978294a7 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -159,6 +159,10 @@ Metrics = "honeycomb" # Not eligible for live reload. # UseTLS = false +# UseTLSInsecure disables certificate checks +# Not eligible for live reload. +# UseTLSInsecure = false + # IdentifierInterfaceName is optional. By default, when using RedisHost, Refinery will use # the local hostname to identify itself to other peers in Redis. 
If your environment # requires that you use IPs as identifiers (for example, if peers can't resolve eachother diff --git a/internal/peer/redis.go b/internal/peer/redis.go index 370cdf3ea5..aa88bb06e7 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -177,13 +177,18 @@ func buildOptions(c config.Config) []redis.DialOption { } useTLS, _ := c.GetUseTLS() + tlsInsecure, _ := c.GetUseTLSInsecure() if useTLS { tlsConfig := &tls.Config{ MinVersion: tls.VersionTLS12, } + + if tlsInsecure { + tlsConfig.InsecureSkipVerify = true + } + options = append(options, redis.DialTLSConfig(tlsConfig), - redis.DialTLSSkipVerify(true), redis.DialUseTLS(true)) } From b0dcbbcd9364c17ca36f2fc0d58e6718173ef6f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 May 2021 16:28:47 -0600 Subject: [PATCH 036/351] Bump github.com/prometheus/client_golang from 0.9.3 to 0.9.4 (#240) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 0.9.3 to 0.9.4. 
- [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/master/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v0.9.3...v0.9.4) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index dfaf6f5d11..f63a8706b0 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/mitchellh/mapstructure v1.3.3 // indirect github.com/pelletier/go-toml v1.8.0 // indirect github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v0.9.3 + github.com/prometheus/client_golang v0.9.4 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.2.0 github.com/spf13/afero v1.3.2 // indirect diff --git a/go.sum b/go.sum index 0850042d59..18cdb61801 100644 --- a/go.sum +++ b/go.sum @@ -218,18 +218,21 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v0.9.4 h1:Y8E/JaaPbmFSW2V81Ab/d8yZFYQQGbni1b1jPcG9Y6A= +github.com/prometheus/client_golang 
v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= 
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= From c72ad6e5e5d5df5d00343240255b311b2a533192 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Wed, 19 May 2021 10:25:26 -0600 Subject: [PATCH 037/351] Remove redundant peer/api suffix from response error metric (#247) * transmission metrics are already prefixed with peer/upstream --- transmit/transmit.go | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/transmit/transmit.go b/transmit/transmit.go index fc6d7d1ab1..fa46711211 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -23,10 +23,9 @@ type Transmission interface { } const ( - counterEnqueueErrors = "enqueue_errors" - counterResponse20x = "response_20x" - counterResponseErrorsAPI = "response_errors_api" - counterResponseErrorsPeer = "response_errors_peer" + counterEnqueueErrors = "enqueue_errors" + counterResponse20x = "response_20x" + counterResponseErrors = "response_errors" ) type DefaultTransmission struct { @@ -72,8 +71,7 @@ func (d *DefaultTransmission) Start() error { d.Metrics.Register(d.Name+counterEnqueueErrors, "counter") d.Metrics.Register(d.Name+counterResponse20x, "counter") - d.Metrics.Register(d.Name+counterResponseErrorsAPI, "counter") - d.Metrics.Register(d.Name+counterResponseErrorsPeer, "counter") + d.Metrics.Register(d.Name+counterResponseErrors, "counter") processCtx, canceler := context.WithCancel(context.Background()) d.responseCanceler = canceler @@ -147,7 +145,6 @@ func (d *DefaultTransmission) processResponses( ctx context.Context, responses chan transmission.Response, ) { - honeycombAPI, _ := d.Config.GetHoneycombAPI() for { select { case r := <-responses: @@ -170,14 +167,7 @@ func (d *DefaultTransmission) processResponses( log = log.WithField("error", r.Err.Error()) } log.Logf("error when sending event") - if honeycombAPI == apiHost { - // if the API host matches the configured honeycomb API, - 
// count it as an API error - d.Metrics.Increment(d.Name + counterResponseErrorsAPI) - } else { - // otherwise, it's probably a peer error - d.Metrics.Increment(d.Name + counterResponseErrorsPeer) - } + d.Metrics.Increment(d.Name + counterResponseErrors) } else { d.Metrics.Increment(d.Name + counterResponse20x) } From 96abbf5059b9df90a8612e0531f371f073ee3dcb Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Thu, 20 May 2021 10:11:20 -0600 Subject: [PATCH 038/351] Prepare for 1.2.0 release (#257) --- CHANGELOG.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9e889d834..dcb644f96c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,32 @@ # Refinery Changelog +## 1.2.0 + +### Added + +- Add `UseTLSInsecure` config option to skip TLS verification with Redis (#254) | [@beanieboi](https://github.com/beanieboi) +- Add `AddHostMetadataToTrace` config option to add Refinery hostname information to spans (#250) | [@jharley](https://github.com/jharley) +- Additional config validation: verify that sample rate trace field key is specified, if needed (#248) | [@paulosman](https://github.com/paulosman) + +### Changed + +- Remove redundant peer/api suffix from response error metrics (#247) | [@vreynolds](https://github.com/vreynolds) + - `api_response_errors_api`, `api_response_errors_peer`, `peer_response_errors_api`, `peer_response_errors_peer` + - replaced by `api_response_errors`, `peer_response_errors` +- Fix rules sampler to emit correct metric (#236) | [@isnotajoke](https://github.com/isnotajoke) + - Previously `dynsampler_num_dropped` was emitted, now `rulessampler_num_dropped` will be emitted + +### Maintenance + +- Update README content (#239) | [@jjziv](https://github.com/jjziv) +- Move from garyburd Redigo to supported redigo (#249) | [@verajohne](https://github.com/verajohne) +- Bump 
google.golang.org/grpc from 1.32.0 to 1.37.1 (#253) +- Bump github.com/prometheus/client_golang from 0.9.3 to 0.9.4 (#240) +- Bump github.com/pkg/errors from 0.8.1 to 0.9.1 (#232) +- Bump github.com/stretchr/testify from 1.5.1 to 1.7.0 (#231) +- Bump github.com/jessevdk/go-flags from 1.4.0 to 1.5.0 (#230) +- Bump github.com/hashicorp/golang-lru from 0.5.1 to 0.5.4 (#229) + ## 1.1.1 ### Fixes From 439d5a49c1ca162deef02f3247df3f22ec5bdcd2 Mon Sep 17 00:00:00 2001 From: Dean Strelau Date: Fri, 28 May 2021 09:01:40 -0500 Subject: [PATCH 039/351] Propagate span events. Fix #258 (#261) Implements #258 to pass through OTLP span events as regular events with meta.annotation_type: "span_event". --- route/route.go | 43 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 4 deletions(-) diff --git a/route/route.go b/route/route.go index 05630cc201..a43d9f5de8 100644 --- a/route/route.go +++ b/route/route.go @@ -477,7 +477,8 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ eventAttrs[k] = v } - event := &types.Event{ + events := make([]*types.Event, len(span.Events)+1) + events[0] = &types.Event{ Context: ctx, APIHost: apiHost, APIKey: apiKey, @@ -487,10 +488,44 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ Data: eventAttrs, } - err = r.processEvent(event, requestID) - if err != nil { - r.Logger.Error().Logf("Error processing event: " + err.Error()) + for i, sevent := range span.Events { + timestamp := time.Unix(0, int64(sevent.TimeUnixNano)).UTC() + attrs := map[string]interface{}{ + "trace.trace_id": traceID, + "trace.parent_id": spanID, + "name": sevent.Name, + "parent_name": span.Name, + "meta.annotation_type": "span_event", + } + + if sevent.Attributes != nil { + addAttributesToMap(attrs, sevent.Attributes) + } + sampleRate, err := getSampleRateFromAttributes(attrs) + if err != nil { + debugLog. 
+ WithField("error", err.Error()). + WithField("sampleRate", attrs["sampleRate"]). + Logf("error parsing sampleRate") + } + events[i+1] = &types.Event{ + Context: ctx, + APIHost: apiHost, + APIKey: apiKey, + Dataset: dataset, + SampleRate: uint(sampleRate), + Timestamp: timestamp, + Data: attrs, + } } + + for _, evt := range events { + err = r.processEvent(evt, requestID) + if err != nil { + r.Logger.Error().Logf("Error processing event: " + err.Error()) + } + } + } } } From d5e79ce99c4d290bae6f7cdfad546026c50f42fc Mon Sep 17 00:00:00 2001 From: Paul Osman Date: Fri, 28 May 2021 17:37:53 -0500 Subject: [PATCH 040/351] Add changelog for v1.2.1 (#262) --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dcb644f96c..1a3e34a683 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Refinery Changelog +## 1.2.1 + +### Fixes + +- OTLP span events are now supported, they were being dropped on the floor previously (#261) | [@dstrelau](https://github.com/dstrelau) + ## 1.2.0 ### Added From e671dfecf263aabe70c0c5a75b1687983a590c55 Mon Sep 17 00:00:00 2001 From: tr-fteixeira <55003893+tr-fteixeira@users.noreply.github.com> Date: Tue, 15 Jun 2021 10:34:51 -0400 Subject: [PATCH 041/351] Add support for "does-not-contain" operator in RulesBasedSampler (#267) --- sample/rules.go | 8 ++++++++ sample/rules_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/sample/rules.go b/sample/rules.go index da9d997297..4f035c3c4a 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -104,6 +104,14 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b match = strings.Contains(a, b) } } + case "does-not-contain": + switch a := value.(type) { + case string: + switch b := condition.Value.(type) { + case string: + match = !strings.Contains(a, b) + } + } } case false: switch condition.Operator { diff --git a/sample/rules_test.go 
b/sample/rules_test.go index 7add1ae669..2965c70285 100644 --- a/sample/rules_test.go +++ b/sample/rules_test.go @@ -439,6 +439,34 @@ func TestRules(t *testing.T) { ExpectedKeep: true, ExpectedRate: 4, }, + { + Rules: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "does not contain test", + SampleRate: 4, + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "first", + Operator: "does-not-contain", + Value: "noteyco", + }, + }, + }, + }, + }, + Spans: []*types.Span{ + { + Event: types.Event{ + Data: map[string]interface{}{ + "first": "honeycomb", + }, + }, + }, + }, + ExpectedKeep: true, + ExpectedRate: 4, + }, { Rules: &config.RulesBasedSamplerConfig{ Rule: []*config.RulesBasedSamplerRule{ From 965ef56dd8b40bc44d7ff679ceff36622bed7125 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Fri, 25 Jun 2021 09:54:03 +0100 Subject: [PATCH 042/351] Ensure span links and events generate events and get resource attrs (#264) When generating events for OTLP span events and links, each sub-event should also get resource attributes added. 
--- route/middleware.go | 2 +- route/route.go | 49 ++++++++++++++++--- route/route_test.go | 117 ++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 157 insertions(+), 11 deletions(-) diff --git a/route/middleware.go b/route/middleware.go index 8264bc7608..7836433853 100644 --- a/route/middleware.go +++ b/route/middleware.go @@ -45,7 +45,7 @@ func (r *Router) apiKeyChecker(next http.Handler) http.Handler { return } } - err = errors.New(fmt.Sprintf("api key %s not found in list of authed keys", apiKey)) + err = fmt.Errorf("api key %s not found in list of authed keys", apiKey) r.handlerReturnWithError(w, ErrAuthNeeded, err) }) } diff --git a/route/route.go b/route/route.go index a43d9f5de8..437a7c1b19 100644 --- a/route/route.go +++ b/route/route.go @@ -477,8 +477,8 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ eventAttrs[k] = v } - events := make([]*types.Event, len(span.Events)+1) - events[0] = &types.Event{ + events := make([]*types.Event, 0, 1+len(span.Events)+len(span.Links)) + events = append(events, &types.Event{ Context: ctx, APIHost: apiHost, APIKey: apiKey, @@ -486,9 +486,9 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ SampleRate: uint(sampleRate), Timestamp: timestamp, Data: eventAttrs, - } + }) - for i, sevent := range span.Events { + for _, sevent := range span.Events { timestamp := time.Unix(0, int64(sevent.TimeUnixNano)).UTC() attrs := map[string]interface{}{ "trace.trace_id": traceID, @@ -501,6 +501,9 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ if sevent.Attributes != nil { addAttributesToMap(attrs, sevent.Attributes) } + for k, v := range resourceAttrs { + attrs[k] = v + } sampleRate, err := getSampleRateFromAttributes(attrs) if err != nil { debugLog. @@ -508,7 +511,7 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ WithField("sampleRate", attrs["sampleRate"]). 
Logf("error parsing sampleRate") } - events[i+1] = &types.Event{ + events = append(events, &types.Event{ Context: ctx, APIHost: apiHost, APIKey: apiKey, @@ -516,7 +519,41 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ SampleRate: uint(sampleRate), Timestamp: timestamp, Data: attrs, + }) + } + + for _, slink := range span.Links { + attrs := map[string]interface{}{ + "trace.trace_id": traceID, + "trace.parent_id": spanID, + "trace.link.trace_id": bytesToTraceID(slink.TraceId), + "trace.link.span_id": hex.EncodeToString(slink.SpanId), + "parent_name": span.Name, + "meta.annotation_type": "link", + } + + if slink.Attributes != nil { + addAttributesToMap(attrs, slink.Attributes) + } + for k, v := range resourceAttrs { + attrs[k] = v + } + sampleRate, err := getSampleRateFromAttributes(attrs) + if err != nil { + debugLog. + WithField("error", err.Error()). + WithField("sampleRate", attrs["sampleRate"]). + Logf("error parsing sampleRate") } + events = append(events, &types.Event{ + Context: ctx, + APIHost: apiHost, + APIKey: apiKey, + Dataset: dataset, + SampleRate: uint(sampleRate), + Timestamp: time.Time{}, //links don't have timestamps, so use empty time + Data: attrs, + }) } for _, evt := range events { @@ -886,7 +923,7 @@ func getSampleRateFromAttributes(attrs map[string]interface{}) (int, error) { sampleRate = int(v) } default: - err = fmt.Errorf("Unrecognised sampleRate datatype - %T", sampleRate) + err = fmt.Errorf("unrecognised sampleRate datatype - %T", sampleRate) sampleRate = defaultSampleRate } // remove sampleRate from event fields diff --git a/route/route_test.go b/route/route_test.go index 8cb5bebf1f..1522d7df59 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -1,11 +1,10 @@ -// +build all race - package route import ( "bytes" "compress/gzip" "context" + "encoding/hex" "fmt" "io" "io/ioutil" @@ -25,6 +24,7 @@ import ( "github.com/honeycombio/refinery/logger" 
"github.com/honeycombio/refinery/metrics" "github.com/honeycombio/refinery/transmit" + "github.com/stretchr/testify/assert" "github.com/gorilla/mux" "github.com/honeycombio/refinery/sharder" @@ -405,16 +405,29 @@ func TestOTLPHandler(t *testing.T) { mockMetrics := metrics.MockMetrics{} mockMetrics.Start() + mockTransmission := &transmit.MockTransmission{} + mockTransmission.Start() router := &Router{ Config: &config.MockConfig{}, Metrics: &mockMetrics, - UpstreamTransmission: &transmit.MockTransmission{}, + UpstreamTransmission: mockTransmission, iopLogger: iopLogger{ - Logger: &logger.MockLogger{}, + Logger: &logger.NullLogger{}, incomingOrPeer: "incoming", }, } + conf := &config.MockConfig{ + GetSendDelayVal: 0, + GetTraceTimeoutVal: 60 * time.Second, + GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1}, + SendTickerVal: 2 * time.Millisecond, + GetInMemoryCollectorCacheCapacityVal: config.InMemoryCollectorCacheCapacity{ + CacheCapacity: 100, + MaxAlloc: 100, + }, + } + t.Run("span with status", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ @@ -427,6 +440,8 @@ func TestOTLPHandler(t *testing.T) { if err != nil { t.Errorf(`Unexpected error: %s`, err) } + assert.Equal(t, 2, len(mockTransmission.Events)) + mockTransmission.Flush() }) t.Run("span without status", func(t *testing.T) { @@ -441,6 +456,100 @@ func TestOTLPHandler(t *testing.T) { if err != nil { t.Errorf(`Unexpected error: %s`, err) } + assert.Equal(t, 2, len(mockTransmission.Events)) + mockTransmission.Flush() + }) + + // TODO: (MG) figuure out how we can test JSON created from OTLP requests + // Below is example, but requires significant usage of collector, sampler, conf, etc + t.Run("creates events for span events", func(t *testing.T) { + t.Skip("need additional work to support inspecting outbound JSON") + + traceID := []byte{0, 0, 0, 0, 1} + spanID := 
[]byte{1, 0, 0, 0, 0} + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: []*trace.Span{{ + TraceId: traceID, + SpanId: spanID, + Name: "span_with_event", + Events: []*trace.Span_Event{{ + TimeUnixNano: 12345, + Name: "span_link", + Attributes: []*common.KeyValue{{ + Key: "event_attr_key", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "event_attr_val"}}, + }}, + }}, + }}, + }}, + }}, + } + _, err := router.Export(ctx, req) + if err != nil { + t.Errorf(`Unexpected error: %s`, err) + } + + time.Sleep(conf.SendTickerVal * 2) + + mockTransmission.Mux.Lock() + assert.Equal(t, 2, len(mockTransmission.Events)) + + spanEvent := mockTransmission.Events[0] + // assert.Equal(t, time.Unix(0, int64(12345)).UTC(), spanEvent.Timestamp) + assert.Equal(t, bytesToTraceID(traceID), spanEvent.Data["trace.trace_id"]) + assert.Equal(t, hex.EncodeToString(spanID), spanEvent.Data["trace.span_id"]) + assert.Equal(t, "span_link", spanEvent.Data["span.name"]) + assert.Equal(t, "span_with_event", spanEvent.Data["parent.name"]) + assert.Equal(t, "span_event", spanEvent.Data["meta.annotation_type"]) + assert.Equal(t, "event_attr_key", spanEvent.Data["event_attr_val"]) + mockTransmission.Mux.Unlock() + mockTransmission.Flush() + }) + + t.Run("creates events for span links", func(t *testing.T) { + t.Skip("need additional work to support inspecting outbound JSON") + + traceID := []byte{0, 0, 0, 0, 1} + spanID := []byte{1, 0, 0, 0, 0} + linkTraceID := []byte{0, 0, 0, 0, 2} + linkSpanID := []byte{2, 0, 0, 0, 0} + + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: []*trace.Span{{ + Name: "span_with_link", + TraceId: traceID, + SpanId: spanID, + Links: []*trace.Span_Link{{ + TraceId: traceID, + SpanId: spanID, + TraceState: 
"link_trace_state", + Attributes: []*common.KeyValue{{ + Key: "link_attr_key", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "link_attr_val"}}, + }}, + }}, + }}, + }}, + }}, + } + _, err := router.Export(ctx, req) + if err != nil { + t.Errorf(`Unexpected error: %s`, err) + } + + time.Sleep(conf.SendTickerVal * 2) + assert.Equal(t, 2, len(mockTransmission.Events)) + + spanLink := mockTransmission.Events[1] + assert.Equal(t, bytesToTraceID(traceID), spanLink.Data["trace.trace_id"]) + assert.Equal(t, hex.EncodeToString(spanID), spanLink.Data["trace.span_id"]) + assert.Equal(t, bytesToTraceID(linkTraceID), spanLink.Data["trace.link.trace_id"]) + assert.Equal(t, hex.EncodeToString(linkSpanID), spanLink.Data["trace.link.span_id"]) + assert.Equal(t, "link", spanLink.Data["meta.annotation_type"]) + assert.Equal(t, "link_attr_val", spanLink.Data["link_attr_key"]) + mockTransmission.Flush() }) } From 4ed7aff1c44518accbab31b20a6380081c5baed3 Mon Sep 17 00:00:00 2001 From: Ben Darfler Date: Tue, 29 Jun 2021 13:26:45 -0400 Subject: [PATCH 043/351] Switches CODEOWNERS to telemetry-team (#276) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8dcecb488f..bc0633463f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @honeycombio/integrations-team +* @honeycombio/telemetry-team From a23283c5ab599c494915a577cf02eefff51f791e Mon Sep 17 00:00:00 2001 From: Ben Darfler Date: Thu, 1 Jul 2021 07:20:48 -0400 Subject: [PATCH 044/351] Updates Dependabot Config (#277) --- .github/dependabot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index caf9e4d020..2a0f635d7d 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -10,6 +10,6 @@ updates: schedule: interval: "weekly" labels: - - "type: maintenance" + - "type: dependencies" reviewers: - - "honeycombio/integrations-team" + 
- "honeycombio/telemetry-team" From 820a1379ddfdbc0307e47bf2dd2afd721dc9df9d Mon Sep 17 00:00:00 2001 From: Ben Darfler Date: Thu, 8 Jul 2021 10:04:36 -0400 Subject: [PATCH 045/351] Updates Github Action Workflows (#280) --- .github/workflows/add-to-project.yml | 14 ++++++++++++++ .github/workflows/apply-labels.yml | 10 ++-------- 2 files changed, 16 insertions(+), 8 deletions(-) create mode 100644 .github/workflows/add-to-project.yml diff --git a/.github/workflows/add-to-project.yml b/.github/workflows/add-to-project.yml new file mode 100644 index 0000000000..ac02faa17b --- /dev/null +++ b/.github/workflows/add-to-project.yml @@ -0,0 +1,14 @@ +name: Apply project management flow +on: + issues: + types: [opened] + pull_request_target: + types: [opened] +jobs: + project-management: + runs-on: ubuntu-latest + name: Apply project management flow + steps: + - uses: honeycombio/oss-management-actions/projects@v1 + with: + ghprojects-token: ${{ secrets.GHPROJECTS_TOKEN }} diff --git a/.github/workflows/apply-labels.yml b/.github/workflows/apply-labels.yml index b6aeae298a..7d90af5148 100644 --- a/.github/workflows/apply-labels.yml +++ b/.github/workflows/apply-labels.yml @@ -1,16 +1,10 @@ name: Apply project labels - -on: - - issues - - label - - pull_request_target - - pull_request - +on: [issues, pull_request, label] jobs: apply-labels: runs-on: ubuntu-latest name: Apply common project labels steps: - - uses: honeycombio/integrations-labels@v1 + - uses: honeycombio/oss-management-actions/labels@v1 with: github-token: ${{ secrets.GITHUB_TOKEN }} From 2df7e0087ce2fcfafbf92712c0b66b5486729488 Mon Sep 17 00:00:00 2001 From: Robb Kidd Date: Thu, 8 Jul 2021 15:21:26 -0400 Subject: [PATCH 046/351] update changelog for 1.3.0 (#282) --- CHANGELOG.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1a3e34a683..305965c4a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Refinery Changelog +## 1.3.0 + 
+### Added + +- Add support to "does-not-contain" operator on RulesBasedSampler [#267](https://github.com/honeycombio/refinery/pull/267) | [@tr-fteixeira](https://github.com/tr-fteixeira) + +### Fixes + +- Ensure span links and events generate events and get resource attrs [#264](https://github.com/honeycombio/refinery/pull/264) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) + ## 1.2.1 ### Fixes @@ -17,7 +27,7 @@ ### Changed - Remove redundant peer/api suffix from response error metrics (#247) | [@vreynolds](https://github.com/vreynolds) - - `api_response_errors_api`, `api_response_errors_peer`, `peer_response_errors_api`, `peer_response_errors_peer` + - `api_response_errors_api`, `api_response_errors_peer`, `peer_response_errors_api`, `peer_response_errors_peer` - replaced by `api_response_errors`, `peer_response_errors` - Fix rules sampler to emit correct metric (#236) | [@isnotajoke](https://github.com/isnotajoke) - Previously `dynsampler_num_dropped` was emitted, now `rulessampler_num_dropped` will be emitted From 6c1cb828bab30612ab3cd73ead06302eff9e8e36 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 14 Jul 2021 10:40:57 +0100 Subject: [PATCH 047/351] Add support for OTLP over HTTP/protobuf (#279) Adds support for ingesting OTLP requests over HTTP with protobuf data. 
Changs in this PR: - Move the existing gRPC export handler and helpers to to otlp_trace.go and tests into otlp_trace_test.go - Add HTTP handler postOTLP that retrieves honeycomb headers, decodes the request body into a OTLP - ExportTraceRequest and hands to internal function to process request - Update OTLP grpc Export handler to retrieve honeycomb headers from metadata and hand request to new internal function to process - Adds tests to verify rejection behaviour for non-protobuf based requests to postOTLP and test to pass request to postOTLP handler endpoint --- route/errors.go | 1 + route/otlp_trace.go | 404 +++++++++++++++++++++++++++++++++++++++ route/otlp_trace_test.go | 348 +++++++++++++++++++++++++++++++++ route/route.go | 281 +-------------------------- route/route_test.go | 205 -------------------- 5 files changed, 760 insertions(+), 479 deletions(-) create mode 100644 route/otlp_trace.go create mode 100644 route/otlp_trace_test.go diff --git a/route/errors.go b/route/errors.go index 5e347eccff..ff697d9546 100644 --- a/route/errors.go +++ b/route/errors.go @@ -33,6 +33,7 @@ var ( ErrUpstreamUnavailable = handlerError{nil, "upstream target unavailable", http.StatusServiceUnavailable, true, true} ErrReqToEvent = handlerError{nil, "failed to parse event", http.StatusBadRequest, false, true} ErrBatchToEvent = handlerError{nil, "failed to parse event within batch", http.StatusBadRequest, false, true} + ErrInvalidContentType = handlerError{nil, "invalid content-type - only 'application/protobuf' is supported", http.StatusNotImplemented, false, true} ) func (r *Router) handlerReturnWithError(w http.ResponseWriter, he handlerError, err error) { diff --git a/route/otlp_trace.go b/route/otlp_trace.go new file mode 100644 index 0000000000..dca390e10a --- /dev/null +++ b/route/otlp_trace.go @@ -0,0 +1,404 @@ +package route + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + 
"time" + + "github.com/golang/protobuf/proto" + "github.com/honeycombio/refinery/types" + "github.com/klauspost/compress/zstd" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1" + common "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" + trace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" +) + +func (router *Router) postOTLP(w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + contentType := req.Header.Get("content-type") + if contentType != "application/protobuf" && contentType != "application/x-protobuf" { + router.handlerReturnWithError(w, ErrInvalidContentType, errors.New("invalid content-type")) + return + } + + apiKey, datasetName, err := getAPIKeyDatasetAndTokenFromHttpHeaders(req) + if err != nil { + router.handlerReturnWithError(w, ErrAuthNeeded, err) + return + } + + request, cleanup, err := parseOTLPBody(req, router.zstdDecoders) + defer cleanup() + if err != nil { + router.handlerReturnWithError(w, ErrPostBody, err) + } + + if err := processTraceRequest(req.Context(), router, request, apiKey, datasetName); err != nil { + router.handlerReturnWithError(w, ErrUpstreamFailed, err) + } +} + +func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) { + apiKey, datasetName, err := getAPIKeyDatasetAndTokenFromMetadata(ctx) + if err != nil { + return nil, status.Error(codes.Unauthenticated, err.Error()) + } + + if err := processTraceRequest(ctx, router, req, apiKey, datasetName); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &collectortrace.ExportTraceServiceResponse{}, nil +} + +func processTraceRequest( + ctx 
context.Context, + router *Router, + request *collectortrace.ExportTraceServiceRequest, + apiKey string, + datasetName string) error { + + var requestID types.RequestIDContextKey + debugLog := router.iopLogger.Debug().WithField("request_id", requestID) + + apiHost, err := router.Config.GetHoneycombAPI() + if err != nil { + router.Logger.Error().Logf("Unable to retrieve APIHost from config while processing OTLP batch") + return err + } + + for _, resourceSpan := range request.ResourceSpans { + resourceAttrs := make(map[string]interface{}) + + if resourceSpan.Resource != nil { + addAttributesToMap(resourceAttrs, resourceSpan.Resource.Attributes) + } + + for _, librarySpan := range resourceSpan.InstrumentationLibrarySpans { + library := librarySpan.InstrumentationLibrary + if library != nil { + if len(library.Name) > 0 { + resourceAttrs["library.name"] = library.Name + } + if len(library.Version) > 0 { + resourceAttrs["library.version"] = library.Version + } + } + + for _, span := range librarySpan.GetSpans() { + traceID := bytesToTraceID(span.TraceId) + spanID := hex.EncodeToString(span.SpanId) + timestamp := time.Unix(0, int64(span.StartTimeUnixNano)).UTC() + + eventAttrs := map[string]interface{}{ + "trace.trace_id": traceID, + "trace.span_id": spanID, + "type": getSpanKind(span.Kind), + "name": span.Name, + "duration_ms": float64(span.EndTimeUnixNano-span.StartTimeUnixNano) / float64(time.Millisecond), + "status_code": int32(getSpanStatusCode(span.Status)), + } + if span.ParentSpanId != nil { + eventAttrs["trace.parent_id"] = hex.EncodeToString(span.ParentSpanId) + } + if getSpanStatusCode(span.Status) == trace.Status_STATUS_CODE_ERROR { + eventAttrs["error"] = true + } + if span.Status != nil && len(span.Status.Message) > 0 { + eventAttrs["status_message"] = span.Status.Message + } + if span.Attributes != nil { + addAttributesToMap(eventAttrs, span.Attributes) + } + + sampleRate, err := getSampleRateFromAttributes(eventAttrs) + if err != nil { + 
debugLog.WithField("error", err.Error()).WithField("sampleRate", eventAttrs["sampleRate"]).Logf("error parsing sampleRate") + } + + // copy resource attributes to event attributes + for k, v := range resourceAttrs { + eventAttrs[k] = v + } + + events := make([]*types.Event, 0, 1+len(span.Events)+len(span.Links)) + events = append(events, &types.Event{ + Context: ctx, + APIHost: apiHost, + APIKey: apiKey, + Dataset: datasetName, + SampleRate: uint(sampleRate), + Timestamp: timestamp, + Data: eventAttrs, + }) + + for _, sevent := range span.Events { + timestamp := time.Unix(0, int64(sevent.TimeUnixNano)).UTC() + attrs := map[string]interface{}{ + "trace.trace_id": traceID, + "trace.parent_id": spanID, + "name": sevent.Name, + "parent_name": span.Name, + "meta.annotation_type": "span_event", + } + + if sevent.Attributes != nil { + addAttributesToMap(attrs, sevent.Attributes) + } + for k, v := range resourceAttrs { + attrs[k] = v + } + sampleRate, err := getSampleRateFromAttributes(attrs) + if err != nil { + debugLog. + WithField("error", err.Error()). + WithField("sampleRate", attrs["sampleRate"]). + Logf("error parsing sampleRate") + } + events = append(events, &types.Event{ + Context: ctx, + APIHost: apiHost, + APIKey: apiKey, + Dataset: datasetName, + SampleRate: uint(sampleRate), + Timestamp: timestamp, + Data: attrs, + }) + } + + for _, slink := range span.Links { + attrs := map[string]interface{}{ + "trace.trace_id": traceID, + "trace.parent_id": spanID, + "trace.link.trace_id": bytesToTraceID(slink.TraceId), + "trace.link.span_id": hex.EncodeToString(slink.SpanId), + "parent_name": span.Name, + "meta.annotation_type": "link", + } + + if slink.Attributes != nil { + addAttributesToMap(attrs, slink.Attributes) + } + for k, v := range resourceAttrs { + attrs[k] = v + } + sampleRate, err := getSampleRateFromAttributes(attrs) + if err != nil { + debugLog. + WithField("error", err.Error()). + WithField("sampleRate", attrs["sampleRate"]). 
+ Logf("error parsing sampleRate") + } + events = append(events, &types.Event{ + Context: ctx, + APIHost: apiHost, + APIKey: apiKey, + Dataset: datasetName, + SampleRate: uint(sampleRate), + Timestamp: time.Time{}, //links don't have timestamps, so use empty time + Data: attrs, + }) + } + + for _, evt := range events { + err = router.processEvent(evt, requestID) + if err != nil { + router.Logger.Error().Logf("Error processing event: " + err.Error()) + } + } + + } + } + } + + return nil +} + +func addAttributesToMap(attrs map[string]interface{}, attributes []*common.KeyValue) { + for _, attr := range attributes { + if attr.Key == "" { + continue + } + switch attr.Value.Value.(type) { + case *common.AnyValue_StringValue: + attrs[attr.Key] = attr.Value.GetStringValue() + case *common.AnyValue_BoolValue: + attrs[attr.Key] = attr.Value.GetBoolValue() + case *common.AnyValue_DoubleValue: + attrs[attr.Key] = attr.Value.GetDoubleValue() + case *common.AnyValue_IntValue: + attrs[attr.Key] = attr.Value.GetIntValue() + } + } +} + +func getSpanKind(kind trace.Span_SpanKind) string { + switch kind { + case trace.Span_SPAN_KIND_CLIENT: + return "client" + case trace.Span_SPAN_KIND_SERVER: + return "server" + case trace.Span_SPAN_KIND_PRODUCER: + return "producer" + case trace.Span_SPAN_KIND_CONSUMER: + return "consumer" + case trace.Span_SPAN_KIND_INTERNAL: + return "internal" + case trace.Span_SPAN_KIND_UNSPECIFIED: + fallthrough + default: + return "unspecified" + } +} + +// bytesToTraceID returns an ID suitable for use for spans and traces. Before +// encoding the bytes as a hex string, we want to handle cases where we are +// given 128-bit IDs with zero padding, e.g. 0000000000000000f798a1e7f33c8af6. +// To do this, we borrow a strategy from Jaeger [1] wherein we split the byte +// sequence into two parts. The leftmost part could contain all zeros. We use +// that to determine whether to return a 64-bit hex encoded string or a 128-bit +// one. 
+// +// [1]: https://github.com/jaegertracing/jaeger/blob/cd19b64413eca0f06b61d92fe29bebce1321d0b0/model/ids.go#L81 +func bytesToTraceID(traceID []byte) string { + // binary.BigEndian.Uint64() does a bounds check on traceID which will + // cause a panic if traceID is fewer than 8 bytes. In this case, we don't + // need to check for zero padding on the high part anyway, so just return a + // hex string. + if len(traceID) < traceIDShortLength { + return fmt.Sprintf("%x", traceID) + } + var low uint64 + if len(traceID) == traceIDLongLength { + low = binary.BigEndian.Uint64(traceID[traceIDShortLength:]) + if high := binary.BigEndian.Uint64(traceID[:traceIDShortLength]); high != 0 { + return fmt.Sprintf("%016x%016x", high, low) + } + } else { + low = binary.BigEndian.Uint64(traceID) + } + + return fmt.Sprintf("%016x", low) +} + +// getSpanStatusCode checks the value of both the deprecated code and code fields +// on the span status and using the rules specified in the backward compatibility +// notes in the protobuf definitions. 
See: +// +// https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L230 +func getSpanStatusCode(status *trace.Status) trace.Status_StatusCode { + if status == nil { + return trace.Status_STATUS_CODE_UNSET + } + if status.Code == trace.Status_STATUS_CODE_UNSET { + if status.DeprecatedCode == trace.Status_DEPRECATED_STATUS_CODE_OK { + return trace.Status_STATUS_CODE_UNSET + } + return trace.Status_STATUS_CODE_ERROR + } + return status.Code +} + +func getAPIKeyDatasetAndTokenFromMetadata(ctx context.Context) ( + apiKey string, + datasetName string, + err error) { + if md, ok := metadata.FromIncomingContext(ctx); ok { + apiKey = getValueFromMetadata(md, "x-honeycomb-team") + datasetName = getValueFromMetadata(md, "x-honeycomb-dataset") + } + + if err := validateHeaders(apiKey, datasetName); err != nil { + return "", "", err + } + return apiKey, datasetName, nil +} + +func getValueFromMetadata(md metadata.MD, key string) string { + if vals := md.Get(key); len(vals) > 0 { + return vals[0] + } + return "" +} + +func getAPIKeyDatasetAndTokenFromHttpHeaders(r *http.Request) ( + apiKey string, + datasetName string, + err error) { + apiKey = r.Header.Get("x-honeycomb-team") + datasetName = r.Header.Get("x-honeycomb-dataset") + + if err := validateHeaders(apiKey, datasetName); err != nil { + return "", "", err + } + return apiKey, datasetName, nil +} + +func validateHeaders(apiKey string, datasetName string) error { + if apiKey == "" { + return errors.New("missing x-honeycomb-team header") + } + if datasetName == "" { + return errors.New("missing x-honeycomb-team header") + } + return nil +} + +func parseOTLPBody(r *http.Request, zstdDecoders chan *zstd.Decoder) (request *collectortrace.ExportTraceServiceRequest, cleanup func(), err error) { + cleanup = func() { /* empty cleanup */ } + bodyBytes, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, cleanup, err + } + 
bodyReader := bytes.NewReader(bodyBytes) + + var reader io.Reader + switch r.Header.Get("Content-Encoding") { + case "gzip": + var err error + reader, err = gzip.NewReader(bodyReader) + if err != nil { + return nil, cleanup, err + } + case "zstd": + zReader := <-zstdDecoders + cleanup = func() { + zReader.Reset(nil) + zstdDecoders <- zReader + } + + err = zReader.Reset(bodyReader) + if err != nil { + return nil, cleanup, err + } + + reader = zReader + default: + reader = bodyReader + } + + bytes, err := ioutil.ReadAll(reader) + if err != nil { + return nil, cleanup, err + } + + otlpRequet := &collectortrace.ExportTraceServiceRequest{} + err = proto.Unmarshal(bytes, otlpRequet) + if err != nil { + return nil, cleanup, err + } + + return otlpRequet, cleanup, nil +} diff --git a/route/otlp_trace_test.go b/route/otlp_trace_test.go new file mode 100644 index 0000000000..2354b29031 --- /dev/null +++ b/route/otlp_trace_test.go @@ -0,0 +1,348 @@ +package route + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/hex" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/honeycombio/refinery/config" + collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1" + common "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" + trace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" + "github.com/honeycombio/refinery/logger" + "github.com/honeycombio/refinery/metrics" + "github.com/honeycombio/refinery/transmit" + "github.com/klauspost/compress/zstd" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/metadata" +) + +func TestOTLPHandler(t *testing.T) { + md := metadata.New(map[string]string{"x-honeycomb-team": "meow", "x-honeycomb-dataset": "ds"}) + ctx := 
metadata.NewIncomingContext(context.Background(), md) + + mockMetrics := metrics.MockMetrics{} + mockMetrics.Start() + mockTransmission := &transmit.MockTransmission{} + mockTransmission.Start() + decoders, err := makeDecoders(1) + if err != nil { + t.Error(err) + } + router := &Router{ + Config: &config.MockConfig{}, + Metrics: &mockMetrics, + UpstreamTransmission: mockTransmission, + iopLogger: iopLogger{ + Logger: &logger.MockLogger{}, + incomingOrPeer: "incoming", + }, + Logger: &logger.MockLogger{}, + zstdDecoders: decoders, + } + + conf := &config.MockConfig{ + GetSendDelayVal: 0, + GetTraceTimeoutVal: 60 * time.Second, + GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1}, + SendTickerVal: 2 * time.Millisecond, + GetInMemoryCollectorCacheCapacityVal: config.InMemoryCollectorCacheCapacity{ + CacheCapacity: 100, + MaxAlloc: 100, + }, + } + + t.Run("span with status", func(t *testing.T) { + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: helperOTLPRequestSpansWithStatus(), + }}, + }}, + } + _, err := router.Export(ctx, req) + if err != nil { + t.Errorf(`Unexpected error: %s`, err) + } + assert.Equal(t, 2, len(mockTransmission.Events)) + mockTransmission.Flush() + }) + + t.Run("span without status", func(t *testing.T) { + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: helperOTLPRequestSpansWithoutStatus(), + }}, + }}, + } + _, err := router.Export(ctx, req) + if err != nil { + t.Errorf(`Unexpected error: %s`, err) + } + assert.Equal(t, 2, len(mockTransmission.Events)) + mockTransmission.Flush() + }) + + // TODO: (MG) figuure out how we can test JSON created from OTLP requests + // Below is example, but requires significant usage of collector, sampler, conf, etc + t.Run("creates events for span events", 
func(t *testing.T) { + t.Skip("need additional work to support inspecting outbound JSON") + + traceID := []byte{0, 0, 0, 0, 1} + spanID := []byte{1, 0, 0, 0, 0} + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: []*trace.Span{{ + TraceId: traceID, + SpanId: spanID, + Name: "span_with_event", + Events: []*trace.Span_Event{{ + TimeUnixNano: 12345, + Name: "span_link", + Attributes: []*common.KeyValue{{ + Key: "event_attr_key", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "event_attr_val"}}, + }}, + }}, + }}, + }}, + }}, + } + _, err := router.Export(ctx, req) + if err != nil { + t.Errorf(`Unexpected error: %s`, err) + } + + time.Sleep(conf.SendTickerVal * 2) + + mockTransmission.Mux.Lock() + assert.Equal(t, 2, len(mockTransmission.Events)) + + spanEvent := mockTransmission.Events[0] + // assert.Equal(t, time.Unix(0, int64(12345)).UTC(), spanEvent.Timestamp) + assert.Equal(t, bytesToTraceID(traceID), spanEvent.Data["trace.trace_id"]) + assert.Equal(t, hex.EncodeToString(spanID), spanEvent.Data["trace.span_id"]) + assert.Equal(t, "span_link", spanEvent.Data["span.name"]) + assert.Equal(t, "span_with_event", spanEvent.Data["parent.name"]) + assert.Equal(t, "span_event", spanEvent.Data["meta.annotation_type"]) + assert.Equal(t, "event_attr_key", spanEvent.Data["event_attr_val"]) + mockTransmission.Mux.Unlock() + mockTransmission.Flush() + }) + + t.Run("creates events for span links", func(t *testing.T) { + t.Skip("need additional work to support inspecting outbound JSON") + + traceID := []byte{0, 0, 0, 0, 1} + spanID := []byte{1, 0, 0, 0, 0} + linkTraceID := []byte{0, 0, 0, 0, 2} + linkSpanID := []byte{2, 0, 0, 0, 0} + + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: []*trace.Span{{ + Name: 
"span_with_link", + TraceId: traceID, + SpanId: spanID, + Links: []*trace.Span_Link{{ + TraceId: traceID, + SpanId: spanID, + TraceState: "link_trace_state", + Attributes: []*common.KeyValue{{ + Key: "link_attr_key", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "link_attr_val"}}, + }}, + }}, + }}, + }}, + }}, + } + _, err := router.Export(ctx, req) + if err != nil { + t.Errorf(`Unexpected error: %s`, err) + } + + time.Sleep(conf.SendTickerVal * 2) + assert.Equal(t, 2, len(mockTransmission.Events)) + + spanLink := mockTransmission.Events[1] + assert.Equal(t, bytesToTraceID(traceID), spanLink.Data["trace.trace_id"]) + assert.Equal(t, hex.EncodeToString(spanID), spanLink.Data["trace.span_id"]) + assert.Equal(t, bytesToTraceID(linkTraceID), spanLink.Data["trace.link.trace_id"]) + assert.Equal(t, hex.EncodeToString(linkSpanID), spanLink.Data["trace.link.span_id"]) + assert.Equal(t, "link", spanLink.Data["meta.annotation_type"]) + assert.Equal(t, "link_attr_val", spanLink.Data["link_attr_key"]) + mockTransmission.Flush() + }) + + t.Run("can receive OTLP over HTTP/protobuf", func(t *testing.T) { + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: helperOTLPRequestSpansWithStatus(), + }}, + }}, + } + body, err := proto.Marshal(req) + if err != nil { + t.Error(err) + } + + request, _ := http.NewRequest("POST", "/v1/traces", strings.NewReader(string(body))) + request.Header = http.Header{} + request.Header.Set("content-type", "application/protobuf") + request.Header.Set("x-honeycomb-team", "apikey") + request.Header.Set("x-honeycomb-dataset", "dataset") + + w := httptest.NewRecorder() + router.postOTLP(w, request) + assert.Equal(t, w.Code, http.StatusOK) + + assert.Equal(t, 2, len(mockTransmission.Events)) + mockTransmission.Flush() + }) + + t.Run("can receive OTLP over HTTP/protobuf with gzip encoding", func(t *testing.T) { + 
req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: helperOTLPRequestSpansWithStatus(), + }}, + }}, + } + body, err := proto.Marshal(req) + if err != nil { + t.Error(err) + } + + buf := new(bytes.Buffer) + writer := gzip.NewWriter(buf) + writer.Write(body) + writer.Close() + if err != nil { + t.Error(err) + } + + request, _ := http.NewRequest("POST", "/v1/traces", strings.NewReader(buf.String())) + request.Header = http.Header{} + request.Header.Set("content-type", "application/protobuf") + request.Header.Set("content-encoding", "gzip") + request.Header.Set("x-honeycomb-team", "apikey") + request.Header.Set("x-honeycomb-dataset", "dataset") + + w := httptest.NewRecorder() + router.postOTLP(w, request) + assert.Equal(t, w.Code, http.StatusOK) + + assert.Equal(t, 2, len(mockTransmission.Events)) + mockTransmission.Flush() + }) + + t.Run("can receive OTLP over HTTP/protobuf with zstd encoding", func(t *testing.T) { + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: helperOTLPRequestSpansWithStatus(), + }}, + }}, + } + body, err := proto.Marshal(req) + if err != nil { + t.Error(err) + } + + buf := new(bytes.Buffer) + writer, err := zstd.NewWriter(buf) + if err != nil { + t.Error(err) + } + writer.Write(body) + writer.Close() + if err != nil { + t.Error(err) + } + + request, _ := http.NewRequest("POST", "/v1/traces", strings.NewReader(buf.String())) + request.Header = http.Header{} + request.Header.Set("content-type", "application/protobuf") + request.Header.Set("content-encoding", "zstd") + request.Header.Set("x-honeycomb-team", "apikey") + request.Header.Set("x-honeycomb-dataset", "dataset") + + w := httptest.NewRecorder() + router.postOTLP(w, request) + assert.Equal(t, w.Code, http.StatusOK) + + assert.Equal(t, 2, 
len(mockTransmission.Events)) + mockTransmission.Flush() + }) + + t.Run("rejects OTLP over HTTP/JSON ", func(t *testing.T) { + request, _ := http.NewRequest("POST", "/v1/traces", strings.NewReader("{}")) + request.Header = http.Header{} + request.Header.Set("content-type", "application/json") + request.Header.Set("x-honeycomb-team", "apikey") + request.Header.Set("x-honeycomb-dataset", "dataset") + + w := httptest.NewRecorder() + router.postOTLP(w, request) + assert.Equal(t, w.Code, http.StatusNotImplemented) + assert.Equal(t, `{"source":"refinery","error":"invalid content-type - only 'application/protobuf' is supported"}`, string(w.Body.String())) + + assert.Equal(t, 0, len(mockTransmission.Events)) + mockTransmission.Flush() + }) +} + +func helperOTLPRequestSpansWithoutStatus() []*trace.Span { + now := time.Now() + return []*trace.Span{ + { + StartTimeUnixNano: uint64(now.UnixNano()), + Events: []*trace.Span_Event{ + { + TimeUnixNano: uint64(now.UnixNano()), + Attributes: []*common.KeyValue{ + { + Key: "attribute_key", + Value: &common.AnyValue{ + Value: &common.AnyValue_StringValue{StringValue: "attribute_value"}, + }, + }, + }, + }, + }, + }, + } +} + +func helperOTLPRequestSpansWithStatus() []*trace.Span { + now := time.Now() + return []*trace.Span{ + { + StartTimeUnixNano: uint64(now.UnixNano()), + Events: []*trace.Span_Event{ + { + TimeUnixNano: uint64(now.UnixNano()), + Attributes: []*common.KeyValue{ + { + Key: "attribute_key", + Value: &common.AnyValue{ + Value: &common.AnyValue_StringValue{StringValue: "attribute_value"}, + }, + }, + }, + }, + }, + Status: &trace.Status{Code: trace.Status_STATUS_CODE_OK}, + }, + } +} diff --git a/route/route.go b/route/route.go index 437a7c1b19..6597318cc1 100644 --- a/route/route.go +++ b/route/route.go @@ -4,8 +4,6 @@ import ( "bytes" "compress/gzip" "context" - "encoding/binary" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -15,7 +13,6 @@ import ( "net" "net/http" "strconv" - "strings" "sync" "time" @@ -39,8 
+36,6 @@ import ( "github.com/honeycombio/refinery/types" collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1" - common "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" - trace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" ) const ( @@ -161,6 +156,13 @@ func (r *Router) LnS(incomingOrPeer string) { authedMuxxer.HandleFunc("/events/{datasetName}", r.event).Name("event") authedMuxxer.HandleFunc("/batch/{datasetName}", r.batch).Name("batch") + // require an auth header for OTLP requests + otlpMuxxer := muxxer.PathPrefix("/v1/").Methods("POST").Subrouter() + otlpMuxxer.Use(r.apiKeyChecker) + + // handle OTLP trace requests + otlpMuxxer.HandleFunc("/traces", r.postOTLP).Name("otlp") + // pass everything else through unmolested muxxer.PathPrefix("/").HandlerFunc(r.proxy).Name("proxy") @@ -386,190 +388,6 @@ func (r *Router) batch(w http.ResponseWriter, req *http.Request) { w.Write(response) } -func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) { - - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - r.Logger.Error().Logf("Unable to retreive metadata from OTLP request.") - return &collectortrace.ExportTraceServiceResponse{}, nil - } - - // requestID is used to track a requst as it moves between refinery nodes (peers) - // the OTLP handler only receives incoming (not peer) requests for now so will be empty here - var requestID types.RequestIDContextKey - debugLog := r.iopLogger.Debug().WithField("request_id", requestID) - - apiKey, dataset := getAPIKeyAndDatasetFromMetadata(md) - if apiKey == "" { - r.Logger.Error().Logf("Received OTLP request without Honeycomb APIKey header") - return &collectortrace.ExportTraceServiceResponse{}, nil - } - if dataset == "" { - r.Logger.Error().Logf("Received OTLP request without 
Honeycomb dataset header") - return &collectortrace.ExportTraceServiceResponse{}, nil - } - - apiHost, err := r.Config.GetHoneycombAPI() - if err != nil { - r.Logger.Error().Logf("Unable to retrieve APIHost from config while processing OTLP batch") - return &collectortrace.ExportTraceServiceResponse{}, nil - } - - var grpcRequestEncoding string - if val := md.Get("grpc-accept-encoding"); val != nil { - grpcRequestEncoding = strings.Join(val, ",") - } - - for _, resourceSpan := range req.ResourceSpans { - resourceAttrs := make(map[string]interface{}) - - if resourceSpan.Resource != nil { - addAttributesToMap(resourceAttrs, resourceSpan.Resource.Attributes) - } - - for _, librarySpan := range resourceSpan.InstrumentationLibrarySpans { - library := librarySpan.InstrumentationLibrary - if library != nil { - if len(library.Name) > 0 { - resourceAttrs["library.name"] = library.Name - } - if len(library.Version) > 0 { - resourceAttrs["library.version"] = library.Version - } - } - - for _, span := range librarySpan.GetSpans() { - traceID := bytesToTraceID(span.TraceId) - spanID := hex.EncodeToString(span.SpanId) - timestamp := time.Unix(0, int64(span.StartTimeUnixNano)).UTC() - - eventAttrs := map[string]interface{}{ - "trace.trace_id": traceID, - "trace.span_id": spanID, - "type": getSpanKind(span.Kind), - "name": span.Name, - "duration_ms": float64(span.EndTimeUnixNano-span.StartTimeUnixNano) / float64(time.Millisecond), - "status_code": int32(r.getSpanStatusCode(span.Status)), - } - if span.ParentSpanId != nil { - eventAttrs["trace.parent_id"] = hex.EncodeToString(span.ParentSpanId) - } - if r.getSpanStatusCode(span.Status) == trace.Status_STATUS_CODE_ERROR { - eventAttrs["error"] = true - } - if span.Status != nil && len(span.Status.Message) > 0 { - eventAttrs["status_message"] = span.Status.Message - } - if span.Attributes != nil { - addAttributesToMap(eventAttrs, span.Attributes) - } - if grpcRequestEncoding != "" { - eventAttrs["grpc_request_encoding"] = 
grpcRequestEncoding - } - - sampleRate, err := getSampleRateFromAttributes(eventAttrs) - if err != nil { - debugLog.WithField("error", err.Error()).WithField("sampleRate", eventAttrs["sampleRate"]).Logf("error parsing sampleRate") - } - - // copy resource attributes to event attributes - for k, v := range resourceAttrs { - eventAttrs[k] = v - } - - events := make([]*types.Event, 0, 1+len(span.Events)+len(span.Links)) - events = append(events, &types.Event{ - Context: ctx, - APIHost: apiHost, - APIKey: apiKey, - Dataset: dataset, - SampleRate: uint(sampleRate), - Timestamp: timestamp, - Data: eventAttrs, - }) - - for _, sevent := range span.Events { - timestamp := time.Unix(0, int64(sevent.TimeUnixNano)).UTC() - attrs := map[string]interface{}{ - "trace.trace_id": traceID, - "trace.parent_id": spanID, - "name": sevent.Name, - "parent_name": span.Name, - "meta.annotation_type": "span_event", - } - - if sevent.Attributes != nil { - addAttributesToMap(attrs, sevent.Attributes) - } - for k, v := range resourceAttrs { - attrs[k] = v - } - sampleRate, err := getSampleRateFromAttributes(attrs) - if err != nil { - debugLog. - WithField("error", err.Error()). - WithField("sampleRate", attrs["sampleRate"]). - Logf("error parsing sampleRate") - } - events = append(events, &types.Event{ - Context: ctx, - APIHost: apiHost, - APIKey: apiKey, - Dataset: dataset, - SampleRate: uint(sampleRate), - Timestamp: timestamp, - Data: attrs, - }) - } - - for _, slink := range span.Links { - attrs := map[string]interface{}{ - "trace.trace_id": traceID, - "trace.parent_id": spanID, - "trace.link.trace_id": bytesToTraceID(slink.TraceId), - "trace.link.span_id": hex.EncodeToString(slink.SpanId), - "parent_name": span.Name, - "meta.annotation_type": "link", - } - - if slink.Attributes != nil { - addAttributesToMap(attrs, slink.Attributes) - } - for k, v := range resourceAttrs { - attrs[k] = v - } - sampleRate, err := getSampleRateFromAttributes(attrs) - if err != nil { - debugLog. 
- WithField("error", err.Error()). - WithField("sampleRate", attrs["sampleRate"]). - Logf("error parsing sampleRate") - } - events = append(events, &types.Event{ - Context: ctx, - APIHost: apiHost, - APIKey: apiKey, - Dataset: dataset, - SampleRate: uint(sampleRate), - Timestamp: time.Time{}, //links don't have timestamps, so use empty time - Data: attrs, - }) - } - - for _, evt := range events { - err = r.processEvent(evt, requestID) - if err != nil { - r.Logger.Error().Logf("Error processing event: " + err.Error()) - } - } - - } - } - } - - return &collectortrace.ExportTraceServiceResponse{}, nil -} - func (r *Router) processEvent(ev *types.Event, reqID interface{}) error { debugLog := r.iopLogger.Debug(). WithField("request_id", reqID). @@ -806,91 +624,6 @@ func getFirstValueFromMetadata(key string, md metadata.MD) string { return "" } -func addAttributesToMap(attrs map[string]interface{}, attributes []*common.KeyValue) { - for _, attr := range attributes { - if attr.Key == "" { - continue - } - switch attr.Value.Value.(type) { - case *common.AnyValue_StringValue: - attrs[attr.Key] = attr.Value.GetStringValue() - case *common.AnyValue_BoolValue: - attrs[attr.Key] = attr.Value.GetBoolValue() - case *common.AnyValue_DoubleValue: - attrs[attr.Key] = attr.Value.GetDoubleValue() - case *common.AnyValue_IntValue: - attrs[attr.Key] = attr.Value.GetIntValue() - } - } -} - -func getSpanKind(kind trace.Span_SpanKind) string { - switch kind { - case trace.Span_SPAN_KIND_CLIENT: - return "client" - case trace.Span_SPAN_KIND_SERVER: - return "server" - case trace.Span_SPAN_KIND_PRODUCER: - return "producer" - case trace.Span_SPAN_KIND_CONSUMER: - return "consumer" - case trace.Span_SPAN_KIND_INTERNAL: - return "internal" - case trace.Span_SPAN_KIND_UNSPECIFIED: - fallthrough - default: - return "unspecified" - } -} - -// bytesToTraceID returns an ID suitable for use for spans and traces. 
Before -// encoding the bytes as a hex string, we want to handle cases where we are -// given 128-bit IDs with zero padding, e.g. 0000000000000000f798a1e7f33c8af6. -// To do this, we borrow a strategy from Jaeger [1] wherein we split the byte -// sequence into two parts. The leftmost part could contain all zeros. We use -// that to determine whether to return a 64-bit hex encoded string or a 128-bit -// one. -// -// [1]: https://github.com/jaegertracing/jaeger/blob/cd19b64413eca0f06b61d92fe29bebce1321d0b0/model/ids.go#L81 -func bytesToTraceID(traceID []byte) string { - // binary.BigEndian.Uint64() does a bounds check on traceID which will - // cause a panic if traceID is fewer than 8 bytes. In this case, we don't - // need to check for zero padding on the high part anyway, so just return a - // hex string. - if len(traceID) < traceIDShortLength { - return fmt.Sprintf("%x", traceID) - } - var low uint64 - if len(traceID) == traceIDLongLength { - low = binary.BigEndian.Uint64(traceID[traceIDShortLength:]) - if high := binary.BigEndian.Uint64(traceID[:traceIDShortLength]); high != 0 { - return fmt.Sprintf("%016x%016x", high, low) - } - } else { - low = binary.BigEndian.Uint64(traceID) - } - - return fmt.Sprintf("%016x", low) -} - -// getSpanStatusCode checks the value of both the deprecated code and code fields -// on the span status and using the rules specified in the backward compatibility -// notes in the protobuf definitions. 
See: -// -// https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L230 -func (r *Router) getSpanStatusCode(status *trace.Status) trace.Status_StatusCode { - if status == nil { - return trace.Status_STATUS_CODE_UNSET - } - if status.Code == trace.Status_STATUS_CODE_UNSET { - if status.DeprecatedCode == trace.Status_DEPRECATED_STATUS_CODE_OK { - return trace.Status_STATUS_CODE_UNSET - } - return trace.Status_STATUS_CODE_ERROR - } - return status.Code -} - func getSampleRateFromAttributes(attrs map[string]interface{}) (int, error) { var sampleRateKey string if attrs["sampleRate"] != nil { diff --git a/route/route_test.go b/route/route_test.go index 1522d7df59..2080396a87 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -3,8 +3,6 @@ package route import ( "bytes" "compress/gzip" - "context" - "encoding/hex" "fmt" "io" "io/ioutil" @@ -18,13 +16,9 @@ import ( "github.com/facebookgo/inject" "github.com/honeycombio/refinery/collect" "github.com/honeycombio/refinery/config" - collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1" - common "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" - trace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" "github.com/honeycombio/refinery/logger" "github.com/honeycombio/refinery/metrics" "github.com/honeycombio/refinery/transmit" - "github.com/stretchr/testify/assert" "github.com/gorilla/mux" "github.com/honeycombio/refinery/sharder" @@ -399,160 +393,6 @@ func TestDebugTrace(t *testing.T) { } } -func TestOTLPHandler(t *testing.T) { - md := metadata.New(map[string]string{"x-honeycomb-team": "meow", "x-honeycomb-dataset": "ds"}) - ctx := 
metadata.NewIncomingContext(context.Background(), md) - - mockMetrics := metrics.MockMetrics{} - mockMetrics.Start() - mockTransmission := &transmit.MockTransmission{} - mockTransmission.Start() - router := &Router{ - Config: &config.MockConfig{}, - Metrics: &mockMetrics, - UpstreamTransmission: mockTransmission, - iopLogger: iopLogger{ - Logger: &logger.NullLogger{}, - incomingOrPeer: "incoming", - }, - } - - conf := &config.MockConfig{ - GetSendDelayVal: 0, - GetTraceTimeoutVal: 60 * time.Second, - GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1}, - SendTickerVal: 2 * time.Millisecond, - GetInMemoryCollectorCacheCapacityVal: config.InMemoryCollectorCacheCapacity{ - CacheCapacity: 100, - MaxAlloc: 100, - }, - } - - t.Run("span with status", func(t *testing.T) { - req := &collectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ - Spans: helperOTLPRequestSpansWithStatus(), - }}, - }}, - } - _, err := router.Export(ctx, req) - if err != nil { - t.Errorf(`Unexpected error: %s`, err) - } - assert.Equal(t, 2, len(mockTransmission.Events)) - mockTransmission.Flush() - }) - - t.Run("span without status", func(t *testing.T) { - req := &collectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ - Spans: helperOTLPRequestSpansWithoutStatus(), - }}, - }}, - } - _, err := router.Export(ctx, req) - if err != nil { - t.Errorf(`Unexpected error: %s`, err) - } - assert.Equal(t, 2, len(mockTransmission.Events)) - mockTransmission.Flush() - }) - - // TODO: (MG) figuure out how we can test JSON created from OTLP requests - // Below is example, but requires significant usage of collector, sampler, conf, etc - t.Run("creates events for span events", func(t *testing.T) { - t.Skip("need additional work to support inspecting outbound JSON") - - traceID := []byte{0, 0, 0, 0, 1} - 
spanID := []byte{1, 0, 0, 0, 0} - req := &collectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ - Spans: []*trace.Span{{ - TraceId: traceID, - SpanId: spanID, - Name: "span_with_event", - Events: []*trace.Span_Event{{ - TimeUnixNano: 12345, - Name: "span_link", - Attributes: []*common.KeyValue{{ - Key: "event_attr_key", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "event_attr_val"}}, - }}, - }}, - }}, - }}, - }}, - } - _, err := router.Export(ctx, req) - if err != nil { - t.Errorf(`Unexpected error: %s`, err) - } - - time.Sleep(conf.SendTickerVal * 2) - - mockTransmission.Mux.Lock() - assert.Equal(t, 2, len(mockTransmission.Events)) - - spanEvent := mockTransmission.Events[0] - // assert.Equal(t, time.Unix(0, int64(12345)).UTC(), spanEvent.Timestamp) - assert.Equal(t, bytesToTraceID(traceID), spanEvent.Data["trace.trace_id"]) - assert.Equal(t, hex.EncodeToString(spanID), spanEvent.Data["trace.span_id"]) - assert.Equal(t, "span_link", spanEvent.Data["span.name"]) - assert.Equal(t, "span_with_event", spanEvent.Data["parent.name"]) - assert.Equal(t, "span_event", spanEvent.Data["meta.annotation_type"]) - assert.Equal(t, "event_attr_key", spanEvent.Data["event_attr_val"]) - mockTransmission.Mux.Unlock() - mockTransmission.Flush() - }) - - t.Run("creates events for span links", func(t *testing.T) { - t.Skip("need additional work to support inspecting outbound JSON") - - traceID := []byte{0, 0, 0, 0, 1} - spanID := []byte{1, 0, 0, 0, 0} - linkTraceID := []byte{0, 0, 0, 0, 2} - linkSpanID := []byte{2, 0, 0, 0, 0} - - req := &collectortrace.ExportTraceServiceRequest{ - ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ - Spans: []*trace.Span{{ - Name: "span_with_link", - TraceId: traceID, - SpanId: spanID, - Links: []*trace.Span_Link{{ - TraceId: traceID, - SpanId: spanID, - TraceState: 
"link_trace_state", - Attributes: []*common.KeyValue{{ - Key: "link_attr_key", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "link_attr_val"}}, - }}, - }}, - }}, - }}, - }}, - } - _, err := router.Export(ctx, req) - if err != nil { - t.Errorf(`Unexpected error: %s`, err) - } - - time.Sleep(conf.SendTickerVal * 2) - assert.Equal(t, 2, len(mockTransmission.Events)) - - spanLink := mockTransmission.Events[1] - assert.Equal(t, bytesToTraceID(traceID), spanLink.Data["trace.trace_id"]) - assert.Equal(t, hex.EncodeToString(spanID), spanLink.Data["trace.span_id"]) - assert.Equal(t, bytesToTraceID(linkTraceID), spanLink.Data["trace.link.trace_id"]) - assert.Equal(t, hex.EncodeToString(linkSpanID), spanLink.Data["trace.link.span_id"]) - assert.Equal(t, "link", spanLink.Data["meta.annotation_type"]) - assert.Equal(t, "link_attr_val", spanLink.Data["link_attr_key"]) - mockTransmission.Flush() - }) -} - func TestDependencyInjection(t *testing.T) { var g inject.Graph err := g.Provide( @@ -575,51 +415,6 @@ func TestDependencyInjection(t *testing.T) { } } -func helperOTLPRequestSpansWithoutStatus() []*trace.Span { - now := time.Now() - return []*trace.Span{ - { - StartTimeUnixNano: uint64(now.UnixNano()), - Events: []*trace.Span_Event{ - { - TimeUnixNano: uint64(now.UnixNano()), - Attributes: []*common.KeyValue{ - { - Key: "attribute_key", - Value: &common.AnyValue{ - Value: &common.AnyValue_StringValue{StringValue: "attribute_value"}, - }, - }, - }, - }, - }, - }, - } -} - -func helperOTLPRequestSpansWithStatus() []*trace.Span { - now := time.Now() - return []*trace.Span{ - { - StartTimeUnixNano: uint64(now.UnixNano()), - Events: []*trace.Span_Event{ - { - TimeUnixNano: uint64(now.UnixNano()), - Attributes: []*common.KeyValue{ - { - Key: "attribute_key", - Value: &common.AnyValue{ - Value: &common.AnyValue_StringValue{StringValue: "attribute_value"}, - }, - }, - }, - }, - }, - Status: &trace.Status{Code: trace.Status_STATUS_CODE_OK}, - }, - } -} - type 
TestSharder struct{} func (s *TestSharder) MyShard() sharder.Shard { return nil } From 90ae222fc6d00150f6e83e88ab41f46036318752 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Jul 2021 15:04:37 +0100 Subject: [PATCH 048/351] Bump github.com/grpc-ecosystem/grpc-gateway from 1.12.1 to 1.16.0 (#233) Bumps [github.com/grpc-ecosystem/grpc-gateway](https://github.com/grpc-ecosystem/grpc-gateway) from 1.12.1 to 1.16.0. - [Release notes](https://github.com/grpc-ecosystem/grpc-gateway/releases) - [Changelog](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/CHANGELOG.md) - [Commits](https://github.com/grpc-ecosystem/grpc-gateway/compare/v1.12.1...v1.16.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 24 +++++++++++++++--------- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index f63a8706b0..5bd4205cc4 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/golang/protobuf v1.4.3 github.com/gomodule/redigo v1.8.4 github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d - github.com/grpc-ecosystem/grpc-gateway v1.12.1 + github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 github.com/honeycombio/libhoney-go v1.12.4 diff --git a/go.sum b/go.sum index 18cdb61801..1732b0e98e 100644 --- a/go.sum +++ b/go.sum @@ -20,7 +20,7 @@ github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template 
v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -32,6 +32,7 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -45,6 +46,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod 
h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= @@ -93,6 +95,7 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= @@ -129,8 +132,8 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -290,8 +293,9 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -324,13 +328,14 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -380,8 +385,9 @@ 
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -401,17 +407,17 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 
h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.37.1 h1:ARnQJNWxGyYJpdf/JXscNlQr/uv607ZPU9Z7ogHi+iI= google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= From c1cf8643390e13b6e9f93b81316ee6baab41c98b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Jul 2021 15:05:59 +0100 Subject: [PATCH 049/351] Bump github.com/golang/protobuf from 1.4.3 to 1.5.2 (#252) Bumps [github.com/golang/protobuf](https://github.com/golang/protobuf) from 1.4.3 to 1.5.2. 
- [Release notes](https://github.com/golang/protobuf/releases) - [Commits](https://github.com/golang/protobuf/compare/v1.4.3...v1.5.2) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 5bd4205cc4..9664168a20 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/go-playground/universal-translator v0.17.0 // indirect github.com/go-playground/validator v9.31.0+incompatible github.com/gogo/protobuf v1.3.1 - github.com/golang/protobuf v1.4.3 + github.com/golang/protobuf v1.5.2 github.com/gomodule/redigo v1.8.4 github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d github.com/grpc-ecosystem/grpc-gateway v1.16.0 diff --git a/go.sum b/go.sum index 1732b0e98e..923f4a545a 100644 --- a/go.sum +++ b/go.sum @@ -105,8 +105,9 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/gomodule/redigo v1.8.4 h1:Z5JUg94HMTR1XpwBaSH4vq3+PNSIykBLxMdglbw10gg= github.com/gomodule/redigo v1.8.4/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -115,8 +116,9 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -428,8 +430,10 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 
h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= From 0f9e6984c677502f7be617e1b6b0d6ab8fedf5ad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Jul 2021 15:14:23 +0100 Subject: [PATCH 050/351] Bump github.com/gogo/protobuf from 1.3.1 to 1.3.2 (#242) Bumps [github.com/gogo/protobuf](https://github.com/gogo/protobuf) from 1.3.1 to 1.3.2. 
- [Release notes](https://github.com/gogo/protobuf/releases) - [Commits](https://github.com/gogo/protobuf/compare/v1.3.1...v1.3.2) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 3 +-- go.sum | 23 ++++++++++++++++++----- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 9664168a20..da2df73d3f 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/fsnotify/fsnotify v1.4.9 github.com/go-playground/universal-translator v0.17.0 // indirect github.com/go-playground/validator v9.31.0+incompatible - github.com/gogo/protobuf v1.3.1 + github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 github.com/gomodule/redigo v1.8.4 github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d @@ -36,7 +36,6 @@ require ( github.com/spf13/viper v1.7.0 github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 - golang.org/x/text v0.3.3 // indirect google.golang.org/grpc v1.37.1 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect diff --git a/go.sum b/go.sum index 923f4a545a..5582b5e57a 100644 --- a/go.sum +++ b/go.sum @@ -85,8 +85,8 @@ github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+Pugk github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -173,7 +173,7 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= @@ -284,6 +284,8 @@ github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+ github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod 
h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -296,6 +298,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -316,6 +319,8 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -330,10 +335,12 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -343,6 +350,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -358,6 +367,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -369,7 +379,6 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -386,7 +395,11 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From a3652a5ba13812240af0017678cf64e7472cf1a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Jul 2021 15:24:19 +0100 Subject: [PATCH 051/351] Bump 
github.com/spf13/viper from 1.7.0 to 1.8.1 (#274) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.7.0 to 1.8.1. - [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.7.0...v1.8.1) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 13 +-- go.sum | 355 +++++++++++++++++++++++++++++++++++++++++++++------------ 2 files changed, 288 insertions(+), 80 deletions(-) diff --git a/go.mod b/go.mod index da2df73d3f..bdccb8892c 100644 --- a/go.mod +++ b/go.mod @@ -20,24 +20,17 @@ require ( github.com/honeycombio/dynsampler-go v0.2.1 github.com/honeycombio/libhoney-go v1.12.4 github.com/jessevdk/go-flags v1.5.0 - github.com/json-iterator/go v1.1.6 + github.com/json-iterator/go v1.1.11 github.com/klauspost/compress v1.10.3 github.com/leodido/go-urn v1.2.0 // indirect - github.com/mitchellh/mapstructure v1.3.3 // indirect - github.com/pelletier/go-toml v1.8.0 // indirect github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v0.9.4 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.2.0 - github.com/spf13/afero v1.3.2 // indirect - github.com/spf13/cast v1.3.1 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.7.0 + github.com/spf13/viper v1.8.1 
github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 - google.golang.org/grpc v1.37.1 + google.golang.org/grpc v1.38.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect - gopkg.in/ini.v1 v1.57.0 // indirect ) diff --git a/go.sum b/go.sum index 5582b5e57a..deb94c36c9 100644 --- a/go.sum +++ b/go.sum @@ -5,19 +5,43 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod 
h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd 
v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -28,25 +52,25 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= @@ -68,14 +92,14 @@ github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691/go.mod h1:sKL github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= @@ -83,15 +107,22 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+ github.com/go-playground/validator v9.31.0+incompatible h1:UA72EPEogEnq76ehGdEDp4Mit+3FDh548oRqwVgNsHA= github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+PugkyDjY2bRrL/UBU4f3rvrgkN3V8JEig= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -105,7 +136,9 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf 
v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/gomodule/redigo v1.8.4 h1:Z5JUg94HMTR1XpwBaSH4vq3+PNSIykBLxMdglbw10gg= @@ -116,12 +149,29 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -130,10 +180,6 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa 
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d h1:mksP7mUlZu0fpgMVMfDnaVvErqRL05HM3Kk+rBkZK54= github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= @@ -163,16 +209,18 @@ github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+D github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= github.com/honeycombio/libhoney-go v1.12.4 h1:rWAoxhpvu2briq85wZc04osHgKtueCLAk/3igqTX3+Q= github.com/honeycombio/libhoney-go v1.12.4/go.mod h1:tp2qtK0xMZyG/ZfykkebQESKFS78xpyPr2wEswZ1j6U= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 
h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8= @@ -188,8 +236,8 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn 
v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -202,18 +250,18 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw= -github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= +github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -223,25 +271,19 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= 
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v0.9.4 h1:Y8E/JaaPbmFSW2V81Ab/d8yZFYQQGbni1b1jPcG9Y6A= github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod 
h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -252,46 +294,49 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.3.2 h1:GDarE4TJQI52kYSbSAmLiId1Elfj+xgSDqrUZxFhxlU= -github.com/spf13/afero v1.3.2/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= 
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/vmihailenco/msgpack/v4 v4.3.11 h1:Q47CePddpNGNhk4GCnAx9DDtASi2rasatE0cd26cZoE= github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod 
h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -306,6 +351,11 @@ golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp 
v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -315,18 +365,27 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod 
h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -335,28 +394,58 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -365,19 +454,50 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -395,9 +515,40 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -408,12 +559,31 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod 
h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -423,18 +593,60 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= 
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= 
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.37.1 h1:ARnQJNWxGyYJpdf/JXscNlQr/uv607ZPU9Z7ogHi+iI= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= 
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -443,6 +655,7 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= @@ -456,22 +669,24 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= -gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= +gopkg.in/ini.v1 
v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 
v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= From fc28064bdd04b14dcb138f77e30a70ab1718a1fd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Jul 2021 15:32:45 +0100 Subject: [PATCH 052/351] Bump github.com/gomodule/redigo from 1.8.4 to 1.8.5 (#287) Bumps [github.com/gomodule/redigo](https://github.com/gomodule/redigo) from 1.8.4 to 1.8.5. - [Release notes](https://github.com/gomodule/redigo/releases) - [Commits](https://github.com/gomodule/redigo/compare/v1.8.4...v1.8.5) --- updated-dependencies: - dependency-name: github.com/gomodule/redigo dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index bdccb8892c..5c3fc8801b 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/go-playground/validator v9.31.0+incompatible github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 - github.com/gomodule/redigo v1.8.4 + github.com/gomodule/redigo v1.8.5 github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/golang-lru v0.5.4 diff --git a/go.sum b/go.sum index deb94c36c9..6d60f6881d 100644 --- a/go.sum +++ b/go.sum @@ -141,8 +141,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= 
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/gomodule/redigo v1.8.4 h1:Z5JUg94HMTR1XpwBaSH4vq3+PNSIykBLxMdglbw10gg= -github.com/gomodule/redigo v1.8.4/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/gomodule/redigo v1.8.5 h1:nRAxCa+SVsyjSBrtZmG/cqb6VbTmuRzpg/PoTFlpumc= +github.com/gomodule/redigo v1.8.5/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= From 6484b38c3fcb34cd06fc3e6cab86fb81dc2327c5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Jul 2021 15:36:47 +0100 Subject: [PATCH 053/351] Bump google.golang.org/grpc from 1.37.1 to 1.39.0 (#288) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.37.1 to 1.39.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.37.1...v1.39.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 5c3fc8801b..9844d7c0e2 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( github.com/spf13/viper v1.8.1 github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 - google.golang.org/grpc v1.38.0 + google.golang.org/grpc v1.39.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect ) diff --git a/go.sum b/go.sum index 6d60f6881d..288c8dd7cb 100644 --- a/go.sum +++ b/go.sum @@ -61,6 +61,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -72,6 +73,7 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= @@ -334,6 +336,7 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= @@ -645,8 +648,9 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= 
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From d761e757c784fcca94fdf596ee739289452dd19f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Jul 2021 15:41:31 +0100 Subject: [PATCH 054/351] Bump github.com/sirupsen/logrus from 1.2.0 to 1.8.1 (#290) Bumps [github.com/sirupsen/logrus](https://github.com/sirupsen/logrus) from 1.2.0 to 1.8.1. - [Release notes](https://github.com/sirupsen/logrus/releases) - [Changelog](https://github.com/sirupsen/logrus/blob/master/CHANGELOG.md) - [Commits](https://github.com/sirupsen/logrus/compare/v1.2.0...v1.8.1) --- updated-dependencies: - dependency-name: github.com/sirupsen/logrus dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 9844d7c0e2..08372cd97b 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v0.9.4 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 - github.com/sirupsen/logrus v1.2.0 + github.com/sirupsen/logrus v1.8.1 github.com/spf13/viper v1.8.1 github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 diff --git a/go.sum b/go.sum index 288c8dd7cb..4806381556 100644 --- a/go.sum +++ b/go.sum @@ -227,7 +227,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -290,8 +289,9 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= @@ -347,7 +347,6 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -460,6 +459,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w 
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= From a4794cc3c53c97686b6bb55d1eb1b70bf8a7541f Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Fri, 16 Jul 2021 16:55:46 +0100 Subject: [PATCH 055/351] Prepare v1.4.0 release (#291) --- CHANGELOG.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 305965c4a0..db87c97df0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,21 @@ # Refinery Changelog +## 1.4.0 + +### Added + +- Add support for OTLP over HTTP/protobuf [#279](https://github.com/honeycombio/refinery/pull/279) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) + +### Maintenance + +- Bump github.com/sirupsen/logrus from 1.2.0 to 1.8.1 (#290) +- Bump google.golang.org/grpc from 1.37.1 to 1.39.0 (#288) +- Bump github.com/gomodule/redigo from 1.8.4 to 1.8.5 (#287) +- Bump github.com/spf13/viper from 1.7.0 to 1.8.1 (#274) +- Bump github.com/gogo/protobuf from 1.3.1 to 1.3.2 (#242) +- Bump github.com/golang/protobuf from 1.4.3 to 1.5.2 (#252) +- Bump github.com/grpc-ecosystem/grpc-gateway from 1.12.1 to 1.16.0 (#233) + ## 1.3.0 ### Added From 170c6b62caded8e835baa7f19927eba98c3ccea7 Mon Sep 17 00:00:00 
2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Aug 2021 16:01:12 +0100 Subject: [PATCH 056/351] Bump github.com/klauspost/compress from 1.10.3 to 1.13.2 (#297) Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.10.3 to 1.13.2. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.10.3...v1.13.2) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 08372cd97b..570759a321 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/honeycombio/libhoney-go v1.12.4 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.11 - github.com/klauspost/compress v1.10.3 + github.com/klauspost/compress v1.13.2 github.com/leodido/go-urn v1.2.0 // indirect github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v0.9.4 diff --git a/go.sum b/go.sum index 4806381556..fd6faaeba9 100644 --- a/go.sum +++ b/go.sum @@ -143,6 +143,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf 
v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.8.5 h1:nRAxCa+SVsyjSBrtZmG/cqb6VbTmuRzpg/PoTFlpumc= github.com/gomodule/redigo v1.8.5/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -225,8 +227,9 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.2 h1:YecVYiuZPySnUyT8Ar9d5VawPwybeQw9IcwsLfdfdEk= +github.com/klauspost/compress v1.13.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= From 92d047995a8b762de1a1ceee69841af02ab86cd8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Aug 2021 16:43:48 +0100 Subject: [PATCH 
057/351] Bump github.com/honeycombio/libhoney-go from 1.12.4 to 1.15.4 (#295) Bumps [github.com/honeycombio/libhoney-go](https://github.com/honeycombio/libhoney-go) from 1.12.4 to 1.15.4. - [Release notes](https://github.com/honeycombio/libhoney-go/releases) - [Changelog](https://github.com/honeycombio/libhoney-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/honeycombio/libhoney-go/compare/v1.12.4...v1.15.4) --- updated-dependencies: - dependency-name: github.com/honeycombio/libhoney-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 3 +-- go.sum | 15 +++++++++------ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 570759a321..7c2758b36d 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/honeycombio/refinery go 1.14 require ( - github.com/DataDog/zstd v1.4.5 // indirect github.com/davecgh/go-spew v1.1.1 github.com/facebookgo/inject v0.0.0-20180706035515-f23751cae28b github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d @@ -18,7 +17,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/libhoney-go v1.12.4 + github.com/honeycombio/libhoney-go v1.15.4 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.11 github.com/klauspost/compress v1.13.2 diff --git a/go.sum b/go.sum index fd6faaeba9..49f64b8784 100644 --- a/go.sum +++ b/go.sum @@ -39,9 +39,8 @@ cloud.google.com/go/storage v1.10.0/go.mod 
h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= -github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.4.8 h1:Rpmta4xZ/MgZnriKNd24iZMhGpP5dvUcs/uqfBapKZY= +github.com/DataDog/zstd v1.4.8/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -211,8 +210,8 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/libhoney-go v1.12.4 h1:rWAoxhpvu2briq85wZc04osHgKtueCLAk/3igqTX3+Q= -github.com/honeycombio/libhoney-go v1.12.4/go.mod h1:tp2qtK0xMZyG/ZfykkebQESKFS78xpyPr2wEswZ1j6U= +github.com/honeycombio/libhoney-go v1.15.4 
h1:D6UftkvQC9ZnPXK00wET9Le8zxdc+vPeGlqCpWWHS5Y= +github.com/honeycombio/libhoney-go v1.15.4/go.mod h1:heFH+SMgmpF2m3aHwnQqUS5feImLHnzP6RaIvFkWsKU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= @@ -227,7 +226,7 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.2 h1:YecVYiuZPySnUyT8Ar9d5VawPwybeQw9IcwsLfdfdEk= github.com/klauspost/compress v1.13.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -322,8 +321,12 @@ github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/vmihailenco/msgpack/v4 v4.3.11 h1:Q47CePddpNGNhk4GCnAx9DDtASi2rasatE0cd26cZoE= github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= 
+github.com/vmihailenco/msgpack/v5 v5.2.0 h1:ZhIAtVUP1mme8GIlpiAnmTzjSWMexA/uNF2We85DR0w= +github.com/vmihailenco/msgpack/v5 v5.2.0/go.mod h1:fEM7KuHcnm0GvDCztRpw9hV0PuoO2ciTismP6vjggcM= github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= From 79457538e845d2fdb304f7dc0e13682316ddd414 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Thu, 5 Aug 2021 15:32:27 -0600 Subject: [PATCH 058/351] Add span.kind when ingesting OTLP (#299) --- route/otlp_trace.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/route/otlp_trace.go b/route/otlp_trace.go index dca390e10a..11bd45d032 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -102,10 +102,12 @@ func processTraceRequest( spanID := hex.EncodeToString(span.SpanId) timestamp := time.Unix(0, int64(span.StartTimeUnixNano)).UTC() + spanKind := getSpanKind(span.Kind) eventAttrs := map[string]interface{}{ "trace.trace_id": traceID, "trace.span_id": spanID, - "type": getSpanKind(span.Kind), + "type": spanKind, + "span.kind": spanKind, "name": span.Name, "duration_ms": float64(span.EndTimeUnixNano-span.StartTimeUnixNano) / float64(time.Millisecond), "status_code": int32(getSpanStatusCode(span.Status)), From c38ca1951e8c54944f19df8927e60a6d891e09bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Aug 2021 14:56:52 +0100 Subject: [PATCH 059/351] Bump github.com/klauspost/compress from 1.13.2 to 1.13.3 (#301) Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.13.2 to 1.13.3. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.13.2...v1.13.3) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7c2758b36d..d5544cba5e 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/honeycombio/libhoney-go v1.15.4 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.11 - github.com/klauspost/compress v1.13.2 + github.com/klauspost/compress v1.13.3 github.com/leodido/go-urn v1.2.0 // indirect github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v0.9.4 diff --git a/go.sum b/go.sum index 49f64b8784..775ba043f0 100644 --- a/go.sum +++ b/go.sum @@ -227,8 +227,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.12.3/go.mod 
h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.2 h1:YecVYiuZPySnUyT8Ar9d5VawPwybeQw9IcwsLfdfdEk= -github.com/klauspost/compress v1.13.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.3 h1:BtAvtV1+h0YwSVwWoYXMREPpYu9VzTJ9QDI1TEg/iQQ= +github.com/klauspost/compress v1.13.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= From e5f95530c437b125c5efde3363ff5aac09b14eee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Aug 2021 14:57:13 +0100 Subject: [PATCH 060/351] Bump google.golang.org/grpc from 1.39.0 to 1.39.1 (#300) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.39.0 to 1.39.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.39.0...v1.39.1) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d5544cba5e..bd0e7cd05e 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/spf13/viper v1.8.1 github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 - google.golang.org/grpc v1.39.0 + google.golang.org/grpc v1.39.1 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect ) diff --git a/go.sum b/go.sum index 775ba043f0..44d464c771 100644 --- a/go.sum +++ b/go.sum @@ -655,8 +655,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1 h1:f37vZbBVTiJ6jKG5mWz8ySOBxNqy6ViPgyhSdVnxF3E= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From ee5cb7382600dbd24763862d527e979da5dce7dd Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Mon, 9 Aug 2021 15:49:03 +0100 Subject: [PATCH 061/351] prepare v1.3.1 release (#302) --- CHANGELOG.md | 13 +++++++++++++ 
RELEASE.md | 7 +++++++ 2 files changed, 20 insertions(+) create mode 100644 RELEASE.md diff --git a/CHANGELOG.md b/CHANGELOG.md index db87c97df0..2294c1db72 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Refinery Changelog +## 1.4.1 + +### Fixes + +- Add span.kind when ingesting OTLP (#299) + +### Maintenance + +- Bump google.golang.org/grpc from 1.39.0 to 1.39.1 (#300) +- Bump github.com/klauspost/compress from 1.13.2 to 1.13.3 (#301) +- Bump github.com/honeycombio/libhoney-go from 1.12.4 to 1.15.4 (#295) +- Bump github.com/klauspost/compress from 1.10.3 to 1.13.2 (#297) + ## 1.4.0 ### Added diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000000..2e245e277d --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,7 @@ +# Release Process + +1. Add release entry to [changelog](./CHANGELOG.md) +3. Open a PR with the above, and merge that into main +4. Create new tag on merged commit with the new version (e.g. `v1.4.1`) +5. Push the tag upstream (this will kick off the release pipeline in CI) +6. Copy change log entry for newest version into draft GitHub release created as part of CI publish steps From 5b51929764ed82a9d8d38c1190eae0b381518d58 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Wed, 11 Aug 2021 14:56:46 -0600 Subject: [PATCH 062/351] Add community health files (#303) --- CODE_OF_CONDUCT.md | 5 +++++ CONTRIBUTING.md | 3 +++ SECURITY.md | 3 +++ SUPPORT.md | 3 +++ 4 files changed, 14 insertions(+) create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 SECURITY.md create mode 100644 SUPPORT.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..bf46524ab7 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +This project has adopted the Honeycomb User Community Code of Conduct to clarify expected behavior in our community. 
+ +https://www.honeycomb.io/honeycomb-user-community-code-of-conduct/ \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..0cc0b86141 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,3 @@ +# Contributing Guide + +Please see our [general guide for OSS lifecycle and practices.](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md) diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..c0ce73b5ca --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,3 @@ +# Reporting Security Issues + +If you discover a security vulnerability, please open an issue with label `type: security`. diff --git a/SUPPORT.md b/SUPPORT.md new file mode 100644 index 0000000000..9164e0642d --- /dev/null +++ b/SUPPORT.md @@ -0,0 +1,3 @@ +# How to Get Help + +This project uses GitHub issues to track bugs, feature requests, and questions about using the project. Please search for existing issues before filing a new one. 
From ac845e3617e74400f6c3ec9a2b2b011dd27221ae Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Wed, 11 Aug 2021 19:19:20 -0600 Subject: [PATCH 063/351] Add OSS lifecycle badge (#304) --- OSSMETADATA | 1 + README.md | 1 + 2 files changed, 2 insertions(+) create mode 100755 OSSMETADATA diff --git a/OSSMETADATA b/OSSMETADATA new file mode 100755 index 0000000000..8bff0a1d31 --- /dev/null +++ b/OSSMETADATA @@ -0,0 +1 @@ +osslifecycle=maintenance diff --git a/README.md b/README.md index 394ec9de6c..69628ed5bb 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ ![refinery](https://user-images.githubusercontent.com/6510988/94976958-8cadba80-04cb-11eb-9883-6e8ea554a081.png) +[![OSS Lifecycle](https://img.shields.io/osslifecycle/honeycombio/refinery)](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md) [![Build Status](https://circleci.com/gh/honeycombio/refinery.svg?style=shield)](https://circleci.com/gh/honeycombio/refinery) ## Purpose From 451bc8b1988097a1e55d7360863630562fc36623 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Mon, 16 Aug 2021 17:34:30 -0600 Subject: [PATCH 064/351] Add issue and PR templates (#307) --- .github/ISSUE_TEMPLATE/bug_report.md | 28 +++++++++++++++++++ .github/ISSUE_TEMPLATE/feature_request.md | 25 +++++++++++++++++ .github/ISSUE_TEMPLATE/question-discussion.md | 14 ++++++++++ .../security-vulnerability-report.md | 22 +++++++++++++++ .github/PULL_REQUEST_TEMPLATE.md | 20 +++++++++++++ 5 files changed, 109 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/ISSUE_TEMPLATE/question-discussion.md create mode 100644 .github/ISSUE_TEMPLATE/security-vulnerability-report.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000..9cd88bd5fe --- /dev/null +++ 
b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,28 @@ +--- +name: Bug report +about: Let us know if something is not working as expected +title: '' +labels: 'type: bug' +assignees: '' + +--- + + + +**Versions** + +- Go: +- Refinery: + + +**Steps to reproduce** + +1. + +**Additional context** diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000..457405f3e6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,25 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: 'type: enhancement' +assignees: '' + +--- + + + +**Is your feature request related to a problem? Please describe.** + + +**Describe the solution you'd like** + + +**Describe alternatives you've considered** + + +**Additional context** diff --git a/.github/ISSUE_TEMPLATE/question-discussion.md b/.github/ISSUE_TEMPLATE/question-discussion.md new file mode 100644 index 0000000000..63cc4a175c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question-discussion.md @@ -0,0 +1,14 @@ +--- +name: Question/Discussion +about: General question about how things work or a discussion +title: '' +labels: 'type: discussion' +assignees: '' + +--- + + diff --git a/.github/ISSUE_TEMPLATE/security-vulnerability-report.md b/.github/ISSUE_TEMPLATE/security-vulnerability-report.md new file mode 100644 index 0000000000..41337972a3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/security-vulnerability-report.md @@ -0,0 +1,22 @@ +--- +name: Security vulnerability report +about: Let us know if you discover a security vulnerability +title: '' +labels: 'type: security' +assignees: '' + +--- + + +**Versions** + +- Go: +- Refinery: + +**Description** + +(Please include any relevant CVE advisory links) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..fd8fd3b3f1 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,20 @@ + + +## 
Which problem is this PR solving? + +- + +## Short description of the changes + +- + From 8fd092b84a4888db35fd8a2f6c520fdef54bdccb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Aug 2021 20:33:55 -0600 Subject: [PATCH 065/351] Bump github.com/klauspost/compress from 1.13.3 to 1.13.4 (#306) Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.13.3 to 1.13.4. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.13.3...v1.13.4) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index bd0e7cd05e..feec64e4af 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/honeycombio/libhoney-go v1.15.4 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.11 - github.com/klauspost/compress v1.13.3 + github.com/klauspost/compress v1.13.4 github.com/leodido/go-urn v1.2.0 // indirect github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v0.9.4 diff --git a/go.sum b/go.sum index 44d464c771..0c4ef824ab 100644 --- a/go.sum +++ b/go.sum @@ -227,8 +227,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.3 h1:BtAvtV1+h0YwSVwWoYXMREPpYu9VzTJ9QDI1TEg/iQQ= -github.com/klauspost/compress v1.13.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= From 2e787744e98dff492f1ae347dbb4e795f27a33e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Aug 2021 13:58:40 -0600 Subject: [PATCH 066/351] Bump github.com/fsnotify/fsnotify from 1.4.9 to 1.5.0 (#308) Bumps [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify) from 1.4.9 to 1.5.0. - [Release notes](https://github.com/fsnotify/fsnotify/releases) - [Changelog](https://github.com/fsnotify/fsnotify/blob/master/CHANGELOG.md) - [Commits](https://github.com/fsnotify/fsnotify/compare/v1.4.9...v1.5.0) --- updated-dependencies: - dependency-name: github.com/fsnotify/fsnotify dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index feec64e4af..4bfd077dd1 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/facebookgo/inject v0.0.0-20180706035515-f23751cae28b github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 // indirect - github.com/fsnotify/fsnotify v1.4.9 + github.com/fsnotify/fsnotify v1.5.0 github.com/go-playground/universal-translator v0.17.0 // indirect github.com/go-playground/validator v9.31.0+incompatible github.com/gogo/protobuf v1.3.2 diff --git a/go.sum b/go.sum index 0c4ef824ab..fc96a2bfbb 100644 --- a/go.sum +++ b/go.sum @@ -93,8 +93,9 @@ github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691/go.mod h1:sKL github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.0 h1:NO5hkcB+srp1x6QmwvNZLeaOgbM8cmBTN32THzjvu2k= +github.com/fsnotify/fsnotify v1.5.0/go.mod h1:BX0DCEr5pT4jm2CnQdVP1lFV521fcCNcyEeNp4DQQDk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw 
v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -493,8 +494,9 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 615021aa669590e882d73336502e64b99279e5df Mon Sep 17 00:00:00 2001 From: Phillip Carter Date: Wed, 8 Sep 2021 13:59:18 -0700 Subject: [PATCH 067/351] Add NOTICE (#314) --- NOTICE | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 NOTICE diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000000..9b25a98729 --- /dev/null +++ b/NOTICE @@ -0,0 +1,13 @@ +Copyright (c) 2016-Present Honeycomb, Hound Technology, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. From 310fa1b4c357774952a5bc8eb880d2a4766c3585 Mon Sep 17 00:00:00 2001 From: Phillip Carter Date: Fri, 10 Sep 2021 13:31:24 -0700 Subject: [PATCH 068/351] Switch licecycle terminology to maintained (#315) --- OSSMETADATA | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/OSSMETADATA b/OSSMETADATA index 8bff0a1d31..58d43b81d1 100755 --- a/OSSMETADATA +++ b/OSSMETADATA @@ -1 +1 @@ -osslifecycle=maintenance +osslifecycle=maintained diff --git a/README.md b/README.md index 69628ed5bb..7385f2a82f 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ![refinery](https://user-images.githubusercontent.com/6510988/94976958-8cadba80-04cb-11eb-9883-6e8ea554a081.png) -[![OSS Lifecycle](https://img.shields.io/osslifecycle/honeycombio/refinery)](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md) +[![OSS Lifecycle](https://img.shields.io/osslifecycle/honeycombio/refinery?color=success)](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md) [![Build Status](https://circleci.com/gh/honeycombio/refinery.svg?style=shield)](https://circleci.com/gh/honeycombio/refinery) ## Purpose From dfbb7c7081496f260acc4fdaf31d4be2469568d0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Sep 2021 16:48:10 -0600 Subject: [PATCH 069/351] Bump google.golang.org/grpc from 1.39.1 to 1.40.0 (#305) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.39.1 to 1.40.0. 
- [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.39.1...v1.40.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4bfd077dd1..b824cd5ea2 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/spf13/viper v1.8.1 github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 - google.golang.org/grpc v1.39.1 + google.golang.org/grpc v1.40.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect ) diff --git a/go.sum b/go.sum index fc96a2bfbb..d84fc0dde2 100644 --- a/go.sum +++ b/go.sum @@ -41,6 +41,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.4.8 h1:Rpmta4xZ/MgZnriKNd24iZMhGpP5dvUcs/uqfBapKZY= github.com/DataDog/zstd v1.4.8/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -53,6 +54,7 @@ 
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -299,6 +301,7 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= @@ -657,8 +660,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc 
v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.1 h1:f37vZbBVTiJ6jKG5mWz8ySOBxNqy6ViPgyhSdVnxF3E= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 5d1b76ebe5619848c9e473dc21ec3a91f0590aef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Sep 2021 18:03:14 -0600 Subject: [PATCH 070/351] Bump github.com/fsnotify/fsnotify from 1.5.0 to 1.5.1 (#311) Bumps [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify) from 1.5.0 to 1.5.1. - [Release notes](https://github.com/fsnotify/fsnotify/releases) - [Changelog](https://github.com/fsnotify/fsnotify/blob/master/CHANGELOG.md) - [Commits](https://github.com/fsnotify/fsnotify/compare/v1.5.0...v1.5.1) --- updated-dependencies: - dependency-name: github.com/fsnotify/fsnotify dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b824cd5ea2..a0f882cfcd 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/facebookgo/inject v0.0.0-20180706035515-f23751cae28b github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 // indirect - github.com/fsnotify/fsnotify v1.5.0 + github.com/fsnotify/fsnotify v1.5.1 github.com/go-playground/universal-translator v0.17.0 // indirect github.com/go-playground/validator v9.31.0+incompatible github.com/gogo/protobuf v1.3.2 diff --git a/go.sum b/go.sum index d84fc0dde2..1820c769bd 100644 --- a/go.sum +++ b/go.sum @@ -96,8 +96,8 @@ github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpm github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.0 h1:NO5hkcB+srp1x6QmwvNZLeaOgbM8cmBTN32THzjvu2k= -github.com/fsnotify/fsnotify v1.5.0/go.mod h1:BX0DCEr5pT4jm2CnQdVP1lFV521fcCNcyEeNp4DQQDk= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw 
v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= From a882138a977e02561d06839a9435273bbf5cd881 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Sep 2021 18:11:16 -0600 Subject: [PATCH 071/351] Bump github.com/klauspost/compress from 1.13.4 to 1.13.6 (#319) Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.13.4 to 1.13.6. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.13.4...v1.13.6) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index a0f882cfcd..e86745d564 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/honeycombio/libhoney-go v1.15.4 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.11 - github.com/klauspost/compress v1.13.4 + github.com/klauspost/compress v1.13.6 github.com/leodido/go-urn v1.2.0 // indirect github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v0.9.4 diff --git a/go.sum b/go.sum index 1820c769bd..f31343cea3 100644 --- a/go.sum +++ b/go.sum @@ -145,7 +145,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.8.5 h1:nRAxCa+SVsyjSBrtZmG/cqb6VbTmuRzpg/PoTFlpumc= github.com/gomodule/redigo v1.8.5/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= @@ -230,8 +229,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s= -github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= From 7f9af0e3a58df4164340f1cb7195359dbc86e3d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Sep 2021 19:07:12 -0600 Subject: [PATCH 072/351] Bump github.com/json-iterator/go from 1.1.11 to 1.1.12 (#316) Bumps [github.com/json-iterator/go](https://github.com/json-iterator/go) from 1.1.11 to 1.1.12. - [Release notes](https://github.com/json-iterator/go/releases) - [Commits](https://github.com/json-iterator/go/compare/v1.1.11...v1.1.12) --- updated-dependencies: - dependency-name: github.com/json-iterator/go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e86745d564..d8da485d1d 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/honeycombio/dynsampler-go v0.2.1 github.com/honeycombio/libhoney-go v1.15.4 github.com/jessevdk/go-flags v1.5.0 - github.com/json-iterator/go v1.1.11 + github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.13.6 github.com/leodido/go-urn v1.2.0 // indirect github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index f31343cea3..2d60f220fb 100644 --- a/go.sum +++ b/go.sum @@ -219,8 +219,9 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible 
h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -261,8 +262,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= From 9e568d4000ef38655aeb7df47a2a89f8edd73f1a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Sep 2021 19:07:41 -0600 Subject: [PATCH 073/351] Bump github.com/spf13/viper from 1.8.1 to 1.9.0 (#320) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.8.1 to 1.9.0. 
- [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.8.1...v1.9.0) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 150 +++++++++++++++++++++++++++++++++++++++------------------ 2 files changed, 105 insertions(+), 47 deletions(-) diff --git a/go.mod b/go.mod index d8da485d1d..37809dcc1a 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/prometheus/client_golang v0.9.4 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.8.1 - github.com/spf13/viper v1.8.1 + github.com/spf13/viper v1.9.0 github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 google.golang.org/grpc v1.40.0 diff --git a/go.sum b/go.sum index 2d60f220fb..dd84af5811 100644 --- a/go.sum +++ b/go.sum @@ -18,6 +18,11 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= 
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -26,7 +31,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -48,11 +53,11 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod 
h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -95,7 +100,7 @@ github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691/go.mod h1:sKL github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -127,6 +132,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= 
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -160,12 +166,14 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -177,29 +185,32 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d h1:mksP7mUlZu0fpgMVMfDnaVvErqRL05HM3Kk+rBkZK54= 
github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= 
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -207,9 +218,9 @@ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= github.com/honeycombio/libhoney-go v1.15.4 
h1:D6UftkvQC9ZnPXK00wET9Le8zxdc+vPeGlqCpWWHS5Y= @@ -224,8 +235,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -235,8 +244,9 @@ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -245,19 +255,24 @@ github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgx github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/miekg/dns v1.1.26/go.mod 
h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -267,8 +282,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -277,6 +292,7 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.4 h1:Y8E/JaaPbmFSW2V81Ab/d8yZFYQQGbni1b1jPcG9Y6A= github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= @@ -294,25 +310,22 @@ github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqn github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= 
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= -github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk= +github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -357,8 +370,10 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -398,7 +413,6 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -409,6 +423,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -430,8 +445,9 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -443,7 +459,10 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -461,6 +480,7 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -468,13 +488,17 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -498,9 +522,16 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -508,8 +539,9 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -519,7 +551,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -527,9 +558,9 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -563,7 +594,11 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -590,7 +625,12 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod 
h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -639,8 +679,19 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= 
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71 h1:z+ErRPu0+KS02Td3fOAgdX+lnPDh/VyaABEJPD4JRQs= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -660,9 +711,14 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= 
google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -674,19 +730,21 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 
h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= +gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From 8fa68d5c20411a61554443231d4a000c60985f6a Mon Sep 17 00:00:00 2001 From: JamieDanielson Date: Fri, 24 Sep 2021 16:48:26 -0400 Subject: [PATCH 074/351] Adds Stalebot (#321) --- .github/workflows/stale.yml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 .github/workflows/stale.yml diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000000..d4a449e250 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,26 @@ +name: 'Close stale issues and PRs' +on: + schedule: + - cron: '30 1 * * *' + +jobs: + stale: + name: 'Close stale issues and PRs' + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@v4 + with: + start-date: '2021-09-01T00:00:00Z' + stale-issue-message: 'Marking this issue as stale because it has been open 14 days with no activity. Please add a comment if this is still an ongoing issue; otherwise this issue will be automatically closed in 7 days.' 
+ stale-pr-message: 'Marking this PR as stale because it has been open 30 days with no activity. Please add a comment if this PR is still relevant; otherwise this PR will be automatically closed in 7 days.' + close-issue-message: 'Closing this issue due to inactivity. Please see our [Honeycomb OSS Lifecyle and Practices](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md).' + close-pr-message: 'Closing this PR due to inactivity. Please see our [Honeycomb OSS Lifecyle and Practices](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md).' + days-before-issue-stale: 14 + days-before-pr-stale: 30 + days-before-issue-close: 7 + days-before-pr-close: 7 + any-of-labels: 'status: info needed,status: revision needed' From 11d10bb32f6274cc65f149de104b3c926c559c7f Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Wed, 29 Sep 2021 07:39:42 -0400 Subject: [PATCH 075/351] Add dynamic sampler support to rules based samplers (#317) The RulesBased sample is exclusive to the other samplers. This PR allows you to use Dynamic and EMADynamic samplers to determine the sample rate of a RulesBased sampler rule. 
Co-authored-by: Mike Goldsmth --- config/config_test.go | 12 +-- config/file_config.go | 29 ------- config/sampler_config.go | 40 ++++++++++ rules_complete.toml | 39 ++++++--- sample/rules.go | 75 ++++++++++++------ sample/rules_test.go | 166 +++++++++++++++++++++++++++++++++++++++ sample/sample.go | 27 +++---- 7 files changed, 302 insertions(+), 86 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index 8a2ac87b32..f2c0c6f825 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -206,20 +206,20 @@ func TestReadRulesConfig(t *testing.T) { assert.NoError(t, err) switch r := d.(type) { case *RulesBasedSamplerConfig: - assert.Len(t, r.Rule, 3) + assert.Len(t, r.Rule, 4) var rule *RulesBasedSamplerRule rule = r.Rule[0] - assert.Equal(t, 1, rule.SampleRate) - assert.Equal(t, "500 errors", rule.Name) - assert.Len(t, rule.Condition, 2) - - rule = r.Rule[1] assert.True(t, rule.Drop) assert.Equal(t, 0, rule.SampleRate) assert.Len(t, rule.Condition, 1) + rule = r.Rule[1] + assert.Equal(t, 1, rule.SampleRate) + assert.Equal(t, "500 errors or slow", rule.Name) + assert.Len(t, rule.Condition, 2) + default: assert.Fail(t, "dataset4 should have a rules based sampler", d) } diff --git a/config/file_config.go b/config/file_config.go index 536ac5a997..587e356b51 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -24,35 +24,6 @@ type fileConfig struct { mux sync.RWMutex } -type RulesBasedSamplerCondition struct { - Field string - Operator string - Value interface{} -} - -func (r *RulesBasedSamplerCondition) String() string { - return fmt.Sprintf("%+v", *r) -} - -type RulesBasedSamplerRule struct { - Name string - SampleRate int - Drop bool - Condition []*RulesBasedSamplerCondition -} - -func (r *RulesBasedSamplerRule) String() string { - return fmt.Sprintf("%+v", *r) -} - -type RulesBasedSamplerConfig struct { - Rule []*RulesBasedSamplerRule -} - -func (r *RulesBasedSamplerConfig) String() string { - return fmt.Sprintf("%+v", *r) -} - 
type configContents struct { ListenAddr string `validate:"required"` PeerListenAddr string `validate:"required"` diff --git a/config/sampler_config.go b/config/sampler_config.go index 84c3bc299a..4812ccaba7 100644 --- a/config/sampler_config.go +++ b/config/sampler_config.go @@ -1,5 +1,9 @@ package config +import ( + "fmt" +) + type DeterministicSamplerConfig struct { SampleRate int `validate:"required,gte=1"` } @@ -36,3 +40,39 @@ type TotalThroughputSamplerConfig struct { AddSampleRateKeyToTrace bool AddSampleRateKeyToTraceField string `validate:"required_with=AddSampleRateKeyToTrace"` } + +type RulesBasedSamplerCondition struct { + Field string + Operator string + Value interface{} +} + +func (r *RulesBasedSamplerCondition) String() string { + return fmt.Sprintf("%+v", *r) +} + +type RulesBasedDownstreamSampler struct { + DynamicSampler *DynamicSamplerConfig + EMADynamicSampler *EMADynamicSamplerConfig + TotalThroughputSampler *TotalThroughputSamplerConfig +} + +type RulesBasedSamplerRule struct { + Name string + SampleRate int + Sampler *RulesBasedDownstreamSampler + Drop bool + Condition []*RulesBasedSamplerCondition +} + +func (r *RulesBasedSamplerRule) String() string { + return fmt.Sprintf("%+v", *r) +} + +type RulesBasedSamplerConfig struct { + Rule []*RulesBasedSamplerRule +} + +func (r *RulesBasedSamplerConfig) String() string { + return fmt.Sprintf("%+v", *r) +} diff --git a/rules_complete.toml b/rules_complete.toml index aabb5f7e06..acb738e850 100644 --- a/rules_complete.toml +++ b/rules_complete.toml @@ -208,27 +208,40 @@ SampleRate = 1 Sampler = "RulesBasedSampler" [[dataset4.rule]] - name = "500 errors" + name = "drop healtchecks" + drop = true + [[dataset4.rule.condition]] + field = "http.route" + operator = "=" + value = "/health-check" + + [[dataset4.rule]] + name = "500 errors or slow" SampleRate = 1 [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = 500 + field = "status_code" + operator = "=" + value = 500 
[[dataset4.rule.condition]] - field = "duration_ms" - operator = ">=" - value = 1000.789 + field = "duration_ms" + operator = ">=" + value = 1000.789 [[dataset4.rule]] - name = "drop 200 responses" - drop = true + name = "dynamic sample 200 responses" [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = 200 + field = "status_code" + operator = "=" + value = 200 + [dataset4.rule.sampler.EMADynamicSampler] + Sampler = "EMADynamicSampler" + GoalSampleRate = 15 + FieldList = ["request.method", "request.route"] + AddSampleRateKeyToTrace = true + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" [[dataset4.rule]] - SampleRate = 10 # default when no rules match, if missing defaults to 1 + SampleRate = 10 # default when no rules match, if missing defaults to 10 [dataset5] diff --git a/sample/rules.go b/sample/rules.go index 4f035c3c4a..3a56d24040 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -11,9 +11,10 @@ import ( ) type RulesBasedSampler struct { - Config *config.RulesBasedSamplerConfig - Logger logger.Logger - Metrics metrics.Metrics + Config *config.RulesBasedSamplerConfig + Logger logger.Logger + Metrics metrics.Metrics + samplers map[string]Sampler } func (s *RulesBasedSampler) Start() error { @@ -24,6 +25,35 @@ func (s *RulesBasedSampler) Start() error { s.Metrics.Register("rulessampler_num_kept", "counter") s.Metrics.Register("rulessampler_sample_rate", "histogram") + s.samplers = make(map[string]Sampler) + + // Check if any rule has a downstream sampler and create it + for _, rule := range s.Config.Rule { + if rule.Sampler != nil { + var sampler Sampler + if rule.Sampler.DynamicSampler != nil { + sampler = &DynamicSampler{Config: rule.Sampler.DynamicSampler, Logger: s.Logger, Metrics: s.Metrics} + } else if rule.Sampler.EMADynamicSampler != nil { + sampler = &EMADynamicSampler{Config: rule.Sampler.EMADynamicSampler, Logger: s.Logger, Metrics: s.Metrics} + } else if rule.Sampler.TotalThroughputSampler != nil { + 
sampler = &TotalThroughputSampler{Config: rule.Sampler.TotalThroughputSampler, Logger: s.Logger, Metrics: s.Metrics} + } else { + s.Logger.Debug().WithFields(map[string]interface{}{ + "rule_name": rule.Name, + }).Logf("invalid or missing downstream sampler") + continue + } + + err := sampler.Start() + if err != nil { + s.Logger.Debug().WithFields(map[string]interface{}{ + "rule_name": rule.Name, + }).Logf("error creating downstream sampler: %s", err) + continue + } + s.samplers[rule.String()] = sampler + } + } return nil } @@ -34,24 +64,6 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b for _, rule := range s.Config.Rule { var matched int - rate := uint(rule.SampleRate) - keep := !rule.Drop && rule.SampleRate > 0 && rand.Intn(rule.SampleRate) == 0 - - // no condition signifies the default - if rule.Condition == nil { - s.Metrics.Histogram("rulessampler_sample_rate", float64(rule.SampleRate)) - if keep { - s.Metrics.Increment("rulessampler_num_kept") - } else { - s.Metrics.Increment("rulessampler_num_dropped") - } - logger.WithFields(map[string]interface{}{ - "rate": rate, - "keep": keep, - "drop_rule": rule.Drop, - }).Logf("got sample rate and decision") - return rate, keep - } for _, condition := range rule.Condition { span: @@ -127,7 +139,25 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b } } - if matched == len(rule.Condition) { + if rule.Condition == nil || matched == len(rule.Condition) { + var rate uint + var keep bool + + if rule.Sampler != nil { + var sampler Sampler + var found bool + if sampler, found = s.samplers[rule.String()]; !found { + logger.WithFields(map[string]interface{}{ + "rule_name": rule.Name, + }).Logf("could not find downstream sampler for rule: %s", rule.Name) + return 1, true + } + rate, keep = sampler.GetSampleRate(trace) + } else { + rate = uint(rule.SampleRate) + keep = !rule.Drop && rule.SampleRate > 0 && rand.Intn(rule.SampleRate) == 0 + } + 
s.Metrics.Histogram("rulessampler_sample_rate", float64(rule.SampleRate)) if keep { s.Metrics.Increment("rulessampler_num_kept") @@ -138,7 +168,6 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b "rate": rate, "keep": keep, "drop_rule": rule.Drop, - "rule_name": rule.Name, }).Logf("got sample rate and decision") return rate, keep } diff --git a/sample/rules_test.go b/sample/rules_test.go index 2965c70285..6bcb5b747d 100644 --- a/sample/rules_test.go +++ b/sample/rules_test.go @@ -520,3 +520,169 @@ func TestRules(t *testing.T) { } } } + +func TestRulesWithDynamicSampler(t *testing.T) { + data := []TestRulesData{ + { + Rules: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "downstream-dynamic", + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "rule_test", + Operator: "=", + Value: int64(1), + }, + }, + Sampler: &config.RulesBasedDownstreamSampler{ + DynamicSampler: &config.DynamicSamplerConfig{ + SampleRate: 10, + FieldList: []string{"http.status_code"}, + AddSampleRateKeyToTrace: true, + AddSampleRateKeyToTraceField: "meta.key", + }, + }, + }, + }, + }, + Spans: []*types.Span{ + { + Event: types.Event{ + Data: map[string]interface{}{ + "rule_test": int64(1), + "http.status_code": "200", + }, + }, + }, + { + Event: types.Event{ + Data: map[string]interface{}{ + "rule_test": int64(1), + "http.status_code": "200", + }, + }, + }, + }, + ExpectedKeep: true, + ExpectedRate: 10, + }, + } + + for _, d := range data { + sampler := &RulesBasedSampler{ + Config: d.Rules, + Logger: &logger.NullLogger{}, + Metrics: &metrics.NullMetrics{}, + } + + trace := &types.Trace{} + + for _, span := range d.Spans { + trace.AddSpan(span) + } + + sampler.Start() + rate, keep := sampler.GetSampleRate(trace) + + assert.Equal(t, d.ExpectedRate, rate, d.Rules) + + // we can only test when we don't expect to keep the trace + if !d.ExpectedKeep { + assert.Equal(t, d.ExpectedKeep, keep, d.Rules) + } + + spans := 
trace.GetSpans() + assert.Len(t, spans, len(d.Spans), "should have the same number of spans as input") + for _, span := range spans { + assert.Equal(t, span.Event.Data, map[string]interface{}{ + "rule_test": int64(1), + "http.status_code": "200", + "meta.key": "200•,", + }, "should add the sampling key to all spans in the trace") + } + } +} + +func TestRulesWithEMADynamicSampler(t *testing.T) { + data := []TestRulesData{ + { + Rules: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "downstream-dynamic", + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "rule_test", + Operator: "=", + Value: int64(1), + }, + }, + Sampler: &config.RulesBasedDownstreamSampler{ + EMADynamicSampler: &config.EMADynamicSamplerConfig{ + GoalSampleRate: 10, + FieldList: []string{"http.status_code"}, + AddSampleRateKeyToTrace: true, + AddSampleRateKeyToTraceField: "meta.key", + }, + }, + }, + }, + }, + Spans: []*types.Span{ + { + Event: types.Event{ + Data: map[string]interface{}{ + "rule_test": int64(1), + "http.status_code": "200", + }, + }, + }, + { + Event: types.Event{ + Data: map[string]interface{}{ + "rule_test": int64(1), + "http.status_code": "200", + }, + }, + }, + }, + ExpectedKeep: true, + ExpectedRate: 10, + }, + } + + for _, d := range data { + sampler := &RulesBasedSampler{ + Config: d.Rules, + Logger: &logger.NullLogger{}, + Metrics: &metrics.NullMetrics{}, + } + + trace := &types.Trace{} + + for _, span := range d.Spans { + trace.AddSpan(span) + } + + sampler.Start() + rate, keep := sampler.GetSampleRate(trace) + + assert.Equal(t, d.ExpectedRate, rate, d.Rules) + + // we can only test when we don't expect to keep the trace + if !d.ExpectedKeep { + assert.Equal(t, d.ExpectedKeep, keep, d.Rules) + } + + spans := trace.GetSpans() + assert.Len(t, spans, len(d.Spans), "should have the same number of spans as input") + for _, span := range spans { + assert.Equal(t, span.Event.Data, map[string]interface{}{ + "rule_test": int64(1), 
+ "http.status_code": "200", + "meta.key": "200•,", + }, "should add the sampling key to all spans in the trace") + } + } +} diff --git a/sample/sample.go b/sample/sample.go index 9fb4539cfb..eef4337ca5 100644 --- a/sample/sample.go +++ b/sample/sample.go @@ -11,6 +11,7 @@ import ( type Sampler interface { GetSampleRate(trace *types.Trace) (rate uint, keep bool) + Start() error } // SamplerFactory is used to create new samplers with common (injected) resources @@ -32,30 +33,26 @@ func (s *SamplerFactory) GetSamplerImplementationForDataset(dataset string) Samp switch c := c.(type) { case *config.DeterministicSamplerConfig: - ds := &DeterministicSampler{Config: c, Logger: s.Logger} - ds.Start() - sampler = ds + sampler = &DeterministicSampler{Config: c, Logger: s.Logger} case *config.DynamicSamplerConfig: - ds := &DynamicSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics} - ds.Start() - sampler = ds + sampler = &DynamicSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics} case *config.EMADynamicSamplerConfig: - ds := &EMADynamicSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics} - ds.Start() - sampler = ds + sampler = &EMADynamicSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics} case *config.RulesBasedSamplerConfig: - ds := &RulesBasedSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics} - ds.Start() - sampler = ds + sampler = &RulesBasedSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics} case *config.TotalThroughputSamplerConfig: - ds := &TotalThroughputSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics} - ds.Start() - sampler = ds + sampler = &TotalThroughputSampler{Config: c, Logger: s.Logger, Metrics: s.Metrics} default: s.Logger.Error().Logf("unknown sampler type %T. 
Exiting.", c) os.Exit(1) } + err = sampler.Start() + if err != nil { + s.Logger.Debug().WithField("dataset", dataset).Logf("failed to start sampler") + return nil + } + s.Logger.Debug().WithField("dataset", dataset).Logf("created implementation for sampler type %T", c) return sampler From d0ed028d858e8d9d4782f83ff1850c6fe70c9505 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Fri, 1 Oct 2021 17:46:58 +0100 Subject: [PATCH 076/351] Build and publish multi-arch docker images on tag (#323) * remove now-unused docker orb * update Go to 1.16 + go.mod to 1.16 + Circle cimg/go to 1.16 * new docker image build script + set BuildID ldflags + use ko to perform the multi-arch image build *and* publish * new docker_publish job + runs the new image build script + setup_remote_docker so a docker service is available during the job + caches the ko installation + runs on all build workflow executions - if running on a tag, publishes to Docker Hub - all other builds, publishes to local docker registry Co-authored-by: Robb Kidd --- .circleci/config.yml | 42 ++++++++++++++++++++++++------------------ build/build_docker.sh | 24 ++++++++++++++++++++++++ go.mod | 2 +- 3 files changed, 49 insertions(+), 19 deletions(-) create mode 100755 build/build_docker.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 91e79458a8..97b5462bd0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2,13 +2,12 @@ version: 2.1 orbs: aws-cli: circleci/aws-cli@1.3.0 - docker: circleci/docker@1.3.0 executors: linuxgo: parameters: docker: - - image: cimg/go:1.15 + - image: cimg/go:1.16 - image: redis:6 commands: @@ -124,6 +123,25 @@ jobs: if [[ -z "$version" ]] ; then version=${CIRCLE_SHA1:0:7}; fi aws s3 sync ~/artifacts s3://honeycomb-builds/honeycombio/refinery/$version/ + publish_docker: + executor: linuxgo + steps: + - restore_cache: + keys: + googleko + - run: go install github.com/google/ko@latest + - save_cache: + key: googleko + paths: + - $GOPATH/bin/ko + - 
checkout + - setup_remote_docker + - run: + name: "Publish multi-arch docker image" + command: | + echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin; + build/build_docker.sh + workflows: build: jobs: @@ -155,23 +173,11 @@ workflows: only: /^v.*/ branches: ignore: /.*/ - - docker/publish: - tag: latest - extra_build_args: --build-arg BUILD_ID=${CIRCLE_SHA1:0:7} - image: $CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - requires: - - build - filters: - branches: - only: main - - docker/publish: - tag: latest,${CIRCLE_TAG:1} - extra_build_args: --build-arg BUILD_ID=${CIRCLE_TAG:1} - image: $CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME + - publish_docker: + context: Honeycomb Secrets for Public Repos requires: - build filters: tags: - only: /^v.*/ - branches: - ignore: /.*/ + only: /.*/ + diff --git a/build/build_docker.sh b/build/build_docker.sh new file mode 100755 index 0000000000..c85607e3f0 --- /dev/null +++ b/build/build_docker.sh @@ -0,0 +1,24 @@ +set -o nounset +set -o pipefail +set -o xtrace + +TAGS="latest" +VERSION=${CIRCLE_TAG:-dev} +REPO=${KO_DOCKER_REPO:-ko.local} +if [[ $VERSION != "dev" ]]; then + # set docker username and add version tag + REPO="honeycombio" + TAGS+=",$VERSION" +fi + +unset GOOS +unset GOARCH +export KO_DOCKER_REPO=$REPO +export GOFLAGS="-ldflags=-X=main.BuildID=$VERSION" +export SOURCE_DATE_EPOCH=$(date +%s) +# shellcheck disable=SC2086 +ko publish \ + --tags "${TAGS}" \ + --base-import-paths \ + --platform "linux/amd64,linux/arm64" \ + ./cmd/refinery diff --git a/go.mod b/go.mod index 37809dcc1a..14f622fa26 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/honeycombio/refinery -go 1.14 +go 1.16 require ( github.com/davecgh/go-spew v1.1.1 From 26164890cbadb7ba619a4ad4dd2431b118c22ede Mon Sep 17 00:00:00 2001 From: Robb Kidd Date: Fri, 1 Oct 2021 14:11:43 -0400 Subject: [PATCH 077/351] prepare 1.5.0 release (#326) --- CHANGELOG.md | 23 
+++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2294c1db72..2bbb6c03a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,28 @@ # Refinery Changelog +## 1.5.0 + +### Enhancements + +- Add dynamic sampler support to rules based samplers (#317) [@puckpuck](https://github.com/puckpuck) +- Publish arm64 Docker images (#323) [@MikeGoldsmith](https://github.com/MikeGoldsmith) + +### Maintenance + +- Adds Stalebot (#321) [@JamieDanielson](https://github.com/JamieDanielson) +- Switch licecycle terminology to maintained (#315) [cartermp](https://github.com/cartermp) +- Add NOTICE (#314) [cartermp](https://github.com/cartermp) +- Add issue and PR templates (#307) [@vreynolds](https://github.com/vreynolds) +- Add OSS lifecycle badge (#304) [@vreynolds](https://github.com/vreynolds) +- Add community health files (#303) [@vreynolds](https://github.com/vreynolds) +- Bump github.com/spf13/viper from 1.8.1 to 1.9.0 (#320) [dependabot[bot]] +- Bump github.com/json-iterator/go from 1.1.11 to 1.1.12 (#316) [dependabot[bot]] +- Bump github.com/klauspost/compress from 1.13.4 to 1.13.6 (#319) [dependabot[bot]] +- Bump github.com/fsnotify/fsnotify from 1.5.0 to 1.5.1 (#311) [dependabot[bot]] +- Bump google.golang.org/grpc from 1.39.1 to 1.40.0 (#305) [dependabot[bot]] +- Bump github.com/fsnotify/fsnotify from 1.4.9 to 1.5.0 (#308) [dependabot[bot]] +- Bump github.com/klauspost/compress from 1.13.3 to 1.13.4 (#306) [dependabot[bot]] + ## 1.4.1 ### Fixes From 344d404e43c45ca5b249c5ba11185bb672d2eca9 Mon Sep 17 00:00:00 2001 From: Estelle Poulin Date: Mon, 4 Oct 2021 07:49:31 -0400 Subject: [PATCH 078/351] Patch for race condition on map access (#324) Fixes race bug when using using Prometheus metrics. 
--- metrics/prometheus.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/metrics/prometheus.go b/metrics/prometheus.go index 24c3df836f..0f799328b3 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -79,6 +79,9 @@ func (p *PromMetrics) Register(name string, metricType string) { } func (p *PromMetrics) Increment(name string) { + p.lock.Lock() + defer p.lock.Unlock() + if counterIface, ok := p.metrics[name]; ok { if counter, ok := counterIface.(prometheus.Counter); ok { counter.Inc() @@ -86,6 +89,9 @@ func (p *PromMetrics) Increment(name string) { } } func (p *PromMetrics) Count(name string, n interface{}) { + p.lock.Lock() + defer p.lock.Unlock() + if counterIface, ok := p.metrics[name]; ok { if counter, ok := counterIface.(prometheus.Counter); ok { counter.Add(ConvertNumeric(n)) @@ -93,6 +99,9 @@ func (p *PromMetrics) Count(name string, n interface{}) { } } func (p *PromMetrics) Gauge(name string, val interface{}) { + p.lock.Lock() + defer p.lock.Unlock() + if gaugeIface, ok := p.metrics[name]; ok { if gauge, ok := gaugeIface.(prometheus.Gauge); ok { gauge.Set(ConvertNumeric(val)) @@ -100,6 +109,9 @@ func (p *PromMetrics) Gauge(name string, val interface{}) { } } func (p *PromMetrics) Histogram(name string, obs interface{}) { + p.lock.Lock() + defer p.lock.Unlock() + if histIface, ok := p.metrics[name]; ok { if hist, ok := histIface.(prometheus.Histogram); ok { hist.Observe(ConvertNumeric(obs)) From 204b24431561d8ec41d386c1c9ba6fb12cb3261b Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 6 Oct 2021 09:23:40 +0100 Subject: [PATCH 079/351] Build docker images on all builds and publish on tag (#328) Forked builds will fail the publish_docker step because it tries to login to docker, but the username and password are in CircleCI secrets that are not shared on forked builds. 
--- .circleci/config.yml | 83 ++++++++++++++---------- build/build_docker.sh => build-docker.sh | 18 +++-- 2 files changed, 60 insertions(+), 41 deletions(-) rename build/build_docker.sh => build-docker.sh (52%) diff --git a/.circleci/config.yml b/.circleci/config.yml index 97b5462bd0..bab1dd08ef 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,13 +3,6 @@ version: 2.1 orbs: aws-cli: circleci/aws-cli@1.3.0 -executors: - linuxgo: - parameters: - docker: - - image: cimg/go:1.16 - - image: redis:6 - commands: go-build: parameters: @@ -33,17 +26,19 @@ commands: jobs: test: - executor: linuxgo + docker: + - image: cimg/go:1.16 + - image: redis:6 steps: - checkout - run: - name: install dockerize - command: wget https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && sudo tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz - environment: - DOCKERIZE_VERSION: v0.3.0 + name: install dockerize + command: wget https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && sudo tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz + environment: + DOCKERIZE_VERSION: v0.3.0 - run: - name: Wait for redis - command: dockerize -wait tcp://localhost:6379 -timeout 1m + name: Wait for redis + command: dockerize -wait tcp://localhost:6379 -timeout 1m - run: name: go_test with race command: go test -tags race --race --timeout 60s -v ./... @@ -51,8 +46,9 @@ jobs: name: go_test command: go test -tags all --timeout 60s -v ./... 
- build: - executor: linuxgo + build_binaries: + docker: + - image: cimg/go:1.16 steps: - checkout - go-build: @@ -123,24 +119,33 @@ jobs: if [[ -z "$version" ]] ; then version=${CIRCLE_SHA1:0:7}; fi aws s3 sync ~/artifacts s3://honeycomb-builds/honeycombio/refinery/$version/ - publish_docker: - executor: linuxgo + build_docker: + docker: + - image: cimg/go:1.16 steps: - - restore_cache: - keys: - googleko - run: go install github.com/google/ko@latest - - save_cache: - key: googleko - paths: - - $GOPATH/bin/ko - checkout - setup_remote_docker - - run: - name: "Publish multi-arch docker image" - command: | - echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin; - build/build_docker.sh + - run: ./build-docker.sh + - run: mkdir -p ~/images + - run: docker save honeycombio/refinery | gzip > ~/images/refinery.tar.gz + - persist_to_workspace: + root: ~/ + paths: + - images + - store_artifacts: + path: ~/images + + publish_docker: + docker: + - image: cimg/base:stable + steps: + - attach_workspace: + at: ~/ + - setup_remote_docker + - run: docker load -i ~/images/refinery.tar.gz + - run: echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin; + - run: docker image push -a honeycombio/refinery workflows: build: @@ -149,7 +154,13 @@ workflows: filters: tags: only: /.*/ - - build: + - build_binaries: + requires: + - test + filters: + tags: + only: /.*/ + - build_docker: requires: - test filters: @@ -158,7 +169,7 @@ workflows: - publish_github: context: Honeycomb Secrets for Public Repos requires: - - build + - build_binaries filters: tags: only: /^v.*/ @@ -167,7 +178,7 @@ workflows: - publish_s3: context: Honeycomb Secrets for Public Repos requires: - - build + - build_binaries filters: tags: only: /^v.*/ @@ -176,8 +187,10 @@ workflows: - publish_docker: context: Honeycomb Secrets for Public Repos requires: - - build + - build_docker filters: tags: - only: /.*/ + only: /^v.*/ + branches: + ignore: /.*/ 
diff --git a/build/build_docker.sh b/build-docker.sh similarity index 52% rename from build/build_docker.sh rename to build-docker.sh index c85607e3f0..5d96459106 100755 --- a/build/build_docker.sh +++ b/build-docker.sh @@ -3,17 +3,17 @@ set -o pipefail set -o xtrace TAGS="latest" -VERSION=${CIRCLE_TAG:-dev} -REPO=${KO_DOCKER_REPO:-ko.local} -if [[ $VERSION != "dev" ]]; then - # set docker username and add version tag - REPO="honeycombio" +VERSION="dev" +if [[ -n ${CIRCLE_TAG:-} ]]; then + # trim 'v' prefix if present + VERSION=${CIRCLE_TAG#"v"} + # append version to image tags TAGS+=",$VERSION" fi unset GOOS unset GOARCH -export KO_DOCKER_REPO=$REPO +export KO_DOCKER_REPO="ko.local" export GOFLAGS="-ldflags=-X=main.BuildID=$VERSION" export SOURCE_DATE_EPOCH=$(date +%s) # shellcheck disable=SC2086 @@ -22,3 +22,9 @@ ko publish \ --base-import-paths \ --platform "linux/amd64,linux/arm64" \ ./cmd/refinery + +# update tags to use correct org name +for TAG in ${TAGS//,/ } +do + docker image tag ko.local/refinery honeycombio/refinery:$TAG +done From c039363571365e3f930b61edb8ec36db208c37d7 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 6 Oct 2021 13:01:44 +0100 Subject: [PATCH 080/351] Use RWLock in prometheus metrics (#331) A mutex is used in the prometheus metrics implementation and in #324 was used the lock when using the different metrics types. This PR switches the mutex type to a RWLock to allow concurrent retrieving of metrics from the map. 
Co-authored-by: Robb Kidd --- metrics/prometheus.go | 30 +++++++++++++++--------------- metrics/prometheus_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 15 deletions(-) diff --git a/metrics/prometheus.go b/metrics/prometheus.go index 0f799328b3..5809da919d 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -19,7 +19,7 @@ type PromMetrics struct { // metrics keeps a record of all the registered metrics so we can increment // them by name metrics map[string]interface{} - lock sync.Mutex + lock sync.RWMutex prefix string } @@ -57,21 +57,21 @@ func (p *PromMetrics) Register(name string, metricType string) { switch metricType { case "counter": newmet = promauto.NewCounter(prometheus.CounterOpts{ - Name: name, + Name: name, Namespace: p.prefix, - Help: name, + Help: name, }) case "gauge": newmet = promauto.NewGauge(prometheus.GaugeOpts{ - Name: name, + Name: name, Namespace: p.prefix, - Help: name, + Help: name, }) case "histogram": newmet = promauto.NewHistogram(prometheus.HistogramOpts{ - Name: name, + Name: name, Namespace: p.prefix, - Help: name, + Help: name, }) } @@ -79,8 +79,8 @@ func (p *PromMetrics) Register(name string, metricType string) { } func (p *PromMetrics) Increment(name string) { - p.lock.Lock() - defer p.lock.Unlock() + p.lock.RLock() + defer p.lock.RUnlock() if counterIface, ok := p.metrics[name]; ok { if counter, ok := counterIface.(prometheus.Counter); ok { @@ -89,8 +89,8 @@ func (p *PromMetrics) Increment(name string) { } } func (p *PromMetrics) Count(name string, n interface{}) { - p.lock.Lock() - defer p.lock.Unlock() + p.lock.RLock() + defer p.lock.RUnlock() if counterIface, ok := p.metrics[name]; ok { if counter, ok := counterIface.(prometheus.Counter); ok { @@ -99,8 +99,8 @@ func (p *PromMetrics) Count(name string, n interface{}) { } } func (p *PromMetrics) Gauge(name string, val interface{}) { - p.lock.Lock() - defer p.lock.Unlock() + p.lock.RLock() + defer p.lock.RUnlock() if gaugeIface, ok := 
p.metrics[name]; ok { if gauge, ok := gaugeIface.(prometheus.Gauge); ok { @@ -109,8 +109,8 @@ func (p *PromMetrics) Gauge(name string, val interface{}) { } } func (p *PromMetrics) Histogram(name string, obs interface{}) { - p.lock.Lock() - defer p.lock.Unlock() + p.lock.RLock() + defer p.lock.RUnlock() if histIface, ok := p.metrics[name]; ok { if hist, ok := histIface.(prometheus.Histogram); ok { diff --git a/metrics/prometheus_test.go b/metrics/prometheus_test.go index b0c093caf5..539f926a41 100644 --- a/metrics/prometheus_test.go +++ b/metrics/prometheus_test.go @@ -3,6 +3,7 @@ package metrics import ( + "fmt" "testing" "github.com/honeycombio/refinery/config" @@ -24,3 +25,29 @@ func TestMultipleRegistrations(t *testing.T) { p.Register("test", "counter") } + +func TestRaciness(t *testing.T) { + p := &PromMetrics{ + Logger: &logger.MockLogger{}, + Config: &config.MockConfig{}, + } + + err := p.Start() + + assert.NoError(t, err) + + p.Register("race", "counter") + + // this loop modifying the metric registry and reading it to increment + // a counter should not trigger a race condition + for i := 0; i < 50; i++ { + go func(j int) { + metricName := fmt.Sprintf("metric%d", j) + p.Register(metricName, "counter") + }(i) + + go func(j int) { + p.Increment("race") + }(i) + } +} From d22cfb6aa35704cdb502362439471d309a11f2b3 Mon Sep 17 00:00:00 2001 From: Robb Kidd Date: Fri, 8 Oct 2021 13:40:31 -0400 Subject: [PATCH 081/351] [dev/ci] test in dev and CI the same way (#333) * add convenience make targets for testing * add dockerize to test targets Check for whether Redis is running with Make, so the dev and CI environments run similarly and show the same warnings. Now the tests can be only "make test"! 
* cache the downloaded dockerize in CI * cache go dependencies in CI --- .circleci/config.yml | 31 ++++++++++++++----------- Makefile | 55 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 14 deletions(-) create mode 100644 Makefile diff --git a/.circleci/config.yml b/.circleci/config.yml index bab1dd08ef..c3351e40e5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -31,20 +31,23 @@ jobs: - image: redis:6 steps: - checkout - - run: - name: install dockerize - command: wget https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && sudo tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz - environment: - DOCKERIZE_VERSION: v0.3.0 - - run: - name: Wait for redis - command: dockerize -wait tcp://localhost:6379 -timeout 1m - - run: - name: go_test with race - command: go test -tags race --race --timeout 60s -v ./... - - run: - name: go_test - command: go test -tags all --timeout 60s -v ./... + - restore_cache: + keys: + - v1-dockerize-{{ checksum "Makefile" }} + - v1-dockerize- + - run: make dockerize + - save_cache: + key: v1-dockerize-{{ checksum "Makefile" }} + paths: + - dockerize.tar.gz + - restore_cache: + keys: + - v3-go-mod-{{ checksum "go.sum" }} + - run: make test + - save_cache: + key: v3-go-mod-{{ checksum "go.sum" }} + paths: + - /home/circleci/go/pkg/mod build_binaries: docker: diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..032c4a401a --- /dev/null +++ b/Makefile @@ -0,0 +1,55 @@ +MAKEFLAGS += --warn-undefined-variables +MAKEFLAGS += --no-builtin-rules +MAKEFLAGS += --no-builtin-variables + +.PHONY: test +#: run all tests +test: test_with_race test_all + +.PHONY: test_with_race +#: run only tests tagged with potential race conditions +test_with_race: wait_for_redis + @echo + @echo "+++ testing - race conditions?" 
+ @echo + go test -tags race --race --timeout 60s -v ./... + +.PHONY: test_all +#: run all tests, but with no race condition detection +test_all: wait_for_redis + @echo + @echo "+++ testing - all the tests" + @echo + go test -tags all --timeout 60s -v ./... + +.PHONY: wait_for_redis +# wait for Redis to become available for test suite +wait_for_redis: dockerize + @echo + @echo "+++ We need a Redis running to run the tests." + @echo + @echo "Checking with dockerize $(shell ./dockerize --version)" + @./dockerize -wait tcp://localhost:6379 -timeout 30s + +# ensure the dockerize command is available +dockerize: dockerize.tar.gz + tar xzvmf dockerize.tar.gz + +HOST_OS := $(shell uname -s | tr A-Z a-z) +# You can override this version from an environment variable. +DOCKERIZE_VERSION ?= v0.6.1 +DOCKERIZE_RELEASE_ASSET := dockerize-${HOST_OS}-amd64-${DOCKERIZE_VERSION}.tar.gz + +dockerize.tar.gz: + @echo + @echo "+++ Retrieving dockerize tool for Redis readiness check." + @echo + curl --location --silent --show-error \ + --output dockerize.tar.gz \ + https://github.com/jwilder/dockerize/releases/download/${DOCKERIZE_VERSION}/${DOCKERIZE_RELEASE_ASSET} \ + && file dockerize.tar.gz | grep --silent gzip + +.PHONY: clean +clean: + rm -f dockerize.tar.gz + rm -f dockerize From 5c7a47b25e3550893d7f94eef445165ab82a79df Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Fri, 8 Oct 2021 19:09:12 +0100 Subject: [PATCH 082/351] Prepare v1.5.1 release (#334) Co-authored-by: Robb Kidd --- CHANGELOG.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bbb6c03a0..8ba1d09a41 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ # Refinery Changelog +## 1.5.1 + +### Fixes + +- Fix for race condition in prometheus metrics (#324) [@estheruary](https://github.com/estheruary) +- Update race condition fix to use RWLock instead of Lock (#331) [@MikeGoldsmith](https://github.com/MikeGoldsmith) & 
[@robbkidd](https://github.com/robbkidd) + +### Maintenance + +- Build docker images on all builds and publish only on tag (#328) [@MikeGoldsmith](https://github.com/MikeGoldsmith) + ## 1.5.0 ### Enhancements From 4dbdd024981b9a9ec998189bcf26d662dd54d314 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 13 Oct 2021 11:04:46 +0100 Subject: [PATCH 083/351] Build docker images during publish (#336) When building and publishing a docker image in multiple jobs, only the architecture of the CI runtime is persisted. We want to build and publish all architectures, so we've reverted to building the docker image as part publish step. Co-authored-by: JamieDanielson --- .circleci/config.yml | 29 ++++++++++++++--------------- build-docker.sh | 8 +------- 2 files changed, 15 insertions(+), 22 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index c3351e40e5..8fa8c3451e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -129,26 +129,22 @@ jobs: - run: go install github.com/google/ko@latest - checkout - setup_remote_docker - - run: ./build-docker.sh - - run: mkdir -p ~/images - - run: docker save honeycombio/refinery | gzip > ~/images/refinery.tar.gz - - persist_to_workspace: - root: ~/ - paths: - - images - - store_artifacts: - path: ~/images + - run: + name: build docker images and publish locally + command: ./build-docker.sh publish_docker: docker: - - image: cimg/base:stable + - image: cimg/go:1.16 steps: - - attach_workspace: - at: ~/ + - run: go install github.com/google/ko@latest + - checkout - setup_remote_docker - - run: docker load -i ~/images/refinery.tar.gz - - run: echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin; - - run: docker image push -a honeycombio/refinery + - run: + name: build docker images and publish to Docker Hub + environment: + KO_DOCKER_REPO: honeycombio + command: ./build-docker.sh workflows: build: @@ -173,6 +169,7 @@ 
workflows: context: Honeycomb Secrets for Public Repos requires: - build_binaries + - build_docker filters: tags: only: /^v.*/ @@ -182,6 +179,7 @@ workflows: context: Honeycomb Secrets for Public Repos requires: - build_binaries + - build_docker filters: tags: only: /^v.*/ @@ -190,6 +188,7 @@ workflows: - publish_docker: context: Honeycomb Secrets for Public Repos requires: + - build_binaries - build_docker filters: tags: diff --git a/build-docker.sh b/build-docker.sh index 5d96459106..c27a5091dd 100755 --- a/build-docker.sh +++ b/build-docker.sh @@ -13,7 +13,7 @@ fi unset GOOS unset GOARCH -export KO_DOCKER_REPO="ko.local" +export KO_DOCKER_REPO=${KO_DOCKER_REPO:-ko.local} export GOFLAGS="-ldflags=-X=main.BuildID=$VERSION" export SOURCE_DATE_EPOCH=$(date +%s) # shellcheck disable=SC2086 @@ -22,9 +22,3 @@ ko publish \ --base-import-paths \ --platform "linux/amd64,linux/arm64" \ ./cmd/refinery - -# update tags to use correct org name -for TAG in ${TAGS//,/ } -do - docker image tag ko.local/refinery honeycombio/refinery:$TAG -done From 4693caf0f64200d09ed25317dd277beaa3616e49 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 13 Oct 2021 14:15:04 +0100 Subject: [PATCH 084/351] Prepare v1.5.2 release (#337) Prepares the v1.5.2 release. 
--- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8ba1d09a41..14c6534453 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Refinery Changelog +## 1.5.2 2021-10-13 + +### Fixes + +- Build multi-arch docker images during publish CI step (#336) [@MikeGoldsmith](https://github.com/MikeGoldsmith) + ## 1.5.1 ### Fixes From 36be8a92b864f3e02e06b1f5603c264879eb8bf5 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 13 Oct 2021 15:45:38 +0100 Subject: [PATCH 085/351] Re-add missing docker login when publishing (#338) * re-add missing docker login when publishing * Update env vars for docker login * Fix docker login command Co-authored-by: JamieDanielson --- .circleci/config.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8fa8c3451e..b95cb462ca 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -144,7 +144,9 @@ jobs: name: build docker images and publish to Docker Hub environment: KO_DOCKER_REPO: honeycombio - command: ./build-docker.sh + command: | + echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin; + ./build-docker.sh workflows: build: From 3b21808cdccce581acb0b5f35d2a1b8671c62aa8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Oct 2021 20:48:40 -0600 Subject: [PATCH 086/351] Bump github.com/honeycombio/libhoney-go from 1.15.4 to 1.15.5 (#327) Bumps [github.com/honeycombio/libhoney-go](https://github.com/honeycombio/libhoney-go) from 1.15.4 to 1.15.5. 
- [Release notes](https://github.com/honeycombio/libhoney-go/releases) - [Changelog](https://github.com/honeycombio/libhoney-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/honeycombio/libhoney-go/compare/v1.15.4...v1.15.5) --- updated-dependencies: - dependency-name: github.com/honeycombio/libhoney-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 14f622fa26..a997097945 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/libhoney-go v1.15.4 + github.com/honeycombio/libhoney-go v1.15.5 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.13.6 diff --git a/go.sum b/go.sum index dd84af5811..cba1d4d9e4 100644 --- a/go.sum +++ b/go.sum @@ -223,8 +223,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/libhoney-go v1.15.4 h1:D6UftkvQC9ZnPXK00wET9Le8zxdc+vPeGlqCpWWHS5Y= -github.com/honeycombio/libhoney-go v1.15.4/go.mod h1:heFH+SMgmpF2m3aHwnQqUS5feImLHnzP6RaIvFkWsKU= 
+github.com/honeycombio/libhoney-go v1.15.5 h1:Djren7ovq6pnPljEow3F1uu3FCc9ailigm9qtTPpEWA= +github.com/honeycombio/libhoney-go v1.15.5/go.mod h1:8NyBoM746bz+nw3yQzQF8gtJO/z4mkr/MD5C4r4uC2Y= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= @@ -238,7 +238,7 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -339,8 +339,8 @@ github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/vmihailenco/msgpack/v4 v4.3.11 h1:Q47CePddpNGNhk4GCnAx9DDtASi2rasatE0cd26cZoE= github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod 
h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/msgpack/v5 v5.2.0 h1:ZhIAtVUP1mme8GIlpiAnmTzjSWMexA/uNF2We85DR0w= -github.com/vmihailenco/msgpack/v5 v5.2.0/go.mod h1:fEM7KuHcnm0GvDCztRpw9hV0PuoO2ciTismP6vjggcM= +github.com/vmihailenco/msgpack/v5 v5.3.4 h1:qMKAwOV+meBw2Y8k9cVwAy7qErtYCwBzZ2ellBfvnqc= +github.com/vmihailenco/msgpack/v5 v5.3.4/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= From 6be5d107de09e85aba32235b505810e449102afb Mon Sep 17 00:00:00 2001 From: Ian Smith Date: Tue, 2 Nov 2021 06:59:19 -0700 Subject: [PATCH 087/351] Add an --interface-names flag (#342) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add an --interface-names flag Our config lets you specify which network name to use for peers; this flag makes it easier to discover what interfaces the binary has available. 
Example, on a locally-built image: ``` ± docker run -t ko.local/refinery:e2915c3075fd17b54c155f53b04acf0cff1a027a5610cc7bf25052d0725bd7ed --interface-names lo eth0 ``` --- cmd/refinery/main.go | 21 +++++++++++++++++---- collect/collect_test.go | 10 +++++----- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/cmd/refinery/main.go b/cmd/refinery/main.go index effacd60e7..58552db8e4 100644 --- a/cmd/refinery/main.go +++ b/cmd/refinery/main.go @@ -33,10 +33,11 @@ var BuildID string var version string type Options struct { - ConfigFile string `short:"c" long:"config" description:"Path to config file" default:"/etc/refinery/refinery.toml"` - RulesFile string `short:"r" long:"rules_config" description:"Path to rules config file" default:"/etc/refinery/rules.toml"` - Version bool `short:"v" long:"version" description:"Print version number and exit"` - Debug bool `short:"d" long:"debug" description:"If enabled, runs debug service (runs on the first open port between localhost:6060 and :6069 by default)"` + ConfigFile string `short:"c" long:"config" description:"Path to config file" default:"/etc/refinery/refinery.toml"` + RulesFile string `short:"r" long:"rules_config" description:"Path to rules config file" default:"/etc/refinery/rules.toml"` + Version bool `short:"v" long:"version" description:"Print version number and exit"` + Debug bool `short:"d" long:"debug" description:"If enabled, runs debug service (runs on the first open port between localhost:6060 and :6069 by default)"` + InterfaceNames bool `long:"interface-names" description:"If set, print system's network interface names and exit."` } func main() { @@ -58,6 +59,18 @@ func main() { os.Exit(0) } + if opts.InterfaceNames { + ifaces, err := net.Interfaces() + if err != nil { + fmt.Printf("Error: %s\n", err) + os.Exit(1) + } + for _, i := range ifaces { + fmt.Println(i.Name) + } + os.Exit(0) + } + a := app.App{ Version: version, } diff --git a/collect/collect_test.go b/collect/collect_test.go index 
2d912d1056..2d8b62aeff 100644 --- a/collect/collect_test.go +++ b/collect/collect_test.go @@ -320,14 +320,14 @@ func TestCacheSizeReload(t *testing.T) { coll.AddSpan(&types.Span{TraceID: "2", Event: event}) expectedEvents := 1 - wait := 2 * time.Millisecond + wait := 1 * time.Second check := func() bool { transmission.Mux.RLock() defer transmission.Mux.RUnlock() return len(transmission.Events) == expectedEvents } - assert.Eventually(t, check, 10*wait, wait, "expected one trace evicted and sent") + assert.Eventually(t, check, 60*wait, wait, "expected one trace evicted and sent") conf.Mux.Lock() conf.GetInMemoryCollectorCacheCapacityVal.CacheCapacity = 2 @@ -339,7 +339,7 @@ func TestCacheSizeReload(t *testing.T) { defer coll.mutex.RUnlock() return coll.cache.(*cache.DefaultInMemCache).GetCacheSize() == 2 - }, 10*wait, wait, "cache size to change") + }, 60*wait, wait, "cache size to change") coll.AddSpan(&types.Span{TraceID: "3", Event: event}) time.Sleep(5 * conf.SendTickerVal) @@ -351,7 +351,7 @@ func TestCacheSizeReload(t *testing.T) { conf.ReloadConfig() expectedEvents = 2 - assert.Eventually(t, check, 10*wait, wait, "expected another trace evicted and sent") + assert.Eventually(t, check, 60*wait, wait, "expected another trace evicted and sent") } func TestSampleConfigReload(t *testing.T) { @@ -361,7 +361,7 @@ func TestSampleConfigReload(t *testing.T) { conf := &config.MockConfig{ GetSendDelayVal: 0, - GetTraceTimeoutVal: 10 * time.Millisecond, + GetTraceTimeoutVal: 60 * time.Second, GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1}, SendTickerVal: 2 * time.Millisecond, GetInMemoryCollectorCacheCapacityVal: config.InMemoryCollectorCacheCapacity{CacheCapacity: 10}, From 8c9ac8f000f12e9cb45122c9c1608f568eee001a Mon Sep 17 00:00:00 2001 From: Robb Kidd Date: Wed, 3 Nov 2021 08:52:41 -0400 Subject: [PATCH 088/351] empower apply-labels action to apply labels (#344) --- .github/workflows/apply-labels.yml | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/.github/workflows/apply-labels.yml b/.github/workflows/apply-labels.yml index 7d90af5148..d3293214fc 100644 --- a/.github/workflows/apply-labels.yml +++ b/.github/workflows/apply-labels.yml @@ -1,5 +1,5 @@ name: Apply project labels -on: [issues, pull_request, label] +on: [issues, pull_request_target, label] jobs: apply-labels: runs-on: ubuntu-latest From e61fd1aebf574c7c1c3621aad33b05de34998bf4 Mon Sep 17 00:00:00 2001 From: Ian Smith Date: Thu, 4 Nov 2021 05:32:07 -0700 Subject: [PATCH 089/351] Use alpine as base image (#343) --- .ko.yaml | 1 + 1 file changed, 1 insertion(+) create mode 100644 .ko.yaml diff --git a/.ko.yaml b/.ko.yaml new file mode 100644 index 0000000000..0034ad542f --- /dev/null +++ b/.ko.yaml @@ -0,0 +1 @@ +defaultBaseImage: alpine:3.13 From 0bc1d36555a6ff69bee3341fda6c6332e780b5de Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Fri, 5 Nov 2021 14:16:50 +0000 Subject: [PATCH 090/351] bump libhoney-go to v1.15.6 (#347) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a997097945..5e744a47f3 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/libhoney-go v1.15.5 + github.com/honeycombio/libhoney-go v1.15.6 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.13.6 diff --git a/go.sum b/go.sum index cba1d4d9e4..ecaf0bb103 100644 --- a/go.sum +++ b/go.sum @@ -223,8 +223,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/honeycombio/dynsampler-go v0.2.1 
h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/libhoney-go v1.15.5 h1:Djren7ovq6pnPljEow3F1uu3FCc9ailigm9qtTPpEWA= -github.com/honeycombio/libhoney-go v1.15.5/go.mod h1:8NyBoM746bz+nw3yQzQF8gtJO/z4mkr/MD5C4r4uC2Y= +github.com/honeycombio/libhoney-go v1.15.6 h1:zbwfdo74Gsedmu6OT/oAHv4pfKNoseTXRMA/4e5XWew= +github.com/honeycombio/libhoney-go v1.15.6/go.mod h1:8NyBoM746bz+nw3yQzQF8gtJO/z4mkr/MD5C4r4uC2Y= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= From ac4199238f0a31d2d293699d95004765a5a71436 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Fri, 5 Nov 2021 14:31:12 +0000 Subject: [PATCH 091/351] prepare v1.6.0 release (#348) --- CHANGELOG.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 14c6534453..4dffdf6298 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ # Refinery Changelog +## 1.6.0 2021-11-04 + +- Add an --interface-names flag (#342) | [@ismith](https://github.com/ismith) + +### Fixes + +- bump libhoney-go to v1.15.6 +- empower apply-labels action to apply labels (#344) +- Bump github.com/honeycombio/libhoney-go from 1.15.4 to 1.15.5 (#327) +- Re-add missing docker login when publishing (#338) + ## 1.5.2 2021-10-13 ### Fixes From 4423b1ffdb3bc2bd84f01e6b030760ed949fc3b6 Mon Sep 17 00:00:00 2001 From: JamieDanielson Date: Wed, 10 Nov 2021 17:36:37 -0500 Subject: [PATCH 092/351] Revert "Use alpine as base image (#343)" (#352) This reverts commit 
e61fd1aebf574c7c1c3621aad33b05de34998bf4. --- .ko.yaml | 1 - 1 file changed, 1 deletion(-) delete mode 100644 .ko.yaml diff --git a/.ko.yaml b/.ko.yaml deleted file mode 100644 index 0034ad542f..0000000000 --- a/.ko.yaml +++ /dev/null @@ -1 +0,0 @@ -defaultBaseImage: alpine:3.13 From 59b1eaedaa25c82990eca948cfb046563dffe9f5 Mon Sep 17 00:00:00 2001 From: JamieDanielson Date: Wed, 10 Nov 2021 18:08:30 -0500 Subject: [PATCH 093/351] prep patch release v1.6.1 (#353) * Update changelog * rename RELEASE.md to RELEASING.md --- CHANGELOG.md | 4 ++++ RELEASE.md => RELEASING.md | 0 2 files changed, 4 insertions(+) rename RELEASE.md => RELEASING.md (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4dffdf6298..ca6e581bb7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Refinery Changelog +## 1.6.1 2021-11-10 + +- Revert "Use alpine as base image (#343)" (#352) + ## 1.6.0 2021-11-04 - Add an --interface-names flag (#342) | [@ismith](https://github.com/ismith) diff --git a/RELEASE.md b/RELEASING.md similarity index 100% rename from RELEASE.md rename to RELEASING.md From 3f690568b75391e83f71a4b8988801226c74a1f4 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Wed, 24 Nov 2021 17:54:26 -0700 Subject: [PATCH 094/351] Update dependabot to monthly (#356) --- .github/dependabot.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 2a0f635d7d..90c0fdb305 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -8,7 +8,7 @@ updates: - package-ecosystem: "gomod" # See documentation for possible values directory: "/" # Location of package manifests schedule: - interval: "weekly" + interval: "monthly" labels: - "type: dependencies" reviewers: From 89cac6581d694fdef4fb1edc00845c705cd136a2 Mon Sep 17 00:00:00 2001 From: Ben Darfler Date: Mon, 29 Nov 2021 11:09:00 -0500 Subject: [PATCH 095/351] Improves histogram buckets over the default set (#355) This is an attempt at a usable 
set of buckets for a wide range of metrics Fixes #115 --- metrics/prometheus.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/metrics/prometheus.go b/metrics/prometheus.go index 5809da919d..4161fe78d6 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -72,6 +72,9 @@ func (p *PromMetrics) Register(name string, metricType string) { Name: name, Namespace: p.prefix, Help: name, + // This is an attempt at a usable set of buckets for a wide range of metrics + // 16 buckets, first upper bound of 1, each following upper bound is 4x the previous + Buckets: prometheus.ExponentialBuckets(1, 4, 16), }) } From 115282f7a5184895c7703b98bfe6288d47cf9f3c Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Mon, 29 Nov 2021 20:42:38 +0000 Subject: [PATCH 096/351] Replace internal duplicated code with Husky (#341) * replace internal otlp proto files with public module * replace internal duplicated code with husky * go mod tidy * use shared errors from husky * check for invalid content error * use husky overload to pass body and requestinfo * update husky to v0.2.0 --- go.mod | 6 +- go.sum | 14 +- internal/opentelemetry-proto-gen/README.md | 3 - .../collector/logs/v1/logs_service.pb.go | 213 --- .../collector/logs/v1/logs_service.pb.gw.go | 163 -- .../metrics/v1/metrics_service.pb.go | 213 --- .../metrics/v1/metrics_service.pb.gw.go | 163 -- .../collector/trace/v1/trace_config.pb.go | 361 ---- .../collector/trace/v1/trace_service.pb.go | 213 --- .../collector/trace/v1/trace_service.pb.gw.go | 163 -- .../common/v1/common.pb.go | 430 ----- .../logs/v1/logs.pb.go | 448 ----- .../metrics/experimental/configservice.pb.go | 423 ----- .../metrics/v1/metrics.pb.go | 1501 ----------------- .../resource/v1/resource.pb.go | 100 -- .../trace/v1/trace.pb.go | 815 --------- route/errors.go | 4 +- route/otlp_trace.go | 388 +---- route/otlp_trace_test.go | 13 +- route/route.go | 5 +- 20 files changed, 67 insertions(+), 5572 deletions(-) delete mode 100644 
internal/opentelemetry-proto-gen/README.md delete mode 100644 internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go delete mode 100644 internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go delete mode 100644 internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go delete mode 100644 internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go delete mode 100644 internal/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go delete mode 100644 internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go delete mode 100644 internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go delete mode 100644 internal/opentelemetry-proto-gen/common/v1/common.pb.go delete mode 100644 internal/opentelemetry-proto-gen/logs/v1/logs.pb.go delete mode 100644 internal/opentelemetry-proto-gen/metrics/experimental/configservice.pb.go delete mode 100644 internal/opentelemetry-proto-gen/metrics/v1/metrics.pb.go delete mode 100644 internal/opentelemetry-proto-gen/resource/v1/resource.pb.go delete mode 100644 internal/opentelemetry-proto-gen/trace/v1/trace.pb.go diff --git a/go.mod b/go.mod index 5e744a47f3..fbc797e057 100644 --- a/go.mod +++ b/go.mod @@ -10,13 +10,12 @@ require ( github.com/fsnotify/fsnotify v1.5.1 github.com/go-playground/universal-translator v0.17.0 // indirect github.com/go-playground/validator v9.31.0+incompatible - github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 github.com/gomodule/redigo v1.8.5 github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d - github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 + github.com/honeycombio/husky v0.2.0 github.com/honeycombio/libhoney-go v1.15.6 
github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 @@ -29,7 +28,8 @@ require ( github.com/spf13/viper v1.9.0 github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 - google.golang.org/grpc v1.40.0 + go.opentelemetry.io/proto/otlp v0.11.0 + google.golang.org/grpc v1.42.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect ) diff --git a/go.sum b/go.sum index ecaf0bb103..eafe20de69 100644 --- a/go.sum +++ b/go.sum @@ -60,6 +60,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -67,7 +68,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -80,6 +85,7 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod 
h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= @@ -118,7 +124,6 @@ github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+Pugk github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -223,6 +228,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= +github.com/honeycombio/husky v0.2.0 h1:vjuUU9HtQnNLOk+VLUG7AQ7b/1oO7gL13N10nFTUaXs= +github.com/honeycombio/husky v0.2.0/go.mod h1:OKDdF3gAoP6GtQsMgFJKUqApY+x6T0mWv1S+VHpbH8A= github.com/honeycombio/libhoney-go v1.15.6 h1:zbwfdo74Gsedmu6OT/oAHv4pfKNoseTXRMA/4e5XWew= github.com/honeycombio/libhoney-go v1.15.6/go.mod h1:8NyBoM746bz+nw3yQzQF8gtJO/z4mkr/MD5C4r4uC2Y= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -361,6 +368,8 @@ go.opencensus.io v0.22.4/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= @@ -716,8 +725,9 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/internal/opentelemetry-proto-gen/README.md b/internal/opentelemetry-proto-gen/README.md deleted file mode 100644 index 5cfc56f03c..0000000000 --- a/internal/opentelemetry-proto-gen/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# 
OTLP Protobuf Definitions - -The definitions can be found [here](https://github.com/open-telemetry/opentelemetry-proto/tree/59c488bfb8fb6d0458ad6425758b70259ff4a2bd). \ No newline at end of file diff --git a/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go b/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go deleted file mode 100644 index 0ee0db23ce..0000000000 --- a/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.go +++ /dev/null @@ -1,213 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/collector/logs/v1/logs_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/logs/v1" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ExportLogsServiceRequest struct { - // An array of ResourceLogs. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. 
- ResourceLogs []*v1.ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportLogsServiceRequest) Reset() { *m = ExportLogsServiceRequest{} } -func (m *ExportLogsServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportLogsServiceRequest) ProtoMessage() {} -func (*ExportLogsServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8e3bf87aaa43acd4, []int{0} -} -func (m *ExportLogsServiceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportLogsServiceRequest.Unmarshal(m, b) -} -func (m *ExportLogsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportLogsServiceRequest.Marshal(b, m, deterministic) -} -func (m *ExportLogsServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportLogsServiceRequest.Merge(m, src) -} -func (m *ExportLogsServiceRequest) XXX_Size() int { - return xxx_messageInfo_ExportLogsServiceRequest.Size(m) -} -func (m *ExportLogsServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportLogsServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportLogsServiceRequest proto.InternalMessageInfo - -func (m *ExportLogsServiceRequest) GetResourceLogs() []*v1.ResourceLogs { - if m != nil { - return m.ResourceLogs - } - return nil -} - -type ExportLogsServiceResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportLogsServiceResponse) Reset() { *m = ExportLogsServiceResponse{} } -func (m *ExportLogsServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportLogsServiceResponse) ProtoMessage() {} -func (*ExportLogsServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8e3bf87aaa43acd4, []int{1} -} -func (m 
*ExportLogsServiceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportLogsServiceResponse.Unmarshal(m, b) -} -func (m *ExportLogsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportLogsServiceResponse.Marshal(b, m, deterministic) -} -func (m *ExportLogsServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportLogsServiceResponse.Merge(m, src) -} -func (m *ExportLogsServiceResponse) XXX_Size() int { - return xxx_messageInfo_ExportLogsServiceResponse.Size(m) -} -func (m *ExportLogsServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportLogsServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportLogsServiceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ExportLogsServiceRequest)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest") - proto.RegisterType((*ExportLogsServiceResponse)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/collector/logs/v1/logs_service.proto", fileDescriptor_8e3bf87aaa43acd4) -} - -var fileDescriptor_8e3bf87aaa43acd4 = []byte{ - // 263 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xc8, 0x2f, 0x48, 0xcd, - 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, - 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0xcf, 0xc9, 0x4f, 0x2f, 0xd6, 0x2f, - 0x33, 0x04, 0xd3, 0xf1, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x7a, 0x60, 0x45, 0x42, 0xaa, - 0x28, 0x3a, 0x21, 0x82, 0x7a, 0x70, 0x9d, 0x7a, 0x20, 0x1d, 0x7a, 0x65, 0x86, 0x52, 0x6a, 0xd8, - 0x2c, 0x40, 0x36, 0x16, 0xa2, 0x53, 0x29, 0x8b, 0x4b, 0xc2, 0xb5, 0xa2, 0x20, 0xbf, 0xa8, 0xc4, - 0x27, 0x3f, 0xbd, 0x38, 0x18, 0x62, 0x53, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x90, 0x1f, - 0x17, 0x6f, 0x51, 0x6a, 0x71, 0x7e, 0x69, 0x51, 0x72, 0x6a, 
0x3c, 0x48, 0x8b, 0x04, 0xa3, 0x02, - 0xb3, 0x06, 0xb7, 0x91, 0xa6, 0x1e, 0x36, 0x27, 0x40, 0x2d, 0xd6, 0x0b, 0x82, 0xea, 0x00, 0x99, - 0x17, 0xc4, 0x53, 0x84, 0xc4, 0x53, 0x92, 0xe6, 0x92, 0xc4, 0x62, 0x57, 0x71, 0x41, 0x7e, 0x5e, - 0x71, 0xaa, 0xd1, 0x5c, 0x46, 0x2e, 0x6e, 0x24, 0x71, 0xa1, 0x5e, 0x46, 0x2e, 0x36, 0x88, 0x6a, - 0x21, 0x7b, 0x3d, 0xa2, 0xfc, 0xac, 0x87, 0xcb, 0x23, 0x52, 0x0e, 0xe4, 0x1b, 0x00, 0x71, 0x9d, - 0x12, 0x83, 0x53, 0x1b, 0x23, 0x97, 0x46, 0x66, 0x3e, 0x71, 0x06, 0x39, 0x09, 0x20, 0x99, 0x11, - 0x00, 0x52, 0x13, 0xc0, 0x18, 0xe5, 0x96, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, - 0xab, 0x0f, 0x32, 0x45, 0x17, 0x11, 0x3b, 0x28, 0x86, 0xea, 0x42, 0xe2, 0x2a, 0x3d, 0x35, 0x4f, - 0x3f, 0x1d, 0x4b, 0x9a, 0x48, 0x62, 0x03, 0xcb, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x49, - 0xa7, 0x2f, 0x4a, 0x43, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// LogsServiceClient is the client API for LogsService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type LogsServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
- Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) -} - -type logsServiceClient struct { - cc *grpc.ClientConn -} - -func NewLogsServiceClient(cc *grpc.ClientConn) LogsServiceClient { - return &logsServiceClient{cc} -} - -func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) { - out := new(ExportLogsServiceResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// LogsServiceServer is the server API for LogsService service. -type LogsServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) -} - -// UnimplementedLogsServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedLogsServiceServer struct { -} - -func (*UnimplementedLogsServiceServer) Export(ctx context.Context, req *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") -} - -func RegisterLogsServiceServer(s *grpc.Server, srv LogsServiceServer) { - s.RegisterService(&_LogsService_serviceDesc, srv) -} - -func _LogsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExportLogsServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LogsServiceServer).Export(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LogsServiceServer).Export(ctx, req.(*ExportLogsServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _LogsService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService", - HandlerType: (*LogsServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Export", - Handler: _LogsService_Export_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto", -} diff --git a/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go b/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go deleted file mode 100644 index 8003733add..0000000000 --- a/internal/opentelemetry-proto-gen/collector/logs/v1/logs_service.pb.gw.go +++ /dev/null @@ -1,163 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: opentelemetry/proto/collector/logs/v1/logs_service.proto - -/* -Package v1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -func request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client LogsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportLogsServiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server LogsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportLogsServiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", 
err) - } - - msg, err := server.Export(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterLogsServiceHandlerServer registers the http handlers for service LogsService to "mux". -// UnaryRPC :call LogsServiceServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -func RegisterLogsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server LogsServiceServer) error { - - mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_LogsService_Export_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_LogsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterLogsServiceHandlerFromEndpoint is same as RegisterLogsServiceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterLogsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterLogsServiceHandler(ctx, mux, conn) -} - -// RegisterLogsServiceHandler registers the http handlers for service LogsService to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterLogsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterLogsServiceHandlerClient(ctx, mux, NewLogsServiceClient(conn)) -} - -// RegisterLogsServiceHandlerClient registers the http handlers for service LogsService -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LogsServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LogsServiceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "LogsServiceClient" to call the correct interceptors. 
-func RegisterLogsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client LogsServiceClient) error { - - mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_LogsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_LogsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_LogsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "logs"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_LogsService_Export_0 = runtime.ForwardResponseMessage -) diff --git a/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go b/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go deleted file mode 100644 index 2fe6fe69b0..0000000000 --- a/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.go +++ /dev/null @@ -1,213 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/metrics/v1" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ExportMetricsServiceRequest struct { - // An array of ResourceMetrics. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. 
- ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } -func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsServiceRequest) ProtoMessage() {} -func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_75fb6015e6e64798, []int{0} -} -func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b) -} -func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) -} -func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) -} -func (m *ExportMetricsServiceRequest) XXX_Size() int { - return xxx_messageInfo_ExportMetricsServiceRequest.Size(m) -} -func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo - -func (m *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics { - if m != nil { - return m.ResourceMetrics - } - return nil -} - -type ExportMetricsServiceResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } -func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsServiceResponse) ProtoMessage() {} -func (*ExportMetricsServiceResponse) 
Descriptor() ([]byte, []int) { - return fileDescriptor_75fb6015e6e64798, []int{1} -} -func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b) -} -func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) -} -func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) -} -func (m *ExportMetricsServiceResponse) XXX_Size() int { - return xxx_messageInfo_ExportMetricsServiceResponse.Size(m) -} -func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest") - proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/collector/metrics/v1/metrics_service.proto", fileDescriptor_75fb6015e6e64798) -} - -var fileDescriptor_75fb6015e6e64798 = []byte{ - // 264 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xcb, 0x2f, 0x48, 0xcd, - 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, - 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0x07, 0x89, 0x66, 0x26, 0x17, 0xeb, - 0x97, 0x19, 0xc2, 0x98, 0xf1, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x7a, 0x60, 0xa5, 0x42, - 0x1a, 0x28, 0xfa, 0x21, 0x82, 0x7a, 0x70, 0xfd, 0x7a, 0x50, 0x4d, 0x7a, 0x65, 0x86, 0x52, 0x3a, - 0xd8, 0x6c, 0xc2, 0x34, 0x1f, 0x62, 0x84, 0x52, 0x25, 0x97, 0xb4, 0x6b, 0x45, 0x41, 0x7e, 0x51, - 0x89, 
0x2f, 0x44, 0x38, 0x18, 0x62, 0x6b, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x50, 0x14, - 0x97, 0x40, 0x51, 0x6a, 0x71, 0x7e, 0x69, 0x51, 0x72, 0x6a, 0x3c, 0x54, 0xa3, 0x04, 0xa3, 0x02, - 0xb3, 0x06, 0xb7, 0x91, 0xbe, 0x1e, 0x36, 0x17, 0x21, 0xdc, 0xa1, 0x17, 0x04, 0xd5, 0x07, 0x35, - 0x38, 0x88, 0xbf, 0x08, 0x55, 0x40, 0x49, 0x8e, 0x4b, 0x06, 0xbb, 0xd5, 0xc5, 0x05, 0xf9, 0x79, - 0xc5, 0xa9, 0x46, 0x6b, 0x18, 0xb9, 0xf8, 0x50, 0xa5, 0x84, 0x66, 0x32, 0x72, 0xb1, 0x41, 0xf4, - 0x08, 0xb9, 0xea, 0x11, 0x1b, 0x22, 0x7a, 0x78, 0x3c, 0x28, 0xe5, 0x46, 0xa9, 0x31, 0x10, 0xc7, - 0x2a, 0x31, 0x38, 0xf5, 0x33, 0x72, 0x69, 0x67, 0xe6, 0x13, 0x6d, 0x9c, 0x93, 0x30, 0xaa, 0x49, - 0x01, 0x20, 0x95, 0x01, 0x8c, 0x51, 0x9e, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, - 0xb9, 0xfa, 0x20, 0xb3, 0x74, 0x11, 0x51, 0x89, 0x62, 0xb4, 0x2e, 0x24, 0x62, 0xd3, 0x53, 0xf3, - 0xf4, 0xd3, 0xb1, 0xa7, 0xa4, 0x24, 0x36, 0xb0, 0x12, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, - 0xaa, 0xdd, 0xdf, 0x49, 0x7c, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MetricsServiceClient is the client API for MetricsService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetricsServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
- Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) -} - -type metricsServiceClient struct { - cc *grpc.ClientConn -} - -func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { - return &metricsServiceClient{cc} -} - -func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) { - out := new(ExportMetricsServiceResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MetricsServiceServer is the server API for MetricsService service. -type MetricsServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) -} - -// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedMetricsServiceServer struct { -} - -func (*UnimplementedMetricsServiceServer) Export(ctx context.Context, req *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") -} - -func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { - s.RegisterService(&_MetricsService_serviceDesc, srv) -} - -func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExportMetricsServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricsServiceServer).Export(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _MetricsService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService", - HandlerType: (*MetricsServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Export", - Handler: _MetricsService_Export_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto", -} diff --git a/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go b/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go deleted file mode 100644 index 8158c98a62..0000000000 --- a/internal/opentelemetry-proto-gen/collector/metrics/v1/metrics_service.pb.gw.go +++ /dev/null @@ -1,163 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. 
-// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto - -/* -Package v1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package v1 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportMetricsServiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server MetricsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportMetricsServiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, 
"%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Export(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterMetricsServiceHandlerServer registers the http handlers for service MetricsService to "mux". -// UnaryRPC :call MetricsServiceServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -func RegisterMetricsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MetricsServiceServer) error { - - mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_MetricsService_Export_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterMetricsServiceHandlerFromEndpoint is same as RegisterMetricsServiceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterMetricsServiceHandler(ctx, mux, conn) -} - -// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn)) -} - -// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "MetricsServiceClient" to call the correct interceptors. 
-func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error { - - mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_MetricsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_MetricsService_Export_0 = runtime.ForwardResponseMessage -) diff --git a/internal/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go deleted file mode 100644 index aa4bfb6b00..0000000000 --- a/internal/opentelemetry-proto-gen/collector/trace/v1/trace_config.pb.go +++ /dev/null @@ -1,361 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/trace/v1/trace_config.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// How spans should be sampled: -// - Always off -// - Always on -// - Always follow the parent Span's decision (off if no parent). -type ConstantSampler_ConstantDecision int32 - -const ( - ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 - ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 - ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 -) - -var ConstantSampler_ConstantDecision_name = map[int32]string{ - 0: "ALWAYS_OFF", - 1: "ALWAYS_ON", - 2: "ALWAYS_PARENT", -} - -var ConstantSampler_ConstantDecision_value = map[string]int32{ - "ALWAYS_OFF": 0, - "ALWAYS_ON": 1, - "ALWAYS_PARENT": 2, -} - -func (x ConstantSampler_ConstantDecision) String() string { - return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x)) -} - -func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5936aa8fa6443e6f, []int{1, 0} -} - -// Global configuration of the trace service. All fields must be specified, or -// the default (zero) values will be used for each type. -type TraceConfig struct { - // The global default sampler used to make decisions on span sampling. - // - // Types that are valid to be assigned to Sampler: - // *TraceConfig_ConstantSampler - // *TraceConfig_TraceIdRatioBased - // *TraceConfig_RateLimitingSampler - Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` - // The global default max number of attributes per span. 
- MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` - // The global default max number of annotation events per span. - MaxNumberOfTimedEvents int64 `protobuf:"varint,5,opt,name=max_number_of_timed_events,json=maxNumberOfTimedEvents,proto3" json:"max_number_of_timed_events,omitempty"` - // The global default max number of attributes per timed event. - MaxNumberOfAttributesPerTimedEvent int64 `protobuf:"varint,6,opt,name=max_number_of_attributes_per_timed_event,json=maxNumberOfAttributesPerTimedEvent,proto3" json:"max_number_of_attributes_per_timed_event,omitempty"` - // The global default max number of link entries per span. - MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` - // The global default max number of attributes per span. - MaxNumberOfAttributesPerLink int64 `protobuf:"varint,8,opt,name=max_number_of_attributes_per_link,json=maxNumberOfAttributesPerLink,proto3" json:"max_number_of_attributes_per_link,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TraceConfig) Reset() { *m = TraceConfig{} } -func (m *TraceConfig) String() string { return proto.CompactTextString(m) } -func (*TraceConfig) ProtoMessage() {} -func (*TraceConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_5936aa8fa6443e6f, []int{0} -} -func (m *TraceConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TraceConfig.Unmarshal(m, b) -} -func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) -} -func (m *TraceConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_TraceConfig.Merge(m, src) -} -func (m *TraceConfig) XXX_Size() int { - return xxx_messageInfo_TraceConfig.Size(m) -} -func (m 
*TraceConfig) XXX_DiscardUnknown() { - xxx_messageInfo_TraceConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_TraceConfig proto.InternalMessageInfo - -type isTraceConfig_Sampler interface { - isTraceConfig_Sampler() -} - -type TraceConfig_ConstantSampler struct { - ConstantSampler *ConstantSampler `protobuf:"bytes,1,opt,name=constant_sampler,json=constantSampler,proto3,oneof" json:"constant_sampler,omitempty"` -} -type TraceConfig_TraceIdRatioBased struct { - TraceIdRatioBased *TraceIdRatioBased `protobuf:"bytes,2,opt,name=trace_id_ratio_based,json=traceIdRatioBased,proto3,oneof" json:"trace_id_ratio_based,omitempty"` -} -type TraceConfig_RateLimitingSampler struct { - RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof" json:"rate_limiting_sampler,omitempty"` -} - -func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} -func (*TraceConfig_TraceIdRatioBased) isTraceConfig_Sampler() {} -func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} - -func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { - if m != nil { - return m.Sampler - } - return nil -} - -func (m *TraceConfig) GetConstantSampler() *ConstantSampler { - if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok { - return x.ConstantSampler - } - return nil -} - -func (m *TraceConfig) GetTraceIdRatioBased() *TraceIdRatioBased { - if x, ok := m.GetSampler().(*TraceConfig_TraceIdRatioBased); ok { - return x.TraceIdRatioBased - } - return nil -} - -func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { - if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { - return x.RateLimitingSampler - } - return nil -} - -func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { - if m != nil { - return m.MaxNumberOfAttributes - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfTimedEvents() int64 { - if m != nil { - return m.MaxNumberOfTimedEvents - } - return 0 -} - -func (m 
*TraceConfig) GetMaxNumberOfAttributesPerTimedEvent() int64 { - if m != nil { - return m.MaxNumberOfAttributesPerTimedEvent - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfLinks() int64 { - if m != nil { - return m.MaxNumberOfLinks - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfAttributesPerLink() int64 { - if m != nil { - return m.MaxNumberOfAttributesPerLink - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*TraceConfig) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*TraceConfig_ConstantSampler)(nil), - (*TraceConfig_TraceIdRatioBased)(nil), - (*TraceConfig_RateLimitingSampler)(nil), - } -} - -// Sampler that always makes a constant decision on span sampling. -type ConstantSampler struct { - Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } -func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } -func (*ConstantSampler) ProtoMessage() {} -func (*ConstantSampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5936aa8fa6443e6f, []int{1} -} -func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConstantSampler.Unmarshal(m, b) -} -func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) -} -func (m *ConstantSampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConstantSampler.Merge(m, src) -} -func (m *ConstantSampler) XXX_Size() int { - return xxx_messageInfo_ConstantSampler.Size(m) -} -func (m *ConstantSampler) XXX_DiscardUnknown() { - xxx_messageInfo_ConstantSampler.DiscardUnknown(m) -} - -var 
xxx_messageInfo_ConstantSampler proto.InternalMessageInfo - -func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { - if m != nil { - return m.Decision - } - return ConstantSampler_ALWAYS_OFF -} - -// Sampler that tries to uniformly sample traces with a given ratio. -// The ratio of sampling a trace is equal to that of the specified ratio. -type TraceIdRatioBased struct { - // The desired ratio of sampling. Must be within [0.0, 1.0]. - SamplingRatio float64 `protobuf:"fixed64,1,opt,name=samplingRatio,proto3" json:"samplingRatio,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TraceIdRatioBased) Reset() { *m = TraceIdRatioBased{} } -func (m *TraceIdRatioBased) String() string { return proto.CompactTextString(m) } -func (*TraceIdRatioBased) ProtoMessage() {} -func (*TraceIdRatioBased) Descriptor() ([]byte, []int) { - return fileDescriptor_5936aa8fa6443e6f, []int{2} -} -func (m *TraceIdRatioBased) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TraceIdRatioBased.Unmarshal(m, b) -} -func (m *TraceIdRatioBased) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TraceIdRatioBased.Marshal(b, m, deterministic) -} -func (m *TraceIdRatioBased) XXX_Merge(src proto.Message) { - xxx_messageInfo_TraceIdRatioBased.Merge(m, src) -} -func (m *TraceIdRatioBased) XXX_Size() int { - return xxx_messageInfo_TraceIdRatioBased.Size(m) -} -func (m *TraceIdRatioBased) XXX_DiscardUnknown() { - xxx_messageInfo_TraceIdRatioBased.DiscardUnknown(m) -} - -var xxx_messageInfo_TraceIdRatioBased proto.InternalMessageInfo - -func (m *TraceIdRatioBased) GetSamplingRatio() float64 { - if m != nil { - return m.SamplingRatio - } - return 0 -} - -// Sampler that tries to sample with a rate per time window. -type RateLimitingSampler struct { - // Rate per second. 
- Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } -func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } -func (*RateLimitingSampler) ProtoMessage() {} -func (*RateLimitingSampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5936aa8fa6443e6f, []int{3} -} -func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b) -} -func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) -} -func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitingSampler.Merge(m, src) -} -func (m *RateLimitingSampler) XXX_Size() int { - return xxx_messageInfo_RateLimitingSampler.Size(m) -} -func (m *RateLimitingSampler) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo - -func (m *RateLimitingSampler) GetQps() int64 { - if m != nil { - return m.Qps - } - return 0 -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value) - proto.RegisterType((*TraceConfig)(nil), "opentelemetry.proto.trace.v1.TraceConfig") - proto.RegisterType((*ConstantSampler)(nil), "opentelemetry.proto.trace.v1.ConstantSampler") - proto.RegisterType((*TraceIdRatioBased)(nil), "opentelemetry.proto.trace.v1.TraceIdRatioBased") - proto.RegisterType((*RateLimitingSampler)(nil), "opentelemetry.proto.trace.v1.RateLimitingSampler") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/trace/v1/trace_config.proto", fileDescriptor_5936aa8fa6443e6f) -} - 
-var fileDescriptor_5936aa8fa6443e6f = []byte{ - // 519 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x4d, 0x6b, 0xdb, 0x40, - 0x10, 0x86, 0xad, 0xb8, 0xf9, 0x9a, 0xe0, 0x44, 0x5e, 0x37, 0x45, 0x94, 0x40, 0x53, 0x51, 0xa8, - 0x2f, 0x96, 0x70, 0x7a, 0x28, 0xed, 0xa1, 0x60, 0xe7, 0xb3, 0x60, 0x1c, 0xa3, 0x18, 0x4a, 0x7d, - 0x59, 0x56, 0xf2, 0x5a, 0x5d, 0x2a, 0xed, 0xba, 0xab, 0xb5, 0x49, 0x2f, 0x3d, 0xf5, 0x1f, 0xf5, - 0x0f, 0x16, 0xad, 0x54, 0xd9, 0x72, 0x12, 0x41, 0x6f, 0x9a, 0x79, 0xf7, 0x7d, 0x66, 0x06, 0x8f, - 0x07, 0x5c, 0x31, 0xa7, 0x5c, 0xd1, 0x88, 0xc6, 0x54, 0xc9, 0x9f, 0xee, 0x5c, 0x0a, 0x25, 0x5c, - 0x25, 0x49, 0x40, 0xdd, 0x65, 0x37, 0xfb, 0xc0, 0x81, 0xe0, 0x33, 0x16, 0x3a, 0x5a, 0x43, 0x27, - 0x25, 0x43, 0x96, 0x74, 0xf4, 0x3b, 0x67, 0xd9, 0xb5, 0x7f, 0x6f, 0xc3, 0xc1, 0x38, 0x0d, 0xce, - 0xb5, 0x07, 0x4d, 0xc0, 0x0c, 0x04, 0x4f, 0x14, 0xe1, 0x0a, 0x27, 0x24, 0x9e, 0x47, 0x54, 0x5a, - 0xc6, 0xa9, 0xd1, 0x3e, 0x38, 0xeb, 0x38, 0x55, 0x20, 0xe7, 0x3c, 0x77, 0xdd, 0x65, 0xa6, 0x9b, - 0x9a, 0x77, 0x14, 0x94, 0x53, 0xc8, 0x87, 0xe7, 0x59, 0x7f, 0x6c, 0x8a, 0x25, 0x51, 0x4c, 0x60, - 0x9f, 0x24, 0x74, 0x6a, 0x6d, 0x69, 0xbe, 0x5b, 0xcd, 0xd7, 0x4d, 0x7e, 0x9e, 0x7a, 0xa9, 0xaf, - 0x9f, 0xda, 0x6e, 0x6a, 0x5e, 0x53, 0x6d, 0x26, 0x51, 0x08, 0xc7, 0x92, 0x28, 0x8a, 0x23, 0x16, - 0x33, 0xc5, 0x78, 0x58, 0x0c, 0x51, 0xd7, 0x45, 0xba, 0xd5, 0x45, 0x3c, 0xa2, 0xe8, 0x20, 0x77, - 0xae, 0x06, 0x69, 0xc9, 0x87, 0x69, 0xf4, 0x1e, 0xac, 0x98, 0xdc, 0x63, 0xbe, 0x88, 0x7d, 0x2a, - 0xb1, 0x98, 0x61, 0xa2, 0x94, 0x64, 0xfe, 0x42, 0xd1, 0xc4, 0x7a, 0x76, 0x6a, 0xb4, 0xeb, 0xde, - 0x71, 0x4c, 0xee, 0x87, 0x5a, 0xbe, 0x9d, 0xf5, 0x0a, 0x11, 0x7d, 0x84, 0x97, 0x65, 0xa3, 0x62, - 0x31, 0x9d, 0x62, 0xba, 0xa4, 0x5c, 0x25, 0xd6, 0xb6, 0xb6, 0xbe, 0x58, 0xb3, 0x8e, 0x53, 0xf9, - 0x52, 0xab, 0x68, 0x0c, 0xed, 0xa7, 0x8a, 0xe2, 0x39, 0x95, 0xeb, 0x28, 0x6b, 0x47, 0x93, 0xec, - 0x47, 0x9b, 0x18, 0x51, 0xb9, 0xc2, 
0xa2, 0x0e, 0xb4, 0xca, 0xd4, 0x88, 0xf1, 0xef, 0x89, 0xb5, - 0xab, 0x01, 0xe6, 0x1a, 0x60, 0x90, 0xe6, 0xd1, 0x35, 0xbc, 0xae, 0x6c, 0x22, 0x75, 0x5b, 0x7b, - 0xda, 0x7c, 0xf2, 0x54, 0xf5, 0x94, 0xd4, 0xdf, 0x87, 0xdd, 0xfc, 0xd7, 0xb1, 0xff, 0x18, 0x70, - 0xb4, 0xb1, 0x41, 0x68, 0x02, 0x7b, 0x53, 0x1a, 0xb0, 0x84, 0x09, 0xae, 0x57, 0xf0, 0xf0, 0xec, - 0xd3, 0x7f, 0xad, 0x60, 0x11, 0x5f, 0xe4, 0x14, 0xaf, 0xe0, 0xd9, 0x17, 0x60, 0x6e, 0xaa, 0xe8, - 0x10, 0xa0, 0x37, 0xf8, 0xd2, 0xfb, 0x7a, 0x87, 0x6f, 0xaf, 0xae, 0xcc, 0x1a, 0x6a, 0xc0, 0xfe, - 0xbf, 0x78, 0x68, 0x1a, 0xa8, 0x09, 0x8d, 0x3c, 0x1c, 0xf5, 0xbc, 0xcb, 0xe1, 0xd8, 0xdc, 0xb2, - 0x3f, 0x40, 0xf3, 0xc1, 0x5a, 0xa2, 0x37, 0xd0, 0xd0, 0x53, 0x31, 0x1e, 0xea, 0xac, 0xee, 0xdd, - 0xf0, 0xca, 0x49, 0xfb, 0x2d, 0xb4, 0x1e, 0x59, 0x36, 0x64, 0x42, 0xfd, 0xc7, 0x3c, 0xd1, 0x96, - 0xba, 0x97, 0x7e, 0xf6, 0x7f, 0xc1, 0x2b, 0x26, 0x2a, 0xe7, 0xee, 0x9b, 0x6b, 0x7f, 0xe0, 0x51, - 0x2a, 0x8d, 0x8c, 0xc9, 0x75, 0xc8, 0xd4, 0xb7, 0x85, 0xef, 0x04, 0x22, 0xd6, 0x17, 0xa3, 0xb3, - 0x3a, 0x19, 0x25, 0x56, 0x27, 0x3b, 0x20, 0x21, 0xe5, 0x6e, 0x28, 0xdc, 0x40, 0x44, 0x11, 0x0d, - 0x94, 0x90, 0xc5, 0x45, 0xf1, 0x77, 0xf4, 0x83, 0x77, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x53, - 0xba, 0x65, 0xf8, 0x78, 0x04, 0x00, 0x00, -} diff --git a/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go deleted file mode 100644 index 425527cdb4..0000000000 --- a/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.go +++ /dev/null @@ -1,213 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: opentelemetry/proto/collector/trace/v1/trace_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ExportTraceServiceRequest struct { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. 
- ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } -func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportTraceServiceRequest) ProtoMessage() {} -func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192a962890318cf4, []int{0} -} -func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b) -} -func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) -} -func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) -} -func (m *ExportTraceServiceRequest) XXX_Size() int { - return xxx_messageInfo_ExportTraceServiceRequest.Size(m) -} -func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo - -func (m *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans { - if m != nil { - return m.ResourceSpans - } - return nil -} - -type ExportTraceServiceResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } -func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportTraceServiceResponse) ProtoMessage() {} -func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { - return 
fileDescriptor_192a962890318cf4, []int{1} -} -func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b) -} -func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) -} -func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) -} -func (m *ExportTraceServiceResponse) XXX_Size() int { - return xxx_messageInfo_ExportTraceServiceResponse.Size(m) -} -func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ExportTraceServiceRequest)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest") - proto.RegisterType((*ExportTraceServiceResponse)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/collector/trace/v1/trace_service.proto", fileDescriptor_192a962890318cf4) -} - -var fileDescriptor_192a962890318cf4 = []byte{ - // 265 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0xca, 0x2f, 0x48, 0xcd, - 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, - 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0x2f, 0x29, 0x4a, 0x4c, 0x4e, 0xd5, - 0x2f, 0x33, 0x84, 0x30, 0xe2, 0x8b, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, 0xf5, 0xc0, 0xca, 0x84, - 0xd4, 0x50, 0xf4, 0x42, 0x04, 0xf5, 0xe0, 0x7a, 0xf5, 0xc0, 0x5a, 0xf4, 0xca, 0x0c, 0xa5, 0x34, - 0xb0, 0xd9, 0x81, 0x6a, 0x32, 0x44, 0xb3, 0x52, 0x3e, 0x97, 0xa4, 0x6b, 0x45, 0x41, 0x7e, 0x51, - 0x49, 0x08, 0x48, 0x30, 0x18, 0x62, 0x5b, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 
0x50, 0x10, - 0x17, 0x5f, 0x51, 0x6a, 0x71, 0x7e, 0x69, 0x11, 0xc8, 0x21, 0x05, 0x89, 0x79, 0xc5, 0x12, 0x8c, - 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0xda, 0x7a, 0xd8, 0xdc, 0x01, 0xb3, 0x5d, 0x2f, 0x08, 0xaa, 0x27, - 0x18, 0xa4, 0x25, 0x88, 0xb7, 0x08, 0x99, 0xab, 0x24, 0xc3, 0x25, 0x85, 0xcd, 0xc2, 0xe2, 0x82, - 0xfc, 0xbc, 0xe2, 0x54, 0xa3, 0x45, 0x8c, 0x5c, 0x3c, 0xc8, 0x12, 0x42, 0x13, 0x19, 0xb9, 0xd8, - 0x20, 0xea, 0x85, 0x1c, 0xf5, 0x88, 0xf3, 0xbd, 0x1e, 0x4e, 0x0f, 0x49, 0x39, 0x51, 0x62, 0x04, - 0xc4, 0x89, 0x4a, 0x0c, 0x4e, 0x9d, 0x8c, 0x5c, 0x9a, 0x99, 0xf9, 0x44, 0x1a, 0xe5, 0x24, 0x88, - 0x6c, 0x4a, 0x00, 0x48, 0x55, 0x00, 0x63, 0x94, 0x7b, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, - 0x72, 0x7e, 0xae, 0x3e, 0xc8, 0x1c, 0x5d, 0x44, 0x64, 0xa1, 0x18, 0xab, 0x0b, 0x89, 0xba, 0xf4, - 0xd4, 0x3c, 0xfd, 0x74, 0x6c, 0xa9, 0x24, 0x89, 0x0d, 0xac, 0xc0, 0x18, 0x10, 0x00, 0x00, 0xff, - 0xff, 0xc1, 0x6e, 0x1a, 0x15, 0x56, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// TraceServiceClient is the client API for TraceService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type TraceServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
- Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) -} - -type traceServiceClient struct { - cc *grpc.ClientConn -} - -func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { - return &traceServiceClient{cc} -} - -func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) { - out := new(ExportTraceServiceResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// TraceServiceServer is the server API for TraceService service. -type TraceServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) -} - -// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedTraceServiceServer struct { -} - -func (*UnimplementedTraceServiceServer) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") -} - -func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { - s.RegisterService(&_TraceService_serviceDesc, srv) -} - -func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExportTraceServiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TraceServiceServer).Export(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _TraceService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService", - HandlerType: (*TraceServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Export", - Handler: _TraceService_Export_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto", -} diff --git a/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go b/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go deleted file mode 100644 index 1da38f1cd2..0000000000 --- a/internal/opentelemetry-proto-gen/collector/trace/v1/trace_service.pb.gw.go +++ /dev/null @@ -1,163 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: opentelemetry/proto/collector/trace/v1/trace_service.proto - -/* -Package v1 is a reverse proxy. 
- -It translates gRPC into RESTful JSON APIs. -*/ -package v1 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportTraceServiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportTraceServiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, 
metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Export(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux". -// UnaryRPC :call TraceServiceServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error { - - mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterTraceServiceHandler(ctx, mux, conn) -} - -// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) -} - -// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "TraceServiceClient" to call the correct interceptors. 
-func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { - - mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_TraceService_Export_0 = runtime.ForwardResponseMessage -) diff --git a/internal/opentelemetry-proto-gen/common/v1/common.pb.go b/internal/opentelemetry-proto-gen/common/v1/common.pb.go deleted file mode 100644 index dd951ce8e0..0000000000 --- a/internal/opentelemetry-proto-gen/common/v1/common.pb.go +++ /dev/null @@ -1,430 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/common/v1/common.proto - -package v1 - -import ( - fmt "fmt" - math "math" - - proto "github.com/gogo/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// AnyValue is used to represent any type of attribute value. AnyValue may contain a -// primitive value such as a string or integer or it may contain an arbitrary nested -// object containing arrays, key-value lists and primitives. -type AnyValue struct { - // The value is one of the listed fields. It is valid for all values to be unspecified - // in which case this AnyValue is considered to be "null". - // - // Types that are valid to be assigned to Value: - // *AnyValue_StringValue - // *AnyValue_BoolValue - // *AnyValue_IntValue - // *AnyValue_DoubleValue - // *AnyValue_ArrayValue - // *AnyValue_KvlistValue - Value isAnyValue_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AnyValue) Reset() { *m = AnyValue{} } -func (m *AnyValue) String() string { return proto.CompactTextString(m) } -func (*AnyValue) ProtoMessage() {} -func (*AnyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{0} -} -func (m *AnyValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AnyValue.Unmarshal(m, b) -} -func (m *AnyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AnyValue.Marshal(b, m, deterministic) -} -func (m *AnyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_AnyValue.Merge(m, src) -} -func (m *AnyValue) XXX_Size() int { - return xxx_messageInfo_AnyValue.Size(m) -} -func (m *AnyValue) XXX_DiscardUnknown() { - xxx_messageInfo_AnyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_AnyValue proto.InternalMessageInfo - -type isAnyValue_Value interface { - isAnyValue_Value() -} - -type AnyValue_StringValue struct { - StringValue string 
`protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"` -} -type AnyValue_BoolValue struct { - BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"` -} -type AnyValue_IntValue struct { - IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` -} -type AnyValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"` -} -type AnyValue_ArrayValue struct { - ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof" json:"array_value,omitempty"` -} -type AnyValue_KvlistValue struct { - KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof" json:"kvlist_value,omitempty"` -} - -func (*AnyValue_StringValue) isAnyValue_Value() {} -func (*AnyValue_BoolValue) isAnyValue_Value() {} -func (*AnyValue_IntValue) isAnyValue_Value() {} -func (*AnyValue_DoubleValue) isAnyValue_Value() {} -func (*AnyValue_ArrayValue) isAnyValue_Value() {} -func (*AnyValue_KvlistValue) isAnyValue_Value() {} - -func (m *AnyValue) GetValue() isAnyValue_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *AnyValue) GetStringValue() string { - if x, ok := m.GetValue().(*AnyValue_StringValue); ok { - return x.StringValue - } - return "" -} - -func (m *AnyValue) GetBoolValue() bool { - if x, ok := m.GetValue().(*AnyValue_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (m *AnyValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*AnyValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (m *AnyValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*AnyValue_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (m *AnyValue) GetArrayValue() *ArrayValue { - if x, ok := m.GetValue().(*AnyValue_ArrayValue); ok { - 
return x.ArrayValue - } - return nil -} - -func (m *AnyValue) GetKvlistValue() *KeyValueList { - if x, ok := m.GetValue().(*AnyValue_KvlistValue); ok { - return x.KvlistValue - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*AnyValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*AnyValue_StringValue)(nil), - (*AnyValue_BoolValue)(nil), - (*AnyValue_IntValue)(nil), - (*AnyValue_DoubleValue)(nil), - (*AnyValue_ArrayValue)(nil), - (*AnyValue_KvlistValue)(nil), - } -} - -// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message -// since oneof in AnyValue does not allow repeated fields. -type ArrayValue struct { - // Array of values. The array may be empty (contain 0 elements). - Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ArrayValue) Reset() { *m = ArrayValue{} } -func (m *ArrayValue) String() string { return proto.CompactTextString(m) } -func (*ArrayValue) ProtoMessage() {} -func (*ArrayValue) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{1} -} -func (m *ArrayValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ArrayValue.Unmarshal(m, b) -} -func (m *ArrayValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ArrayValue.Marshal(b, m, deterministic) -} -func (m *ArrayValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArrayValue.Merge(m, src) -} -func (m *ArrayValue) XXX_Size() int { - return xxx_messageInfo_ArrayValue.Size(m) -} -func (m *ArrayValue) XXX_DiscardUnknown() { - xxx_messageInfo_ArrayValue.DiscardUnknown(m) -} - -var xxx_messageInfo_ArrayValue proto.InternalMessageInfo - -func (m *ArrayValue) GetValues() []*AnyValue { - if m != nil { - return m.Values - } - return nil -} - -// KeyValueList is a list of KeyValue 
messages. We need KeyValueList as a message -// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need -// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to -// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches -// are semantically equivalent. -type KeyValueList struct { - // A collection of key/value pairs of key-value pairs. The list may be empty (may - // contain 0 elements). - Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyValueList) Reset() { *m = KeyValueList{} } -func (m *KeyValueList) String() string { return proto.CompactTextString(m) } -func (*KeyValueList) ProtoMessage() {} -func (*KeyValueList) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{2} -} -func (m *KeyValueList) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyValueList.Unmarshal(m, b) -} -func (m *KeyValueList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyValueList.Marshal(b, m, deterministic) -} -func (m *KeyValueList) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyValueList.Merge(m, src) -} -func (m *KeyValueList) XXX_Size() int { - return xxx_messageInfo_KeyValueList.Size(m) -} -func (m *KeyValueList) XXX_DiscardUnknown() { - xxx_messageInfo_KeyValueList.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyValueList proto.InternalMessageInfo - -func (m *KeyValueList) GetValues() []*KeyValue { - if m != nil { - return m.Values - } - return nil -} - -// KeyValue is a key-value pair that is used to store Span attributes, Link -// attributes, etc. 
-type KeyValue struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{3} -} -func (m *KeyValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyValue.Unmarshal(m, b) -} -func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) -} -func (m *KeyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyValue.Merge(m, src) -} -func (m *KeyValue) XXX_Size() int { - return xxx_messageInfo_KeyValue.Size(m) -} -func (m *KeyValue) XXX_DiscardUnknown() { - xxx_messageInfo_KeyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyValue proto.InternalMessageInfo - -func (m *KeyValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *KeyValue) GetValue() *AnyValue { - if m != nil { - return m.Value - } - return nil -} - -// StringKeyValue is a pair of key/value strings. This is the simpler (and faster) version -// of KeyValue that only supports string values. 
-type StringKeyValue struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StringKeyValue) Reset() { *m = StringKeyValue{} } -func (m *StringKeyValue) String() string { return proto.CompactTextString(m) } -func (*StringKeyValue) ProtoMessage() {} -func (*StringKeyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{4} -} -func (m *StringKeyValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StringKeyValue.Unmarshal(m, b) -} -func (m *StringKeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StringKeyValue.Marshal(b, m, deterministic) -} -func (m *StringKeyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringKeyValue.Merge(m, src) -} -func (m *StringKeyValue) XXX_Size() int { - return xxx_messageInfo_StringKeyValue.Size(m) -} -func (m *StringKeyValue) XXX_DiscardUnknown() { - xxx_messageInfo_StringKeyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_StringKeyValue proto.InternalMessageInfo - -func (m *StringKeyValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *StringKeyValue) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// InstrumentationLibrary is a message representing the instrumentation library information -// such as the fully qualified name and version. 
-type InstrumentationLibrary struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InstrumentationLibrary) Reset() { *m = InstrumentationLibrary{} } -func (m *InstrumentationLibrary) String() string { return proto.CompactTextString(m) } -func (*InstrumentationLibrary) ProtoMessage() {} -func (*InstrumentationLibrary) Descriptor() ([]byte, []int) { - return fileDescriptor_62ba46dcb97aa817, []int{5} -} -func (m *InstrumentationLibrary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InstrumentationLibrary.Unmarshal(m, b) -} -func (m *InstrumentationLibrary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InstrumentationLibrary.Marshal(b, m, deterministic) -} -func (m *InstrumentationLibrary) XXX_Merge(src proto.Message) { - xxx_messageInfo_InstrumentationLibrary.Merge(m, src) -} -func (m *InstrumentationLibrary) XXX_Size() int { - return xxx_messageInfo_InstrumentationLibrary.Size(m) -} -func (m *InstrumentationLibrary) XXX_DiscardUnknown() { - xxx_messageInfo_InstrumentationLibrary.DiscardUnknown(m) -} - -var xxx_messageInfo_InstrumentationLibrary proto.InternalMessageInfo - -func (m *InstrumentationLibrary) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *InstrumentationLibrary) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func init() { - proto.RegisterType((*AnyValue)(nil), "opentelemetry.proto.common.v1.AnyValue") - proto.RegisterType((*ArrayValue)(nil), "opentelemetry.proto.common.v1.ArrayValue") - proto.RegisterType((*KeyValueList)(nil), "opentelemetry.proto.common.v1.KeyValueList") - proto.RegisterType((*KeyValue)(nil), "opentelemetry.proto.common.v1.KeyValue") - proto.RegisterType((*StringKeyValue)(nil), 
"opentelemetry.proto.common.v1.StringKeyValue") - proto.RegisterType((*InstrumentationLibrary)(nil), "opentelemetry.proto.common.v1.InstrumentationLibrary") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/common/v1/common.proto", fileDescriptor_62ba46dcb97aa817) -} - -var fileDescriptor_62ba46dcb97aa817 = []byte{ - // 411 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4b, 0xab, 0xd3, 0x40, - 0x14, 0xce, 0xdc, 0xdc, 0xdb, 0x9b, 0x9c, 0x14, 0x91, 0x41, 0xa4, 0x9b, 0x8b, 0xa1, 0x2e, 0x8c, - 0xca, 0x4d, 0x68, 0xdd, 0xb8, 0x51, 0x69, 0x05, 0x89, 0x58, 0xb1, 0x44, 0x70, 0xa1, 0x0b, 0x49, - 0x74, 0x88, 0x43, 0x93, 0x99, 0x3a, 0x99, 0x04, 0xf2, 0xe3, 0xfc, 0x6f, 0x32, 0x8f, 0xf4, 0xb1, - 0x69, 0xe9, 0xee, 0xcc, 0x97, 0xef, 0x71, 0x4e, 0x66, 0x0e, 0xbc, 0xe0, 0x5b, 0xc2, 0x24, 0xa9, - 0x48, 0x4d, 0xa4, 0xe8, 0x93, 0xad, 0xe0, 0x92, 0x27, 0xbf, 0x78, 0x5d, 0x73, 0x96, 0x74, 0x33, - 0x5b, 0xc5, 0x1a, 0xc6, 0x77, 0x47, 0x5c, 0x03, 0xc6, 0x96, 0xd1, 0xcd, 0xa6, 0xff, 0xae, 0xc0, - 0x5b, 0xb0, 0xfe, 0x5b, 0x5e, 0xb5, 0x04, 0x3f, 0x85, 0x71, 0x23, 0x05, 0x65, 0xe5, 0xcf, 0x4e, - 0x9d, 0x27, 0x28, 0x44, 0x91, 0x9f, 0x3a, 0x59, 0x60, 0x50, 0x43, 0x7a, 0x02, 0x50, 0x70, 0x5e, - 0x59, 0xca, 0x55, 0x88, 0x22, 0x2f, 0x75, 0x32, 0x5f, 0x61, 0x86, 0x70, 0x07, 0x3e, 0x65, 0xd2, - 0x7e, 0x77, 0x43, 0x14, 0xb9, 0xa9, 0x93, 0x79, 0x94, 0xc9, 0x5d, 0xc8, 0x6f, 0xde, 0x16, 0x15, - 0xb1, 0x8c, 0xeb, 0x10, 0x45, 0x48, 0x85, 0x18, 0xd4, 0x90, 0x56, 0x10, 0xe4, 0x42, 0xe4, 0xbd, - 0xe5, 0xdc, 0x84, 0x28, 0x0a, 0xe6, 0xcf, 0xe3, 0x93, 0xb3, 0xc4, 0x0b, 0xa5, 0xd0, 0xfa, 0xd4, - 0xc9, 0x20, 0xdf, 0x9d, 0xf0, 0x1a, 0xc6, 0x9b, 0xae, 0xa2, 0xcd, 0xd0, 0xd4, 0x48, 0xdb, 0xbd, - 0x3c, 0x63, 0xf7, 0x89, 0x18, 0xf9, 0x8a, 0x36, 0x52, 0xf5, 0x67, 0x2c, 0x34, 0xb4, 0xbc, 0x85, - 0x1b, 0x6d, 0x35, 0xfd, 0x0c, 0xb0, 0x8f, 0xc5, 0xef, 0x60, 0xa4, 0xe1, 0x66, 0x82, 0x42, 0x37, - 0x0a, 0xe6, 0xcf, 0xce, 0x75, 0x6c, 0xff, 0x7c, 
0x66, 0x65, 0xd3, 0x2f, 0x30, 0x3e, 0x8c, 0xbd, - 0xd8, 0x70, 0x10, 0xef, 0x0c, 0x7f, 0x80, 0x37, 0x60, 0xf8, 0x21, 0xb8, 0x1b, 0xd2, 0x9b, 0x5b, - 0xcd, 0x54, 0x89, 0xdf, 0xd8, 0x31, 0xf4, 0x35, 0x5e, 0xd0, 0xae, 0x1d, 0xfe, 0x35, 0x3c, 0xf8, - 0xaa, 0x5f, 0xc6, 0x89, 0x88, 0x47, 0x87, 0x11, 0xfe, 0xa0, 0xfc, 0x00, 0x8f, 0x3f, 0xb2, 0x46, - 0x8a, 0xb6, 0x26, 0x4c, 0xe6, 0x92, 0x72, 0xb6, 0xa2, 0x85, 0xc8, 0x45, 0x8f, 0x31, 0x5c, 0xb3, - 0xbc, 0xb6, 0x6f, 0x2f, 0xd3, 0x35, 0x9e, 0xc0, 0x6d, 0x47, 0x44, 0x43, 0x39, 0xb3, 0x2e, 0xc3, - 0x71, 0xf9, 0x17, 0x42, 0xca, 0x4f, 0x77, 0xbd, 0x0c, 0xde, 0xeb, 0x72, 0xad, 0xe0, 0x35, 0xfa, - 0xfe, 0xb6, 0xa4, 0xf2, 0x4f, 0x5b, 0x28, 0x42, 0xa2, 0x84, 0xf7, 0xfb, 0x45, 0x3a, 0xf2, 0xb9, - 0x37, 0x6b, 0x55, 0x12, 0x96, 0x94, 0x07, 0xdb, 0x55, 0x8c, 0x34, 0xfe, 0xea, 0x7f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x58, 0xdb, 0x68, 0x5e, 0x85, 0x03, 0x00, 0x00, -} diff --git a/internal/opentelemetry-proto-gen/logs/v1/logs.pb.go b/internal/opentelemetry-proto-gen/logs/v1/logs.pb.go deleted file mode 100644 index 04b6212702..0000000000 --- a/internal/opentelemetry-proto-gen/logs/v1/logs.pb.go +++ /dev/null @@ -1,448 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/logs/v1/logs.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - v11 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" - v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/resource/v1" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Possible values for LogRecord.SeverityNumber. -type SeverityNumber int32 - -const ( - // UNSPECIFIED is the default SeverityNumber, it MUST not be used. - SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED SeverityNumber = 0 - SeverityNumber_SEVERITY_NUMBER_TRACE SeverityNumber = 1 - SeverityNumber_SEVERITY_NUMBER_TRACE2 SeverityNumber = 2 - SeverityNumber_SEVERITY_NUMBER_TRACE3 SeverityNumber = 3 - SeverityNumber_SEVERITY_NUMBER_TRACE4 SeverityNumber = 4 - SeverityNumber_SEVERITY_NUMBER_DEBUG SeverityNumber = 5 - SeverityNumber_SEVERITY_NUMBER_DEBUG2 SeverityNumber = 6 - SeverityNumber_SEVERITY_NUMBER_DEBUG3 SeverityNumber = 7 - SeverityNumber_SEVERITY_NUMBER_DEBUG4 SeverityNumber = 8 - SeverityNumber_SEVERITY_NUMBER_INFO SeverityNumber = 9 - SeverityNumber_SEVERITY_NUMBER_INFO2 SeverityNumber = 10 - SeverityNumber_SEVERITY_NUMBER_INFO3 SeverityNumber = 11 - SeverityNumber_SEVERITY_NUMBER_INFO4 SeverityNumber = 12 - SeverityNumber_SEVERITY_NUMBER_WARN SeverityNumber = 13 - SeverityNumber_SEVERITY_NUMBER_WARN2 SeverityNumber = 14 - SeverityNumber_SEVERITY_NUMBER_WARN3 SeverityNumber = 15 - SeverityNumber_SEVERITY_NUMBER_WARN4 SeverityNumber = 16 - SeverityNumber_SEVERITY_NUMBER_ERROR SeverityNumber = 17 - SeverityNumber_SEVERITY_NUMBER_ERROR2 SeverityNumber = 18 - SeverityNumber_SEVERITY_NUMBER_ERROR3 SeverityNumber = 19 - SeverityNumber_SEVERITY_NUMBER_ERROR4 SeverityNumber = 20 - SeverityNumber_SEVERITY_NUMBER_FATAL SeverityNumber = 21 - SeverityNumber_SEVERITY_NUMBER_FATAL2 SeverityNumber = 22 - SeverityNumber_SEVERITY_NUMBER_FATAL3 SeverityNumber = 23 - SeverityNumber_SEVERITY_NUMBER_FATAL4 SeverityNumber = 24 -) - -var SeverityNumber_name = map[int32]string{ - 0: "SEVERITY_NUMBER_UNSPECIFIED", - 1: "SEVERITY_NUMBER_TRACE", - 2: "SEVERITY_NUMBER_TRACE2", - 3: "SEVERITY_NUMBER_TRACE3", - 4: "SEVERITY_NUMBER_TRACE4", - 5: "SEVERITY_NUMBER_DEBUG", - 6: "SEVERITY_NUMBER_DEBUG2", - 7: 
"SEVERITY_NUMBER_DEBUG3", - 8: "SEVERITY_NUMBER_DEBUG4", - 9: "SEVERITY_NUMBER_INFO", - 10: "SEVERITY_NUMBER_INFO2", - 11: "SEVERITY_NUMBER_INFO3", - 12: "SEVERITY_NUMBER_INFO4", - 13: "SEVERITY_NUMBER_WARN", - 14: "SEVERITY_NUMBER_WARN2", - 15: "SEVERITY_NUMBER_WARN3", - 16: "SEVERITY_NUMBER_WARN4", - 17: "SEVERITY_NUMBER_ERROR", - 18: "SEVERITY_NUMBER_ERROR2", - 19: "SEVERITY_NUMBER_ERROR3", - 20: "SEVERITY_NUMBER_ERROR4", - 21: "SEVERITY_NUMBER_FATAL", - 22: "SEVERITY_NUMBER_FATAL2", - 23: "SEVERITY_NUMBER_FATAL3", - 24: "SEVERITY_NUMBER_FATAL4", -} - -var SeverityNumber_value = map[string]int32{ - "SEVERITY_NUMBER_UNSPECIFIED": 0, - "SEVERITY_NUMBER_TRACE": 1, - "SEVERITY_NUMBER_TRACE2": 2, - "SEVERITY_NUMBER_TRACE3": 3, - "SEVERITY_NUMBER_TRACE4": 4, - "SEVERITY_NUMBER_DEBUG": 5, - "SEVERITY_NUMBER_DEBUG2": 6, - "SEVERITY_NUMBER_DEBUG3": 7, - "SEVERITY_NUMBER_DEBUG4": 8, - "SEVERITY_NUMBER_INFO": 9, - "SEVERITY_NUMBER_INFO2": 10, - "SEVERITY_NUMBER_INFO3": 11, - "SEVERITY_NUMBER_INFO4": 12, - "SEVERITY_NUMBER_WARN": 13, - "SEVERITY_NUMBER_WARN2": 14, - "SEVERITY_NUMBER_WARN3": 15, - "SEVERITY_NUMBER_WARN4": 16, - "SEVERITY_NUMBER_ERROR": 17, - "SEVERITY_NUMBER_ERROR2": 18, - "SEVERITY_NUMBER_ERROR3": 19, - "SEVERITY_NUMBER_ERROR4": 20, - "SEVERITY_NUMBER_FATAL": 21, - "SEVERITY_NUMBER_FATAL2": 22, - "SEVERITY_NUMBER_FATAL3": 23, - "SEVERITY_NUMBER_FATAL4": 24, -} - -func (x SeverityNumber) String() string { - return proto.EnumName(SeverityNumber_name, int32(x)) -} - -func (SeverityNumber) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{0} -} - -// Masks for LogRecord.flags field. 
-type LogRecordFlags int32 - -const ( - LogRecordFlags_LOG_RECORD_FLAG_UNSPECIFIED LogRecordFlags = 0 - LogRecordFlags_LOG_RECORD_FLAG_TRACE_FLAGS_MASK LogRecordFlags = 255 -) - -var LogRecordFlags_name = map[int32]string{ - 0: "LOG_RECORD_FLAG_UNSPECIFIED", - 255: "LOG_RECORD_FLAG_TRACE_FLAGS_MASK", -} - -var LogRecordFlags_value = map[string]int32{ - "LOG_RECORD_FLAG_UNSPECIFIED": 0, - "LOG_RECORD_FLAG_TRACE_FLAGS_MASK": 255, -} - -func (x LogRecordFlags) String() string { - return proto.EnumName(LogRecordFlags_name, int32(x)) -} - -func (LogRecordFlags) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{1} -} - -// A collection of InstrumentationLibraryLogs from a Resource. -type ResourceLogs struct { - // The resource for the logs in this message. - // If this field is not set then no resource info is known. - Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // A list of InstrumentationLibraryLogs that originate from a resource. 
- InstrumentationLibraryLogs []*InstrumentationLibraryLogs `protobuf:"bytes,2,rep,name=instrumentation_library_logs,json=instrumentationLibraryLogs,proto3" json:"instrumentation_library_logs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResourceLogs) Reset() { *m = ResourceLogs{} } -func (m *ResourceLogs) String() string { return proto.CompactTextString(m) } -func (*ResourceLogs) ProtoMessage() {} -func (*ResourceLogs) Descriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{0} -} -func (m *ResourceLogs) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResourceLogs.Unmarshal(m, b) -} -func (m *ResourceLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResourceLogs.Marshal(b, m, deterministic) -} -func (m *ResourceLogs) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceLogs.Merge(m, src) -} -func (m *ResourceLogs) XXX_Size() int { - return xxx_messageInfo_ResourceLogs.Size(m) -} -func (m *ResourceLogs) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceLogs.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceLogs proto.InternalMessageInfo - -func (m *ResourceLogs) GetResource() *v1.Resource { - if m != nil { - return m.Resource - } - return nil -} - -func (m *ResourceLogs) GetInstrumentationLibraryLogs() []*InstrumentationLibraryLogs { - if m != nil { - return m.InstrumentationLibraryLogs - } - return nil -} - -// A collection of Logs produced by an InstrumentationLibrary. -type InstrumentationLibraryLogs struct { - // The instrumentation library information for the logs in this message. - // If this field is not set then no library info is known. - InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` - // A list of log records. 
- Logs []*LogRecord `protobuf:"bytes,2,rep,name=logs,proto3" json:"logs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InstrumentationLibraryLogs) Reset() { *m = InstrumentationLibraryLogs{} } -func (m *InstrumentationLibraryLogs) String() string { return proto.CompactTextString(m) } -func (*InstrumentationLibraryLogs) ProtoMessage() {} -func (*InstrumentationLibraryLogs) Descriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{1} -} -func (m *InstrumentationLibraryLogs) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InstrumentationLibraryLogs.Unmarshal(m, b) -} -func (m *InstrumentationLibraryLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InstrumentationLibraryLogs.Marshal(b, m, deterministic) -} -func (m *InstrumentationLibraryLogs) XXX_Merge(src proto.Message) { - xxx_messageInfo_InstrumentationLibraryLogs.Merge(m, src) -} -func (m *InstrumentationLibraryLogs) XXX_Size() int { - return xxx_messageInfo_InstrumentationLibraryLogs.Size(m) -} -func (m *InstrumentationLibraryLogs) XXX_DiscardUnknown() { - xxx_messageInfo_InstrumentationLibraryLogs.DiscardUnknown(m) -} - -var xxx_messageInfo_InstrumentationLibraryLogs proto.InternalMessageInfo - -func (m *InstrumentationLibraryLogs) GetInstrumentationLibrary() *v11.InstrumentationLibrary { - if m != nil { - return m.InstrumentationLibrary - } - return nil -} - -func (m *InstrumentationLibraryLogs) GetLogs() []*LogRecord { - if m != nil { - return m.Logs - } - return nil -} - -// A log record according to OpenTelemetry Log Data Model: -// https://github.com/open-telemetry/oteps/blob/master/text/logs/0097-log-data-model.md -type LogRecord struct { - // time_unix_nano is the time when the event occurred. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // Value of 0 indicates unknown or missing timestamp. 
- TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // Numerical value of the severity, normalized to values described in Log Data Model. - // [Optional]. - SeverityNumber SeverityNumber `protobuf:"varint,2,opt,name=severity_number,json=severityNumber,proto3,enum=opentelemetry.proto.logs.v1.SeverityNumber" json:"severity_number,omitempty"` - // The severity text (also known as log level). The original string representation as - // it is known at the source. [Optional]. - SeverityText string `protobuf:"bytes,3,opt,name=severity_text,json=severityText,proto3" json:"severity_text,omitempty"` - // Short event identifier that does not contain varying parts. Name describes - // what happened (e.g. "ProcessStarted"). Recommended to be no longer than 50 - // characters. Not guaranteed to be unique in any way. [Optional]. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // A value containing the body of the log record. Can be for example a human-readable - // string message (including multi-line) describing the event in a free form or it can - // be a structured data composed of arrays and maps of other values. [Optional]. - Body *v11.AnyValue `protobuf:"bytes,5,opt,name=body,proto3" json:"body,omitempty"` - // Additional attributes that describe the specific event occurrence. [Optional]. - Attributes []*v11.KeyValue `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes,omitempty"` - DroppedAttributesCount uint32 `protobuf:"varint,7,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // Flags, a bit field. 8 least significant bits are the trace flags as - // defined in W3C Trace Context specification. 24 most significant bits are reserved - // and must be set to 0. 
Readers must not assume that 24 most significant bits - // will be zero and must correctly mask the bits when reading 8-bit trace flag (use - // flags & TRACE_FLAGS_MASK). [Optional]. - Flags uint32 `protobuf:"fixed32,8,opt,name=flags,proto3" json:"flags,omitempty"` - // A unique identifier for a trace. All logs from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes - // is considered invalid. Can be set for logs that are part of request processing - // and have an assigned trace id. [Optional]. - TraceId []byte `protobuf:"bytes,9,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes is considered - // invalid. Can be set for logs that are part of a particular processing span. - // If span_id is present trace_id SHOULD be also present. [Optional]. - SpanId []byte `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogRecord) Reset() { *m = LogRecord{} } -func (m *LogRecord) String() string { return proto.CompactTextString(m) } -func (*LogRecord) ProtoMessage() {} -func (*LogRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_d1c030a3ec7e961e, []int{2} -} -func (m *LogRecord) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogRecord.Unmarshal(m, b) -} -func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic) -} -func (m *LogRecord) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogRecord.Merge(m, src) -} -func (m *LogRecord) XXX_Size() int { - return xxx_messageInfo_LogRecord.Size(m) -} -func (m *LogRecord) XXX_DiscardUnknown() { - xxx_messageInfo_LogRecord.DiscardUnknown(m) -} - -var 
xxx_messageInfo_LogRecord proto.InternalMessageInfo - -func (m *LogRecord) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *LogRecord) GetSeverityNumber() SeverityNumber { - if m != nil { - return m.SeverityNumber - } - return SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED -} - -func (m *LogRecord) GetSeverityText() string { - if m != nil { - return m.SeverityText - } - return "" -} - -func (m *LogRecord) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *LogRecord) GetBody() *v11.AnyValue { - if m != nil { - return m.Body - } - return nil -} - -func (m *LogRecord) GetAttributes() []*v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *LogRecord) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func (m *LogRecord) GetFlags() uint32 { - if m != nil { - return m.Flags - } - return 0 -} - -func (m *LogRecord) GetTraceId() []byte { - if m != nil { - return m.TraceId - } - return nil -} - -func (m *LogRecord) GetSpanId() []byte { - if m != nil { - return m.SpanId - } - return nil -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.logs.v1.SeverityNumber", SeverityNumber_name, SeverityNumber_value) - proto.RegisterEnum("opentelemetry.proto.logs.v1.LogRecordFlags", LogRecordFlags_name, LogRecordFlags_value) - proto.RegisterType((*ResourceLogs)(nil), "opentelemetry.proto.logs.v1.ResourceLogs") - proto.RegisterType((*InstrumentationLibraryLogs)(nil), "opentelemetry.proto.logs.v1.InstrumentationLibraryLogs") - proto.RegisterType((*LogRecord)(nil), "opentelemetry.proto.logs.v1.LogRecord") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/logs/v1/logs.proto", fileDescriptor_d1c030a3ec7e961e) -} - -var fileDescriptor_d1c030a3ec7e961e = []byte{ - // 756 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x95, 0xdf, 0x6f, 0xea, 0x36, - 
0x14, 0xc7, 0x97, 0xf2, 0xdb, 0xa5, 0xd4, 0xf3, 0x5a, 0x9a, 0xd2, 0x69, 0x8d, 0xba, 0xad, 0x63, - 0x9d, 0x0a, 0x6a, 0x60, 0xda, 0xb4, 0xed, 0x25, 0xd0, 0x80, 0x50, 0x29, 0x54, 0x06, 0xba, 0x1f, - 0x2f, 0x51, 0x00, 0x8f, 0x45, 0x03, 0x1b, 0x25, 0x0e, 0x82, 0xbf, 0xef, 0xbe, 0x5c, 0xdd, 0xa7, - 0xfb, 0x1f, 0xdd, 0xab, 0x98, 0x1f, 0x2d, 0x28, 0xa6, 0x4f, 0xd8, 0xe7, 0x73, 0xbe, 0x5f, 0x9f, - 0x73, 0x44, 0x6c, 0x70, 0xcd, 0xa6, 0x84, 0x72, 0x32, 0x26, 0x13, 0xc2, 0xdd, 0x45, 0x71, 0xea, - 0x32, 0xce, 0x8a, 0x63, 0x36, 0xf2, 0x8a, 0xb3, 0x3b, 0xf1, 0x5b, 0x10, 0x21, 0x74, 0xb1, 0x95, - 0xb7, 0x0c, 0x16, 0x04, 0x9f, 0xdd, 0xe5, 0x6e, 0xc2, 0x4c, 0x06, 0x6c, 0x32, 0x61, 0x34, 0xb0, - 0x59, 0xae, 0x96, 0x9a, 0x5c, 0x21, 0x2c, 0xd7, 0x25, 0x1e, 0xf3, 0xdd, 0x01, 0x09, 0xb2, 0xd7, - 0xeb, 0x65, 0xfe, 0xd5, 0x47, 0x05, 0xa4, 0xf1, 0x2a, 0xd4, 0x64, 0x23, 0x0f, 0x99, 0x20, 0xb9, - 0x4e, 0x51, 0x15, 0x4d, 0xc9, 0x1f, 0xea, 0x3f, 0x16, 0xc2, 0x8a, 0xdb, 0xf8, 0xcc, 0xee, 0x0a, - 0x6b, 0x03, 0xbc, 0x91, 0xa2, 0x05, 0xf8, 0xda, 0xa1, 0x1e, 0x77, 0xfd, 0x09, 0xa1, 0xdc, 0xe6, - 0x0e, 0xa3, 0xd6, 0xd8, 0xe9, 0xbb, 0xb6, 0xbb, 0xb0, 0x82, 0xb6, 0xd4, 0x03, 0x2d, 0x92, 0x3f, - 0xd4, 0x7f, 0x29, 0xec, 0xe9, 0xbb, 0xd0, 0xd8, 0x36, 0x68, 0x2e, 0xf5, 0x41, 0x95, 0x38, 0xe7, - 0x48, 0xd9, 0xd5, 0x7b, 0x05, 0xe4, 0xe4, 0x52, 0x44, 0xc1, 0x99, 0xa4, 0xb2, 0x55, 0xbf, 0x3f, - 0x87, 0x16, 0xb5, 0x9a, 0xb2, 0xb4, 0x2c, 0x9c, 0x0d, 0x2f, 0x09, 0xfd, 0x06, 0xa2, 0xaf, 0x3a, - 0xbe, 0xde, 0xdb, 0x71, 0x93, 0x8d, 0x30, 0x19, 0x30, 0x77, 0x88, 0x85, 0xe6, 0xea, 0x43, 0x04, - 0xa4, 0x36, 0x31, 0xf4, 0x1d, 0xc8, 0x70, 0x67, 0x42, 0x2c, 0x9f, 0x3a, 0x73, 0x8b, 0xda, 0x94, - 0x89, 0x82, 0xe3, 0x38, 0x1d, 0x44, 0x7b, 0xd4, 0x99, 0xb7, 0x6c, 0xca, 0x50, 0x17, 0x1c, 0x7b, - 0x64, 0x46, 0x5c, 0x87, 0x2f, 0x2c, 0xea, 0x4f, 0xfa, 0xc4, 0x55, 0x0f, 0x34, 0x25, 0x9f, 0xd1, - 0x7f, 0xda, 0x7b, 0x74, 0x67, 0xa5, 0x69, 0x09, 0x09, 0xce, 0x78, 0x5b, 0x7b, 0xf4, 0x2d, 0x38, - 0xda, 0xb8, 0x72, 0x32, 0xe7, 0x6a, 
0x44, 0x53, 0xf2, 0x29, 0x9c, 0x5e, 0x07, 0xbb, 0x64, 0xce, - 0x11, 0x02, 0x51, 0x6a, 0x4f, 0x88, 0x1a, 0x15, 0x4c, 0xac, 0xd1, 0xef, 0x20, 0xda, 0x67, 0xc3, - 0x85, 0x1a, 0x13, 0xb3, 0xfd, 0xe1, 0x8d, 0xd9, 0x1a, 0x74, 0xf1, 0x6c, 0x8f, 0x7d, 0x82, 0x85, - 0x08, 0xd5, 0x01, 0xb0, 0x39, 0x77, 0x9d, 0xbe, 0xcf, 0x89, 0xa7, 0xc6, 0xc5, 0x04, 0xdf, 0xb2, - 0x78, 0x20, 0x2b, 0x8b, 0x57, 0x52, 0xf4, 0x2b, 0x50, 0x87, 0x2e, 0x9b, 0x4e, 0xc9, 0xd0, 0x7a, - 0x89, 0x5a, 0x03, 0xe6, 0x53, 0xae, 0x26, 0x34, 0x25, 0x7f, 0x84, 0xb3, 0x2b, 0x6e, 0x6c, 0x70, - 0x35, 0xa0, 0xe8, 0x04, 0xc4, 0xfe, 0x1d, 0xdb, 0x23, 0x4f, 0x4d, 0x6a, 0x4a, 0x3e, 0x81, 0x97, - 0x1b, 0x74, 0x0e, 0x92, 0xdc, 0xb5, 0x07, 0xc4, 0x72, 0x86, 0x6a, 0x4a, 0x53, 0xf2, 0x69, 0x9c, - 0x10, 0xfb, 0xc6, 0x10, 0x9d, 0x81, 0x84, 0x37, 0xb5, 0x69, 0x40, 0x80, 0x20, 0xf1, 0x60, 0xdb, - 0x18, 0xde, 0xbc, 0x8b, 0x81, 0xcc, 0xf6, 0x94, 0xd1, 0x25, 0xb8, 0xe8, 0x98, 0xcf, 0x26, 0x6e, - 0x74, 0xff, 0xb6, 0x5a, 0xbd, 0xc7, 0x8a, 0x89, 0xad, 0x5e, 0xab, 0xf3, 0x64, 0x56, 0x1b, 0xb5, - 0x86, 0x79, 0x0f, 0xbf, 0x40, 0xe7, 0xe0, 0x74, 0x37, 0xa1, 0x8b, 0x8d, 0xaa, 0x09, 0x15, 0x94, - 0x03, 0xd9, 0x50, 0xa4, 0xc3, 0x03, 0x29, 0x2b, 0xc1, 0x88, 0x94, 0x95, 0x61, 0x34, 0xec, 0xb8, - 0x7b, 0xb3, 0xd2, 0xab, 0xc3, 0x58, 0x98, 0x4c, 0x20, 0x1d, 0xc6, 0xa5, 0xac, 0x04, 0x13, 0x52, - 0x56, 0x86, 0x49, 0xa4, 0x82, 0x93, 0x5d, 0xd6, 0x68, 0xd5, 0xda, 0x30, 0x15, 0x56, 0x48, 0x40, - 0x74, 0x08, 0x64, 0xa8, 0x04, 0x0f, 0x65, 0xa8, 0x0c, 0xd3, 0x61, 0x47, 0xfd, 0x69, 0xe0, 0x16, - 0x3c, 0x0a, 0x13, 0x05, 0x44, 0x87, 0x19, 0x19, 0x2a, 0xc1, 0x63, 0x19, 0x2a, 0x43, 0x18, 0x86, - 0x4c, 0x8c, 0xdb, 0x18, 0x7e, 0x19, 0x36, 0x0c, 0x81, 0x74, 0x88, 0xa4, 0xac, 0x04, 0xbf, 0x92, - 0xb2, 0x32, 0x3c, 0x09, 0x3b, 0xae, 0x66, 0x74, 0x8d, 0x26, 0x3c, 0x0d, 0x93, 0x09, 0xa4, 0xc3, - 0xac, 0x94, 0x95, 0xe0, 0x99, 0x94, 0x95, 0xa1, 0x7a, 0xf3, 0x17, 0xc8, 0x6c, 0x6e, 0xa4, 0x9a, - 0xf8, 0x16, 0x2e, 0xc1, 0x45, 0xb3, 0x5d, 0xb7, 0xb0, 0x59, 0x6d, 0xe3, 
0x7b, 0xab, 0xd6, 0x34, - 0xea, 0x3b, 0x7f, 0xe2, 0xef, 0x81, 0xb6, 0x9b, 0x20, 0xfe, 0x71, 0x62, 0xd9, 0xb1, 0x1e, 0x8d, - 0xce, 0x03, 0xfc, 0xa4, 0x54, 0xfe, 0x07, 0xdf, 0x38, 0x6c, 0xdf, 0x1d, 0x55, 0x09, 0xee, 0x42, - 0xef, 0x29, 0x08, 0x3d, 0x29, 0xff, 0xfc, 0x31, 0x72, 0xf8, 0x7f, 0x7e, 0x3f, 0xf8, 0xf2, 0x8b, - 0x81, 0xe8, 0xf6, 0xe5, 0xd5, 0xdb, 0xf2, 0xb8, 0x5d, 0xbe, 0x81, 0x23, 0x42, 0x8b, 0xa3, 0xcd, - 0xdb, 0xdb, 0x8f, 0x8b, 0x68, 0xe9, 0x73, 0x00, 0x00, 0x00, 0xff, 0xff, 0x03, 0x3d, 0xc3, 0x0c, - 0xa1, 0x07, 0x00, 0x00, -} diff --git a/internal/opentelemetry-proto-gen/metrics/experimental/configservice.pb.go b/internal/opentelemetry-proto-gen/metrics/experimental/configservice.pb.go deleted file mode 100644 index eebb0aa75e..0000000000 --- a/internal/opentelemetry-proto-gen/metrics/experimental/configservice.pb.go +++ /dev/null @@ -1,423 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/metrics/experimental/configservice.proto - -package experimental - -import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/resource/v1" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type MetricConfigRequest struct { - // Required. The resource for which configuration should be returned. 
- Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // Optional. The value of MetricConfigResponse.fingerprint for the last - // configuration that the caller received and successfully applied. - LastKnownFingerprint []byte `protobuf:"bytes,2,opt,name=last_known_fingerprint,json=lastKnownFingerprint,proto3" json:"last_known_fingerprint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricConfigRequest) Reset() { *m = MetricConfigRequest{} } -func (m *MetricConfigRequest) String() string { return proto.CompactTextString(m) } -func (*MetricConfigRequest) ProtoMessage() {} -func (*MetricConfigRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_79b5d4ea55caf90b, []int{0} -} -func (m *MetricConfigRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricConfigRequest.Unmarshal(m, b) -} -func (m *MetricConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricConfigRequest.Marshal(b, m, deterministic) -} -func (m *MetricConfigRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricConfigRequest.Merge(m, src) -} -func (m *MetricConfigRequest) XXX_Size() int { - return xxx_messageInfo_MetricConfigRequest.Size(m) -} -func (m *MetricConfigRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MetricConfigRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricConfigRequest proto.InternalMessageInfo - -func (m *MetricConfigRequest) GetResource() *v1.Resource { - if m != nil { - return m.Resource - } - return nil -} - -func (m *MetricConfigRequest) GetLastKnownFingerprint() []byte { - if m != nil { - return m.LastKnownFingerprint - } - return nil -} - -type MetricConfigResponse struct { - // Optional. The fingerprint associated with this MetricConfigResponse. Each - // change in configs yields a different fingerprint. 
The resource SHOULD copy - // this value to MetricConfigRequest.last_known_fingerprint for the next - // configuration request. If there are no changes between fingerprint and - // MetricConfigRequest.last_known_fingerprint, then all other fields besides - // fingerprint in the response are optional, or the same as the last update if - // present. - // - // The exact mechanics of generating the fingerprint is up to the - // implementation. However, a fingerprint must be deterministically determined - // by the configurations -- the same configuration will generate the same - // fingerprint on any instance of an implementation. Hence using a timestamp is - // unacceptable, but a deterministic hash is fine. - Fingerprint []byte `protobuf:"bytes,1,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"` - // A single metric may match multiple schedules. In such cases, the schedule - // that specifies the smallest period is applied. - // - // Note, for optimization purposes, it is recommended to use as few schedules - // as possible to capture all required metric updates. Where you can be - // conservative, do take full advantage of the inclusion/exclusion patterns to - // capture as much of your targeted metrics. - Schedules []*MetricConfigResponse_Schedule `protobuf:"bytes,2,rep,name=schedules,proto3" json:"schedules,omitempty"` - // Optional. The client is suggested to wait this long (in seconds) before - // pinging the configuration service again. 
- SuggestedWaitTimeSec int32 `protobuf:"varint,3,opt,name=suggested_wait_time_sec,json=suggestedWaitTimeSec,proto3" json:"suggested_wait_time_sec,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricConfigResponse) Reset() { *m = MetricConfigResponse{} } -func (m *MetricConfigResponse) String() string { return proto.CompactTextString(m) } -func (*MetricConfigResponse) ProtoMessage() {} -func (*MetricConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_79b5d4ea55caf90b, []int{1} -} -func (m *MetricConfigResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricConfigResponse.Unmarshal(m, b) -} -func (m *MetricConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricConfigResponse.Marshal(b, m, deterministic) -} -func (m *MetricConfigResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricConfigResponse.Merge(m, src) -} -func (m *MetricConfigResponse) XXX_Size() int { - return xxx_messageInfo_MetricConfigResponse.Size(m) -} -func (m *MetricConfigResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MetricConfigResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricConfigResponse proto.InternalMessageInfo - -func (m *MetricConfigResponse) GetFingerprint() []byte { - if m != nil { - return m.Fingerprint - } - return nil -} - -func (m *MetricConfigResponse) GetSchedules() []*MetricConfigResponse_Schedule { - if m != nil { - return m.Schedules - } - return nil -} - -func (m *MetricConfigResponse) GetSuggestedWaitTimeSec() int32 { - if m != nil { - return m.SuggestedWaitTimeSec - } - return 0 -} - -// A Schedule is used to apply a particular scheduling configuration to -// a metric. If a metric name matches a schedule's patterns, then the metric -// adopts the configuration specified by the schedule. 
-type MetricConfigResponse_Schedule struct { - // Metrics with names that match a rule in the inclusion_patterns are - // targeted by this schedule. Metrics that match the exclusion_patterns - // are not targeted for this schedule, even if they match an inclusion - // pattern. - ExclusionPatterns []*MetricConfigResponse_Schedule_Pattern `protobuf:"bytes,1,rep,name=exclusion_patterns,json=exclusionPatterns,proto3" json:"exclusion_patterns,omitempty"` - InclusionPatterns []*MetricConfigResponse_Schedule_Pattern `protobuf:"bytes,2,rep,name=inclusion_patterns,json=inclusionPatterns,proto3" json:"inclusion_patterns,omitempty"` - // Describes the collection period for each metric in seconds. - // A period of 0 means to not export. - PeriodSec int32 `protobuf:"varint,3,opt,name=period_sec,json=periodSec,proto3" json:"period_sec,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricConfigResponse_Schedule) Reset() { *m = MetricConfigResponse_Schedule{} } -func (m *MetricConfigResponse_Schedule) String() string { return proto.CompactTextString(m) } -func (*MetricConfigResponse_Schedule) ProtoMessage() {} -func (*MetricConfigResponse_Schedule) Descriptor() ([]byte, []int) { - return fileDescriptor_79b5d4ea55caf90b, []int{1, 0} -} -func (m *MetricConfigResponse_Schedule) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricConfigResponse_Schedule.Unmarshal(m, b) -} -func (m *MetricConfigResponse_Schedule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricConfigResponse_Schedule.Marshal(b, m, deterministic) -} -func (m *MetricConfigResponse_Schedule) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricConfigResponse_Schedule.Merge(m, src) -} -func (m *MetricConfigResponse_Schedule) XXX_Size() int { - return xxx_messageInfo_MetricConfigResponse_Schedule.Size(m) -} -func (m *MetricConfigResponse_Schedule) XXX_DiscardUnknown() { - 
xxx_messageInfo_MetricConfigResponse_Schedule.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricConfigResponse_Schedule proto.InternalMessageInfo - -func (m *MetricConfigResponse_Schedule) GetExclusionPatterns() []*MetricConfigResponse_Schedule_Pattern { - if m != nil { - return m.ExclusionPatterns - } - return nil -} - -func (m *MetricConfigResponse_Schedule) GetInclusionPatterns() []*MetricConfigResponse_Schedule_Pattern { - if m != nil { - return m.InclusionPatterns - } - return nil -} - -func (m *MetricConfigResponse_Schedule) GetPeriodSec() int32 { - if m != nil { - return m.PeriodSec - } - return 0 -} - -// A light-weight pattern that can match 1 or more -// metrics, for which this schedule will apply. The string is used to -// match against metric names. It should not exceed 100k characters. -type MetricConfigResponse_Schedule_Pattern struct { - // Types that are valid to be assigned to Match: - // *MetricConfigResponse_Schedule_Pattern_Equals - // *MetricConfigResponse_Schedule_Pattern_StartsWith - Match isMetricConfigResponse_Schedule_Pattern_Match `protobuf_oneof:"match"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricConfigResponse_Schedule_Pattern) Reset() { *m = MetricConfigResponse_Schedule_Pattern{} } -func (m *MetricConfigResponse_Schedule_Pattern) String() string { return proto.CompactTextString(m) } -func (*MetricConfigResponse_Schedule_Pattern) ProtoMessage() {} -func (*MetricConfigResponse_Schedule_Pattern) Descriptor() ([]byte, []int) { - return fileDescriptor_79b5d4ea55caf90b, []int{1, 0, 0} -} -func (m *MetricConfigResponse_Schedule_Pattern) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.Unmarshal(m, b) -} -func (m *MetricConfigResponse_Schedule_Pattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.Marshal(b, m, deterministic) -} 
-func (m *MetricConfigResponse_Schedule_Pattern) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.Merge(m, src) -} -func (m *MetricConfigResponse_Schedule_Pattern) XXX_Size() int { - return xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.Size(m) -} -func (m *MetricConfigResponse_Schedule_Pattern) XXX_DiscardUnknown() { - xxx_messageInfo_MetricConfigResponse_Schedule_Pattern.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricConfigResponse_Schedule_Pattern proto.InternalMessageInfo - -type isMetricConfigResponse_Schedule_Pattern_Match interface { - isMetricConfigResponse_Schedule_Pattern_Match() -} - -type MetricConfigResponse_Schedule_Pattern_Equals struct { - Equals string `protobuf:"bytes,1,opt,name=equals,proto3,oneof" json:"equals,omitempty"` -} -type MetricConfigResponse_Schedule_Pattern_StartsWith struct { - StartsWith string `protobuf:"bytes,2,opt,name=starts_with,json=startsWith,proto3,oneof" json:"starts_with,omitempty"` -} - -func (*MetricConfigResponse_Schedule_Pattern_Equals) isMetricConfigResponse_Schedule_Pattern_Match() { -} -func (*MetricConfigResponse_Schedule_Pattern_StartsWith) isMetricConfigResponse_Schedule_Pattern_Match() { -} - -func (m *MetricConfigResponse_Schedule_Pattern) GetMatch() isMetricConfigResponse_Schedule_Pattern_Match { - if m != nil { - return m.Match - } - return nil -} - -func (m *MetricConfigResponse_Schedule_Pattern) GetEquals() string { - if x, ok := m.GetMatch().(*MetricConfigResponse_Schedule_Pattern_Equals); ok { - return x.Equals - } - return "" -} - -func (m *MetricConfigResponse_Schedule_Pattern) GetStartsWith() string { - if x, ok := m.GetMatch().(*MetricConfigResponse_Schedule_Pattern_StartsWith); ok { - return x.StartsWith - } - return "" -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*MetricConfigResponse_Schedule_Pattern) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*MetricConfigResponse_Schedule_Pattern_Equals)(nil), - (*MetricConfigResponse_Schedule_Pattern_StartsWith)(nil), - } -} - -func init() { - proto.RegisterType((*MetricConfigRequest)(nil), "opentelemetry.proto.metrics.experimental.MetricConfigRequest") - proto.RegisterType((*MetricConfigResponse)(nil), "opentelemetry.proto.metrics.experimental.MetricConfigResponse") - proto.RegisterType((*MetricConfigResponse_Schedule)(nil), "opentelemetry.proto.metrics.experimental.MetricConfigResponse.Schedule") - proto.RegisterType((*MetricConfigResponse_Schedule_Pattern)(nil), "opentelemetry.proto.metrics.experimental.MetricConfigResponse.Schedule.Pattern") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/metrics/experimental/configservice.proto", fileDescriptor_79b5d4ea55caf90b) -} - -var fileDescriptor_79b5d4ea55caf90b = []byte{ - // 499 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x94, 0x4d, 0x6f, 0xd3, 0x4c, - 0x10, 0xc7, 0x9f, 0x4d, 0x9f, 0xbe, 0x64, 0x52, 0x09, 0xb1, 0x44, 0x60, 0x45, 0x42, 0x0a, 0x3d, - 0x05, 0xa1, 0xae, 0xd5, 0x00, 0x37, 0xe0, 0x10, 0x04, 0x05, 0x21, 0xd4, 0xc8, 0x41, 0xaa, 0xc4, - 0xc5, 0x72, 0x9d, 0xa9, 0xbd, 0xc2, 0xde, 0x75, 0x77, 0xc7, 0x49, 0xb9, 0xf0, 0x19, 0x10, 0xe2, - 0x0b, 0xf0, 0x99, 0xf8, 0x36, 0x9c, 0x90, 0x5f, 0xea, 0x38, 0x22, 0x87, 0x8a, 0x97, 0xdb, 0xe4, - 0x3f, 0x33, 0xbf, 0xff, 0x64, 0x6c, 0x0f, 0x3c, 0xd1, 0x19, 0x2a, 0xc2, 0x04, 0x53, 0x24, 0xf3, - 0xd1, 0xcd, 0x8c, 0x26, 0xed, 0x16, 0xb1, 0x0c, 0xad, 0x8b, 0x97, 0x19, 0x1a, 0x99, 0xa2, 0xa2, - 0x20, 0x71, 0x43, 0xad, 0xce, 0x65, 0x64, 0xd1, 0x2c, 0x64, 0x88, 0xa2, 0x2c, 0xe4, 0xa3, 0xb5, - 0xee, 0x4a, 0x14, 0x75, 0xb7, 0x68, 0x77, 0x0f, 0xc4, 0x26, 0x1f, 0x83, 0x56, 0xe7, 0x26, 0x44, - 0x77, 0x71, 0xd4, 0xc4, 0x15, 0xe4, 0xe0, 0x0b, 0x83, 0x5b, 0x6f, 0x4b, 0xd0, 0xf3, 0xd2, 0xd7, - 0xc3, 0x8b, 0x1c, 
0x2d, 0xf1, 0x17, 0xb0, 0x77, 0x55, 0xe9, 0xb0, 0x21, 0x1b, 0xf5, 0xc6, 0xf7, - 0xc5, 0xa6, 0x21, 0x1a, 0xdc, 0xe2, 0x48, 0x78, 0x75, 0xec, 0x35, 0xad, 0xfc, 0x11, 0xdc, 0x4e, - 0x02, 0x4b, 0xfe, 0x07, 0xa5, 0x97, 0xca, 0x3f, 0x97, 0x2a, 0x42, 0x93, 0x19, 0xa9, 0xc8, 0xe9, - 0x0c, 0xd9, 0x68, 0xdf, 0xeb, 0x17, 0xd9, 0x37, 0x45, 0xf2, 0xe5, 0x2a, 0x77, 0xf0, 0xfd, 0x7f, - 0xe8, 0xaf, 0x0f, 0x65, 0x33, 0xad, 0x2c, 0xf2, 0x21, 0xf4, 0xda, 0x0c, 0x56, 0x32, 0xda, 0x12, - 0x47, 0xe8, 0xda, 0x30, 0xc6, 0x79, 0x9e, 0xa0, 0x75, 0x3a, 0xc3, 0xad, 0x51, 0x6f, 0x7c, 0x2c, - 0xae, 0xbb, 0x3d, 0xb1, 0xc9, 0x54, 0xcc, 0x6a, 0x9e, 0xb7, 0x22, 0xf3, 0xc7, 0x70, 0xc7, 0xe6, - 0x51, 0x84, 0x96, 0x70, 0xee, 0x2f, 0x03, 0x49, 0x3e, 0xc9, 0x14, 0x7d, 0x8b, 0xa1, 0xb3, 0x35, - 0x64, 0xa3, 0x6d, 0xaf, 0xdf, 0xa4, 0x4f, 0x03, 0x49, 0xef, 0x64, 0x8a, 0x33, 0x0c, 0x07, 0x3f, - 0x3a, 0xb0, 0x77, 0x85, 0xe3, 0x9f, 0x80, 0xe3, 0x65, 0x98, 0xe4, 0x56, 0x6a, 0xe5, 0x67, 0x01, - 0x11, 0x1a, 0x65, 0x1d, 0x56, 0xce, 0x7c, 0xf2, 0x97, 0x66, 0x16, 0xd3, 0x8a, 0xeb, 0xdd, 0x6c, - 0xac, 0x6a, 0xc5, 0x16, 0xfe, 0x52, 0xfd, 0xe2, 0xdf, 0xf9, 0x47, 0xfe, 0x8d, 0x55, 0xe3, 0x7f, - 0x17, 0xa0, 0xc0, 0xe8, 0x79, 0x6b, 0x6d, 0xdd, 0x4a, 0x29, 0x76, 0x75, 0x02, 0xbb, 0x75, 0x29, - 0x77, 0x60, 0x07, 0x2f, 0xf2, 0x20, 0xb1, 0xe5, 0x13, 0xef, 0xbe, 0xfa, 0xcf, 0xab, 0x7f, 0xf3, - 0x7b, 0xd0, 0xb3, 0x14, 0x18, 0xb2, 0xfe, 0x52, 0x52, 0x5c, 0xbe, 0x54, 0x45, 0x1a, 0x2a, 0xf1, - 0x54, 0x52, 0x3c, 0xd9, 0x85, 0xed, 0x34, 0xa0, 0x30, 0x1e, 0x7f, 0x63, 0xb0, 0xdf, 0x1e, 0x96, - 0x7f, 0x66, 0x70, 0xe3, 0x18, 0x69, 0x4d, 0x7b, 0xfa, 0xbb, 0x7f, 0xbc, 0xfc, 0x6c, 0x06, 0xcf, - 0xfe, 0x6c, 0x6f, 0x93, 0xaf, 0x0c, 0x1e, 0x48, 0x7d, 0x6d, 0xc8, 0xc4, 0x69, 0x53, 0x66, 0xd5, - 0xcd, 0x98, 0x16, 0xe5, 0x53, 0xf6, 0xfe, 0x75, 0x24, 0x29, 0xce, 0xcf, 0x44, 0xa8, 0x53, 0xb7, - 0x00, 0x1e, 0xae, 0xce, 0xc2, 0x1a, 0xff, 0xb0, 0x3a, 0x12, 0x11, 0x2a, 0x37, 0xda, 0x7c, 0x93, - 0xce, 0x76, 0xca, 0x92, 0x87, 0x3f, 0x03, 0x00, 0x00, 
0xff, 0xff, 0x51, 0x6b, 0xa4, 0x34, 0xc6, - 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MetricConfigClient is the client API for MetricConfig service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetricConfigClient interface { - GetMetricConfig(ctx context.Context, in *MetricConfigRequest, opts ...grpc.CallOption) (*MetricConfigResponse, error) -} - -type metricConfigClient struct { - cc *grpc.ClientConn -} - -func NewMetricConfigClient(cc *grpc.ClientConn) MetricConfigClient { - return &metricConfigClient{cc} -} - -func (c *metricConfigClient) GetMetricConfig(ctx context.Context, in *MetricConfigRequest, opts ...grpc.CallOption) (*MetricConfigResponse, error) { - out := new(MetricConfigResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.metrics.experimental.MetricConfig/GetMetricConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MetricConfigServer is the server API for MetricConfig service. -type MetricConfigServer interface { - GetMetricConfig(context.Context, *MetricConfigRequest) (*MetricConfigResponse, error) -} - -// UnimplementedMetricConfigServer can be embedded to have forward compatible implementations. 
-type UnimplementedMetricConfigServer struct { -} - -func (*UnimplementedMetricConfigServer) GetMetricConfig(ctx context.Context, req *MetricConfigRequest) (*MetricConfigResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMetricConfig not implemented") -} - -func RegisterMetricConfigServer(s *grpc.Server, srv MetricConfigServer) { - s.RegisterService(&_MetricConfig_serviceDesc, srv) -} - -func _MetricConfig_GetMetricConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MetricConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricConfigServer).GetMetricConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/opentelemetry.proto.metrics.experimental.MetricConfig/GetMetricConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricConfigServer).GetMetricConfig(ctx, req.(*MetricConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _MetricConfig_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.metrics.experimental.MetricConfig", - HandlerType: (*MetricConfigServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetMetricConfig", - Handler: _MetricConfig_GetMetricConfig_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/metrics/experimental/configservice.proto", -} diff --git a/internal/opentelemetry-proto-gen/metrics/v1/metrics.pb.go b/internal/opentelemetry-proto-gen/metrics/v1/metrics.pb.go deleted file mode 100644 index 12df1441fb..0000000000 --- a/internal/opentelemetry-proto-gen/metrics/v1/metrics.pb.go +++ /dev/null @@ -1,1501 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: opentelemetry/proto/metrics/v1/metrics.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - v11 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" - v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/resource/v1" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// AggregationTemporality defines how a metric aggregator reports aggregated -// values. It describes how those values relate to the time interval over -// which they are aggregated. -type AggregationTemporality int32 - -const ( - // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. - AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0 - // DELTA is an AggregationTemporality for a metric aggregator which reports - // changes since last report time. Successive metrics contain aggregation of - // values from continuous and non-overlapping intervals. - // - // The values for a DELTA metric are based only on the time interval - // associated with one measurement cycle. There is no dependency on - // previous measurements like is the case for CUMULATIVE metrics. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // DELTA metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. 
A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0+1 to - // t_0+2 with a value of 2. - AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1 - // CUMULATIVE is an AggregationTemporality for a metic aggregator which - // reports changes since a fixed start time. This means that current values - // of a CUMULATIVE metric depend on all previous measurements since the - // start time. Because of this, the sender is required to retain this state - // in some form. If this state is lost or invalidated, the CUMULATIVE metric - // values MUST be reset and a new fixed start time following the last - // reported measurement time sent MUST be used. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // CUMULATIVE metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. 
A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+2 with a value of 5. - // 9. The system experiences a fault and loses state. - // 10. The system recovers and resumes receiving at time=t_1. - // 11. A request is received, the system measures 1 request. - // 12. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_1 to - // t_0+1 with a value of 1. - // - // Note: Even though, when reporting changes since last report time, using - // CUMULATIVE is valid, it is not recommended. This may cause problems for - // systems that do not use start_time to determine when the aggregation - // value was reset (e.g. Prometheus). - AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2 -) - -var AggregationTemporality_name = map[int32]string{ - 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED", - 1: "AGGREGATION_TEMPORALITY_DELTA", - 2: "AGGREGATION_TEMPORALITY_CUMULATIVE", -} - -var AggregationTemporality_value = map[string]int32{ - "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0, - "AGGREGATION_TEMPORALITY_DELTA": 1, - "AGGREGATION_TEMPORALITY_CUMULATIVE": 2, -} - -func (x AggregationTemporality) String() string { - return proto.EnumName(AggregationTemporality_name, int32(x)) -} - -func (AggregationTemporality) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{0} -} - -// A collection of InstrumentationLibraryMetrics from a Resource. -type ResourceMetrics struct { - // The resource for the metrics in this message. - // If this field is not set then no resource info is known. - Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // A list of metrics that originate from a resource. 
- InstrumentationLibraryMetrics []*InstrumentationLibraryMetrics `protobuf:"bytes,2,rep,name=instrumentation_library_metrics,json=instrumentationLibraryMetrics,proto3" json:"instrumentation_library_metrics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResourceMetrics) Reset() { *m = ResourceMetrics{} } -func (m *ResourceMetrics) String() string { return proto.CompactTextString(m) } -func (*ResourceMetrics) ProtoMessage() {} -func (*ResourceMetrics) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{0} -} -func (m *ResourceMetrics) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResourceMetrics.Unmarshal(m, b) -} -func (m *ResourceMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResourceMetrics.Marshal(b, m, deterministic) -} -func (m *ResourceMetrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceMetrics.Merge(m, src) -} -func (m *ResourceMetrics) XXX_Size() int { - return xxx_messageInfo_ResourceMetrics.Size(m) -} -func (m *ResourceMetrics) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceMetrics.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceMetrics proto.InternalMessageInfo - -func (m *ResourceMetrics) GetResource() *v1.Resource { - if m != nil { - return m.Resource - } - return nil -} - -func (m *ResourceMetrics) GetInstrumentationLibraryMetrics() []*InstrumentationLibraryMetrics { - if m != nil { - return m.InstrumentationLibraryMetrics - } - return nil -} - -// A collection of Metrics produced by an InstrumentationLibrary. -type InstrumentationLibraryMetrics struct { - // The instrumentation library information for the metrics in this message. - // If this field is not set then no library info is known. 
- InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` - // A list of metrics that originate from an instrumentation library. - Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InstrumentationLibraryMetrics) Reset() { *m = InstrumentationLibraryMetrics{} } -func (m *InstrumentationLibraryMetrics) String() string { return proto.CompactTextString(m) } -func (*InstrumentationLibraryMetrics) ProtoMessage() {} -func (*InstrumentationLibraryMetrics) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{1} -} -func (m *InstrumentationLibraryMetrics) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InstrumentationLibraryMetrics.Unmarshal(m, b) -} -func (m *InstrumentationLibraryMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InstrumentationLibraryMetrics.Marshal(b, m, deterministic) -} -func (m *InstrumentationLibraryMetrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_InstrumentationLibraryMetrics.Merge(m, src) -} -func (m *InstrumentationLibraryMetrics) XXX_Size() int { - return xxx_messageInfo_InstrumentationLibraryMetrics.Size(m) -} -func (m *InstrumentationLibraryMetrics) XXX_DiscardUnknown() { - xxx_messageInfo_InstrumentationLibraryMetrics.DiscardUnknown(m) -} - -var xxx_messageInfo_InstrumentationLibraryMetrics proto.InternalMessageInfo - -func (m *InstrumentationLibraryMetrics) GetInstrumentationLibrary() *v11.InstrumentationLibrary { - if m != nil { - return m.InstrumentationLibrary - } - return nil -} - -func (m *InstrumentationLibraryMetrics) GetMetrics() []*Metric { - if m != nil { - return m.Metrics - } - return nil -} - -// Defines a Metric which has one or more timeseries. 
-// -// The data model and relation between entities is shown in the -// diagram below. Here, "DataPoint" is the term used to refer to any -// one of the specific data point value types, and "points" is the term used -// to refer to any one of the lists of points contained in the Metric. -// -// - Metric is composed of a metadata and data. -// - Metadata part contains a name, description, unit. -// - Data is one of the possible types (Gauge, Sum, Histogram, etc.). -// - DataPoint contains timestamps, labels, and one of the possible value type -// fields. -// -// Metric -// +------------+ -// |name | -// |description | -// |unit | +---------------------------+ -// |data |---> |Gauge, Sum, Histogram, ... | -// +------------+ +---------------------------+ -// -// Data [One of Gauge, Sum, Histogram, ...] -// +-----------+ -// |... | // Metadata about the Data. -// |points |--+ -// +-----------+ | -// | +---------------------------+ -// | |DataPoint 1 | -// v |+------+------+ +------+ | -// +-----+ ||label |label |...|label | | -// | 1 |-->||value1|value2|...|valueN| | -// +-----+ |+------+------+ +------+ | -// | . | |+-----+ | -// | . | ||value| | -// | . | |+-----+ | -// | . | +---------------------------+ -// | . | . -// | . | . -// | . | . -// | . | +---------------------------+ -// | . | |DataPoint M | -// +-----+ |+------+------+ +------+ | -// | M |-->||label |label |...|label | | -// +-----+ ||value1|value2|...|valueN| | -// |+------+------+ +------+ | -// |+-----+ | -// ||value| | -// |+-----+ | -// +---------------------------+ -// -// All DataPoint types have three common fields: -// - Labels zero or more key-value pairs associated with the data point. -// - StartTimeUnixNano MUST be set to the start of the interval when the data's -// type includes an AggregationTemporality. This field is not set otherwise. -// - TimeUnixNano MUST be set to: -// - the moment when an aggregation is reported (independent of the -// aggregation temporality). 
-// - the instantaneous time of the event. -type Metric struct { - // name of the metric, including its DNS name prefix. It must be unique. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // description of the metric, which can be used in documentation. - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - // unit in which the metric value is reported. Follows the format - // described by http://unitsofmeasure.org/ucum.html. - Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` - // Data determines the aggregation type (if any) of the metric, what is the - // reported value type for the data points, as well as the relatationship to - // the time interval over which they are reported. - // - // TODO: Update table after the decision on: - // https://github.com/open-telemetry/opentelemetry-specification/issues/731. - // By default, metrics recording using the OpenTelemetry API are exported as - // (the table does not include MeasurementValueType to avoid extra rows): - // - // Instrument Type - // ---------------------------------------------- - // Counter Sum(aggregation_temporality=delta;is_monotonic=true) - // UpDownCounter Sum(aggregation_temporality=delta;is_monotonic=false) - // ValueRecorder TBD - // SumObserver Sum(aggregation_temporality=cumulative;is_monotonic=true) - // UpDownSumObserver Sum(aggregation_temporality=cumulative;is_monotonic=false) - // ValueObserver Gauge() - // - // Types that are valid to be assigned to Data: - // *Metric_IntGauge - // *Metric_DoubleGauge - // *Metric_IntSum - // *Metric_DoubleSum - // *Metric_IntHistogram - // *Metric_DoubleHistogram - Data isMetric_Data `protobuf_oneof:"data"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } 
-func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{2} -} -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -type isMetric_Data interface { - isMetric_Data() -} - -type Metric_IntGauge struct { - IntGauge *IntGauge `protobuf:"bytes,4,opt,name=int_gauge,json=intGauge,proto3,oneof" json:"int_gauge,omitempty"` -} -type Metric_DoubleGauge struct { - DoubleGauge *DoubleGauge `protobuf:"bytes,5,opt,name=double_gauge,json=doubleGauge,proto3,oneof" json:"double_gauge,omitempty"` -} -type Metric_IntSum struct { - IntSum *IntSum `protobuf:"bytes,6,opt,name=int_sum,json=intSum,proto3,oneof" json:"int_sum,omitempty"` -} -type Metric_DoubleSum struct { - DoubleSum *DoubleSum `protobuf:"bytes,7,opt,name=double_sum,json=doubleSum,proto3,oneof" json:"double_sum,omitempty"` -} -type Metric_IntHistogram struct { - IntHistogram *IntHistogram `protobuf:"bytes,8,opt,name=int_histogram,json=intHistogram,proto3,oneof" json:"int_histogram,omitempty"` -} -type Metric_DoubleHistogram struct { - DoubleHistogram *DoubleHistogram `protobuf:"bytes,9,opt,name=double_histogram,json=doubleHistogram,proto3,oneof" json:"double_histogram,omitempty"` -} - -func (*Metric_IntGauge) isMetric_Data() {} -func (*Metric_DoubleGauge) isMetric_Data() {} -func (*Metric_IntSum) isMetric_Data() {} -func (*Metric_DoubleSum) isMetric_Data() {} -func (*Metric_IntHistogram) isMetric_Data() {} -func (*Metric_DoubleHistogram) isMetric_Data() {} - 
-func (m *Metric) GetData() isMetric_Data { - if m != nil { - return m.Data - } - return nil -} - -func (m *Metric) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Metric) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Metric) GetUnit() string { - if m != nil { - return m.Unit - } - return "" -} - -func (m *Metric) GetIntGauge() *IntGauge { - if x, ok := m.GetData().(*Metric_IntGauge); ok { - return x.IntGauge - } - return nil -} - -func (m *Metric) GetDoubleGauge() *DoubleGauge { - if x, ok := m.GetData().(*Metric_DoubleGauge); ok { - return x.DoubleGauge - } - return nil -} - -func (m *Metric) GetIntSum() *IntSum { - if x, ok := m.GetData().(*Metric_IntSum); ok { - return x.IntSum - } - return nil -} - -func (m *Metric) GetDoubleSum() *DoubleSum { - if x, ok := m.GetData().(*Metric_DoubleSum); ok { - return x.DoubleSum - } - return nil -} - -func (m *Metric) GetIntHistogram() *IntHistogram { - if x, ok := m.GetData().(*Metric_IntHistogram); ok { - return x.IntHistogram - } - return nil -} - -func (m *Metric) GetDoubleHistogram() *DoubleHistogram { - if x, ok := m.GetData().(*Metric_DoubleHistogram); ok { - return x.DoubleHistogram - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Metric) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Metric_IntGauge)(nil), - (*Metric_DoubleGauge)(nil), - (*Metric_IntSum)(nil), - (*Metric_DoubleSum)(nil), - (*Metric_IntHistogram)(nil), - (*Metric_DoubleHistogram)(nil), - } -} - -// Gauge represents the type of a int scalar metric that always exports the -// "current value" for every data point. It should be used for an "unknown" -// aggregation. -// -// A Gauge does not support different aggregation temporalities. Given the -// aggregation is unknown, points cannot be combined using the same -// aggregation, regardless of aggregation temporalities. 
Therefore, -// AggregationTemporality is not included. Consequently, this also means -// "StartTimeUnixNano" is ignored for all data points. -type IntGauge struct { - DataPoints []*IntDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IntGauge) Reset() { *m = IntGauge{} } -func (m *IntGauge) String() string { return proto.CompactTextString(m) } -func (*IntGauge) ProtoMessage() {} -func (*IntGauge) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{3} -} -func (m *IntGauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IntGauge.Unmarshal(m, b) -} -func (m *IntGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IntGauge.Marshal(b, m, deterministic) -} -func (m *IntGauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntGauge.Merge(m, src) -} -func (m *IntGauge) XXX_Size() int { - return xxx_messageInfo_IntGauge.Size(m) -} -func (m *IntGauge) XXX_DiscardUnknown() { - xxx_messageInfo_IntGauge.DiscardUnknown(m) -} - -var xxx_messageInfo_IntGauge proto.InternalMessageInfo - -func (m *IntGauge) GetDataPoints() []*IntDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -// Gauge represents the type of a double scalar metric that always exports the -// "current value" for every data point. It should be used for an "unknown" -// aggregation. -// -// A Gauge does not support different aggregation temporalities. Given the -// aggregation is unknown, points cannot be combined using the same -// aggregation, regardless of aggregation temporalities. Therefore, -// AggregationTemporality is not included. Consequently, this also means -// "StartTimeUnixNano" is ignored for all data points. 
-type DoubleGauge struct { - DataPoints []*DoubleDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleGauge) Reset() { *m = DoubleGauge{} } -func (m *DoubleGauge) String() string { return proto.CompactTextString(m) } -func (*DoubleGauge) ProtoMessage() {} -func (*DoubleGauge) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{4} -} -func (m *DoubleGauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleGauge.Unmarshal(m, b) -} -func (m *DoubleGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleGauge.Marshal(b, m, deterministic) -} -func (m *DoubleGauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleGauge.Merge(m, src) -} -func (m *DoubleGauge) XXX_Size() int { - return xxx_messageInfo_DoubleGauge.Size(m) -} -func (m *DoubleGauge) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleGauge.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleGauge proto.InternalMessageInfo - -func (m *DoubleGauge) GetDataPoints() []*DoubleDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -// Sum represents the type of a numeric int scalar metric that is calculated as -// a sum of all reported measurements over a time interval. -type IntSum struct { - DataPoints []*IntDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. 
- AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` - // If "true" means that the sum is monotonic. - IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IntSum) Reset() { *m = IntSum{} } -func (m *IntSum) String() string { return proto.CompactTextString(m) } -func (*IntSum) ProtoMessage() {} -func (*IntSum) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{5} -} -func (m *IntSum) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IntSum.Unmarshal(m, b) -} -func (m *IntSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IntSum.Marshal(b, m, deterministic) -} -func (m *IntSum) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntSum.Merge(m, src) -} -func (m *IntSum) XXX_Size() int { - return xxx_messageInfo_IntSum.Size(m) -} -func (m *IntSum) XXX_DiscardUnknown() { - xxx_messageInfo_IntSum.DiscardUnknown(m) -} - -var xxx_messageInfo_IntSum proto.InternalMessageInfo - -func (m *IntSum) GetDataPoints() []*IntDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -func (m *IntSum) GetAggregationTemporality() AggregationTemporality { - if m != nil { - return m.AggregationTemporality - } - return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED -} - -func (m *IntSum) GetIsMonotonic() bool { - if m != nil { - return m.IsMonotonic - } - return false -} - -// Sum represents the type of a numeric double scalar metric that is calculated -// as a sum of all reported measurements over a time interval. 
-type DoubleSum struct { - DataPoints []*DoubleDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` - // If "true" means that the sum is monotonic. - IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleSum) Reset() { *m = DoubleSum{} } -func (m *DoubleSum) String() string { return proto.CompactTextString(m) } -func (*DoubleSum) ProtoMessage() {} -func (*DoubleSum) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{6} -} -func (m *DoubleSum) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleSum.Unmarshal(m, b) -} -func (m *DoubleSum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleSum.Marshal(b, m, deterministic) -} -func (m *DoubleSum) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleSum.Merge(m, src) -} -func (m *DoubleSum) XXX_Size() int { - return xxx_messageInfo_DoubleSum.Size(m) -} -func (m *DoubleSum) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleSum.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleSum proto.InternalMessageInfo - -func (m *DoubleSum) GetDataPoints() []*DoubleDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -func (m *DoubleSum) GetAggregationTemporality() AggregationTemporality { - if m != nil { - return m.AggregationTemporality - } - return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED 
-} - -func (m *DoubleSum) GetIsMonotonic() bool { - if m != nil { - return m.IsMonotonic - } - return false -} - -// Represents the type of a metric that is calculated by aggregating as a -// Histogram of all reported int measurements over a time interval. -type IntHistogram struct { - DataPoints []*IntHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IntHistogram) Reset() { *m = IntHistogram{} } -func (m *IntHistogram) String() string { return proto.CompactTextString(m) } -func (*IntHistogram) ProtoMessage() {} -func (*IntHistogram) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{7} -} -func (m *IntHistogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IntHistogram.Unmarshal(m, b) -} -func (m *IntHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IntHistogram.Marshal(b, m, deterministic) -} -func (m *IntHistogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntHistogram.Merge(m, src) -} -func (m *IntHistogram) XXX_Size() int { - return xxx_messageInfo_IntHistogram.Size(m) -} -func (m *IntHistogram) XXX_DiscardUnknown() { - xxx_messageInfo_IntHistogram.DiscardUnknown(m) -} - -var xxx_messageInfo_IntHistogram proto.InternalMessageInfo - -func (m *IntHistogram) GetDataPoints() []*IntHistogramDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -func (m *IntHistogram) 
GetAggregationTemporality() AggregationTemporality { - if m != nil { - return m.AggregationTemporality - } - return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED -} - -// Represents the type of a metric that is calculated by aggregating as a -// Histogram of all reported double measurements over a time interval. -type DoubleHistogram struct { - DataPoints []*DoubleHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleHistogram) Reset() { *m = DoubleHistogram{} } -func (m *DoubleHistogram) String() string { return proto.CompactTextString(m) } -func (*DoubleHistogram) ProtoMessage() {} -func (*DoubleHistogram) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{8} -} -func (m *DoubleHistogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleHistogram.Unmarshal(m, b) -} -func (m *DoubleHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleHistogram.Marshal(b, m, deterministic) -} -func (m *DoubleHistogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleHistogram.Merge(m, src) -} -func (m *DoubleHistogram) XXX_Size() int { - return xxx_messageInfo_DoubleHistogram.Size(m) -} -func (m *DoubleHistogram) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleHistogram.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleHistogram proto.InternalMessageInfo - -func (m *DoubleHistogram) GetDataPoints() 
[]*DoubleHistogramDataPoint { - if m != nil { - return m.DataPoints - } - return nil -} - -func (m *DoubleHistogram) GetAggregationTemporality() AggregationTemporality { - if m != nil { - return m.AggregationTemporality - } - return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED -} - -// IntDataPoint is a single data point in a timeseries that describes the -// time-varying values of a int64 metric. -type IntDataPoint struct { - // The set of labels that uniquely identify this timeseries. - Labels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` - // start_time_unix_nano is the last time when the aggregation value was reset - // to "zero". For some metric types this is ignored, see data types for more - // details. - // - // The aggregation value is over the time interval (start_time_unix_nano, - // time_unix_nano]. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - // - // Value of 0 indicates that the timestamp is unspecified. In that case the - // timestamp may be decided by the backend. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // time_unix_nano is the moment when this aggregation value was reported. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // value itself. 
- Value int64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - Exemplars []*IntExemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IntDataPoint) Reset() { *m = IntDataPoint{} } -func (m *IntDataPoint) String() string { return proto.CompactTextString(m) } -func (*IntDataPoint) ProtoMessage() {} -func (*IntDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{9} -} -func (m *IntDataPoint) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IntDataPoint.Unmarshal(m, b) -} -func (m *IntDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IntDataPoint.Marshal(b, m, deterministic) -} -func (m *IntDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntDataPoint.Merge(m, src) -} -func (m *IntDataPoint) XXX_Size() int { - return xxx_messageInfo_IntDataPoint.Size(m) -} -func (m *IntDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_IntDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_IntDataPoint proto.InternalMessageInfo - -func (m *IntDataPoint) GetLabels() []*v11.StringKeyValue { - if m != nil { - return m.Labels - } - return nil -} - -func (m *IntDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *IntDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *IntDataPoint) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *IntDataPoint) GetExemplars() []*IntExemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -// DoubleDataPoint is a single data point in a timeseries that describes the -// time-varying value of a double metric. 
-type DoubleDataPoint struct { - // The set of labels that uniquely identify this timeseries. - Labels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` - // start_time_unix_nano is the last time when the aggregation value was reset - // to "zero". For some metric types this is ignored, see data types for more - // details. - // - // The aggregation value is over the time interval (start_time_unix_nano, - // time_unix_nano]. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - // - // Value of 0 indicates that the timestamp is unspecified. In that case the - // timestamp may be decided by the backend. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // time_unix_nano is the moment when this aggregation value was reported. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // value itself. 
- Value float64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - Exemplars []*DoubleExemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleDataPoint) Reset() { *m = DoubleDataPoint{} } -func (m *DoubleDataPoint) String() string { return proto.CompactTextString(m) } -func (*DoubleDataPoint) ProtoMessage() {} -func (*DoubleDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{10} -} -func (m *DoubleDataPoint) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleDataPoint.Unmarshal(m, b) -} -func (m *DoubleDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleDataPoint.Marshal(b, m, deterministic) -} -func (m *DoubleDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleDataPoint.Merge(m, src) -} -func (m *DoubleDataPoint) XXX_Size() int { - return xxx_messageInfo_DoubleDataPoint.Size(m) -} -func (m *DoubleDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleDataPoint proto.InternalMessageInfo - -func (m *DoubleDataPoint) GetLabels() []*v11.StringKeyValue { - if m != nil { - return m.Labels - } - return nil -} - -func (m *DoubleDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *DoubleDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *DoubleDataPoint) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *DoubleDataPoint) GetExemplars() []*DoubleExemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -// IntHistogramDataPoint is a single data point in a 
timeseries that describes -// the time-varying values of a Histogram of int values. A Histogram contains -// summary statistics for a population of values, it may optionally contain -// the distribution of those values across a set of buckets. -type IntHistogramDataPoint struct { - // The set of labels that uniquely identify this timeseries. - Labels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` - // start_time_unix_nano is the last time when the aggregation value was reset - // to "zero". For some metric types this is ignored, see data types for more - // details. - // - // The aggregation value is over the time interval (start_time_unix_nano, - // time_unix_nano]. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - // - // Value of 0 indicates that the timestamp is unspecified. In that case the - // timestamp may be decided by the backend. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // time_unix_nano is the moment when this aggregation value was reported. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // count is the number of values in the population. Must be non-negative. This - // value must be equal to the sum of the "count" fields in buckets if a - // histogram is provided. - Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` - // sum of the values in the population. If count is zero then this field - // must be zero. This value must be equal to the sum of the "sum" fields in - // buckets if a histogram is provided. 
- Sum int64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` - // bucket_counts is an optional field contains the count values of histogram - // for each bucket. - // - // The sum of the bucket_counts must equal the value in the count field. - // - // The number of elements in bucket_counts array must be by one greater than - // the number of elements in explicit_bounds array. - BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` - // explicit_bounds specifies buckets with explicitly defined bounds for values. - // The bucket boundaries are described by "bounds" field. - // - // This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket - // at index i are: - // - // (-infinity, bounds[i]) for i == 0 - // [bounds[i-1], bounds[i]) for 0 < i < N-1 - // [bounds[i], +infinity) for i == N-1 - // The values in bounds array must be strictly increasing. - // - // Note: only [a, b) intervals are currently supported for each bucket except the first one. - // If we decide to also support (a, b] intervals we should add support for these by defining - // a boolean value which decides what type of intervals to use. 
- ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - Exemplars []*IntExemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IntHistogramDataPoint) Reset() { *m = IntHistogramDataPoint{} } -func (m *IntHistogramDataPoint) String() string { return proto.CompactTextString(m) } -func (*IntHistogramDataPoint) ProtoMessage() {} -func (*IntHistogramDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{11} -} -func (m *IntHistogramDataPoint) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IntHistogramDataPoint.Unmarshal(m, b) -} -func (m *IntHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IntHistogramDataPoint.Marshal(b, m, deterministic) -} -func (m *IntHistogramDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntHistogramDataPoint.Merge(m, src) -} -func (m *IntHistogramDataPoint) XXX_Size() int { - return xxx_messageInfo_IntHistogramDataPoint.Size(m) -} -func (m *IntHistogramDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_IntHistogramDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_IntHistogramDataPoint proto.InternalMessageInfo - -func (m *IntHistogramDataPoint) GetLabels() []*v11.StringKeyValue { - if m != nil { - return m.Labels - } - return nil -} - -func (m *IntHistogramDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *IntHistogramDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *IntHistogramDataPoint) GetCount() uint64 { - if m != nil { - return m.Count - } - return 0 -} - 
-func (m *IntHistogramDataPoint) GetSum() int64 { - if m != nil { - return m.Sum - } - return 0 -} - -func (m *IntHistogramDataPoint) GetBucketCounts() []uint64 { - if m != nil { - return m.BucketCounts - } - return nil -} - -func (m *IntHistogramDataPoint) GetExplicitBounds() []float64 { - if m != nil { - return m.ExplicitBounds - } - return nil -} - -func (m *IntHistogramDataPoint) GetExemplars() []*IntExemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -// HistogramDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Histogram of double values. A Histogram contains -// summary statistics for a population of values, it may optionally contain the -// distribution of those values across a set of buckets. -type DoubleHistogramDataPoint struct { - // The set of labels that uniquely identify this timeseries. - Labels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` - // start_time_unix_nano is the last time when the aggregation value was reset - // to "zero". For some metric types this is ignored, see data types for more - // details. - // - // The aggregation value is over the time interval (start_time_unix_nano, - // time_unix_nano]. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - // - // Value of 0 indicates that the timestamp is unspecified. In that case the - // timestamp may be decided by the backend. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // time_unix_nano is the moment when this aggregation value was reported. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // count is the number of values in the population. Must be non-negative. 
This - // value must be equal to the sum of the "count" fields in buckets if a - // histogram is provided. - Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` - // sum of the values in the population. If count is zero then this field - // must be zero. This value must be equal to the sum of the "sum" fields in - // buckets if a histogram is provided. - Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` - // bucket_counts is an optional field contains the count values of histogram - // for each bucket. - // - // The sum of the bucket_counts must equal the value in the count field. - // - // The number of elements in bucket_counts array must be by one greater than - // the number of elements in explicit_bounds array. - BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` - // explicit_bounds specifies buckets with explicitly defined bounds for values. - // The bucket boundaries are described by "bounds" field. - // - // This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket - // at index i are: - // - // (-infinity, bounds[i]) for i == 0 - // [bounds[i-1], bounds[i]) for 0 < i < N-1 - // [bounds[i], +infinity) for i == N-1 - // The values in bounds array must be strictly increasing. - // - // Note: only [a, b) intervals are currently supported for each bucket except the first one. - // If we decide to also support (a, b] intervals we should add support for these by defining - // a boolean value which decides what type of intervals to use. 
- ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - Exemplars []*DoubleExemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleHistogramDataPoint) Reset() { *m = DoubleHistogramDataPoint{} } -func (m *DoubleHistogramDataPoint) String() string { return proto.CompactTextString(m) } -func (*DoubleHistogramDataPoint) ProtoMessage() {} -func (*DoubleHistogramDataPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{12} -} -func (m *DoubleHistogramDataPoint) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleHistogramDataPoint.Unmarshal(m, b) -} -func (m *DoubleHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleHistogramDataPoint.Marshal(b, m, deterministic) -} -func (m *DoubleHistogramDataPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleHistogramDataPoint.Merge(m, src) -} -func (m *DoubleHistogramDataPoint) XXX_Size() int { - return xxx_messageInfo_DoubleHistogramDataPoint.Size(m) -} -func (m *DoubleHistogramDataPoint) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleHistogramDataPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleHistogramDataPoint proto.InternalMessageInfo - -func (m *DoubleHistogramDataPoint) GetLabels() []*v11.StringKeyValue { - if m != nil { - return m.Labels - } - return nil -} - -func (m *DoubleHistogramDataPoint) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *DoubleHistogramDataPoint) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *DoubleHistogramDataPoint) GetCount() 
uint64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *DoubleHistogramDataPoint) GetSum() float64 { - if m != nil { - return m.Sum - } - return 0 -} - -func (m *DoubleHistogramDataPoint) GetBucketCounts() []uint64 { - if m != nil { - return m.BucketCounts - } - return nil -} - -func (m *DoubleHistogramDataPoint) GetExplicitBounds() []float64 { - if m != nil { - return m.ExplicitBounds - } - return nil -} - -func (m *DoubleHistogramDataPoint) GetExemplars() []*DoubleExemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -// A representation of an exemplar, which is a sample input int measurement. -// Exemplars also hold information about the environment when the measurement -// was recorded, for example the span and trace ID of the active span when the -// exemplar was recorded. -type IntExemplar struct { - // The set of labels that were filtered out by the aggregator, but recorded - // alongside the original measurement. Only labels that were filtered out - // by the aggregator should be included - FilteredLabels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=filtered_labels,json=filteredLabels,proto3" json:"filtered_labels,omitempty"` - // time_unix_nano is the exact time when this exemplar was recorded - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // Numerical int value of the measurement that was recorded. - Value int64 `protobuf:"fixed64,3,opt,name=value,proto3" json:"value,omitempty"` - // (Optional) Span ID of the exemplar trace. - // span_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - // (Optional) Trace ID of the exemplar trace. 
- // trace_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IntExemplar) Reset() { *m = IntExemplar{} } -func (m *IntExemplar) String() string { return proto.CompactTextString(m) } -func (*IntExemplar) ProtoMessage() {} -func (*IntExemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{13} -} -func (m *IntExemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IntExemplar.Unmarshal(m, b) -} -func (m *IntExemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IntExemplar.Marshal(b, m, deterministic) -} -func (m *IntExemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntExemplar.Merge(m, src) -} -func (m *IntExemplar) XXX_Size() int { - return xxx_messageInfo_IntExemplar.Size(m) -} -func (m *IntExemplar) XXX_DiscardUnknown() { - xxx_messageInfo_IntExemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_IntExemplar proto.InternalMessageInfo - -func (m *IntExemplar) GetFilteredLabels() []*v11.StringKeyValue { - if m != nil { - return m.FilteredLabels - } - return nil -} - -func (m *IntExemplar) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *IntExemplar) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *IntExemplar) GetSpanId() []byte { - if m != nil { - return m.SpanId - } - return nil -} - -func (m *IntExemplar) GetTraceId() []byte { - if m != nil { - return m.TraceId - } - return nil -} - -// A representation of an exemplar, which is a sample input double measurement. 
-// Exemplars also hold information about the environment when the measurement -// was recorded, for example the span and trace ID of the active span when the -// exemplar was recorded. -type DoubleExemplar struct { - // The set of labels that were filtered out by the aggregator, but recorded - // alongside the original measurement. Only labels that were filtered out - // by the aggregator should be included - FilteredLabels []*v11.StringKeyValue `protobuf:"bytes,1,rep,name=filtered_labels,json=filteredLabels,proto3" json:"filtered_labels,omitempty"` - // time_unix_nano is the exact time when this exemplar was recorded - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // Numerical double value of the measurement that was recorded. - Value float64 `protobuf:"fixed64,3,opt,name=value,proto3" json:"value,omitempty"` - // (Optional) Span ID of the exemplar trace. - // span_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - // (Optional) Trace ID of the exemplar trace. - // trace_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. 
- TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleExemplar) Reset() { *m = DoubleExemplar{} } -func (m *DoubleExemplar) String() string { return proto.CompactTextString(m) } -func (*DoubleExemplar) ProtoMessage() {} -func (*DoubleExemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_3c3112f9fa006917, []int{14} -} -func (m *DoubleExemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleExemplar.Unmarshal(m, b) -} -func (m *DoubleExemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleExemplar.Marshal(b, m, deterministic) -} -func (m *DoubleExemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleExemplar.Merge(m, src) -} -func (m *DoubleExemplar) XXX_Size() int { - return xxx_messageInfo_DoubleExemplar.Size(m) -} -func (m *DoubleExemplar) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleExemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleExemplar proto.InternalMessageInfo - -func (m *DoubleExemplar) GetFilteredLabels() []*v11.StringKeyValue { - if m != nil { - return m.FilteredLabels - } - return nil -} - -func (m *DoubleExemplar) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *DoubleExemplar) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *DoubleExemplar) GetSpanId() []byte { - if m != nil { - return m.SpanId - } - return nil -} - -func (m *DoubleExemplar) GetTraceId() []byte { - if m != nil { - return m.TraceId - } - return nil -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.metrics.v1.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) - proto.RegisterType((*ResourceMetrics)(nil), "opentelemetry.proto.metrics.v1.ResourceMetrics") - 
proto.RegisterType((*InstrumentationLibraryMetrics)(nil), "opentelemetry.proto.metrics.v1.InstrumentationLibraryMetrics") - proto.RegisterType((*Metric)(nil), "opentelemetry.proto.metrics.v1.Metric") - proto.RegisterType((*IntGauge)(nil), "opentelemetry.proto.metrics.v1.IntGauge") - proto.RegisterType((*DoubleGauge)(nil), "opentelemetry.proto.metrics.v1.DoubleGauge") - proto.RegisterType((*IntSum)(nil), "opentelemetry.proto.metrics.v1.IntSum") - proto.RegisterType((*DoubleSum)(nil), "opentelemetry.proto.metrics.v1.DoubleSum") - proto.RegisterType((*IntHistogram)(nil), "opentelemetry.proto.metrics.v1.IntHistogram") - proto.RegisterType((*DoubleHistogram)(nil), "opentelemetry.proto.metrics.v1.DoubleHistogram") - proto.RegisterType((*IntDataPoint)(nil), "opentelemetry.proto.metrics.v1.IntDataPoint") - proto.RegisterType((*DoubleDataPoint)(nil), "opentelemetry.proto.metrics.v1.DoubleDataPoint") - proto.RegisterType((*IntHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.IntHistogramDataPoint") - proto.RegisterType((*DoubleHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.DoubleHistogramDataPoint") - proto.RegisterType((*IntExemplar)(nil), "opentelemetry.proto.metrics.v1.IntExemplar") - proto.RegisterType((*DoubleExemplar)(nil), "opentelemetry.proto.metrics.v1.DoubleExemplar") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/metrics/v1/metrics.proto", fileDescriptor_3c3112f9fa006917) -} - -var fileDescriptor_3c3112f9fa006917 = []byte{ - // 1059 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0x41, 0x4f, 0xe3, 0x46, - 0x14, 0xc6, 0x09, 0x38, 0xc9, 0x4b, 0x16, 0xd2, 0xd1, 0x16, 0xdc, 0x95, 0x68, 0x21, 0x5b, 0xb1, - 0x74, 0x77, 0x49, 0x04, 0xd5, 0x56, 0xbd, 0x54, 0x6d, 0x80, 0x14, 0xd2, 0x06, 0x36, 0x1a, 0x02, - 0x12, 0x55, 0x25, 0x6b, 0x12, 0x4f, 0xb3, 0xa3, 0xda, 0x33, 0x91, 0x3d, 0x46, 0xf0, 0x03, 0x7a, - 0x6b, 0x4f, 0xfd, 0x31, 0xfd, 0x1d, 0x3d, 0x54, 0xed, 0xa5, 0x52, 0xef, 
0x3d, 0xf5, 0xd2, 0x53, - 0x0f, 0xd5, 0x8c, 0x6d, 0x92, 0x2c, 0x86, 0x64, 0xc5, 0xae, 0xc4, 0xde, 0xde, 0xbc, 0x79, 0xef, - 0xf3, 0xf7, 0xbe, 0xf7, 0x5e, 0x0c, 0x86, 0xa7, 0x62, 0x40, 0xb9, 0xa4, 0x2e, 0xf5, 0xa8, 0xf4, - 0x2f, 0x6a, 0x03, 0x5f, 0x48, 0x51, 0x53, 0x36, 0xeb, 0x05, 0xb5, 0xb3, 0xcd, 0xc4, 0xac, 0xea, - 0x0b, 0xf4, 0xfe, 0x58, 0x74, 0xe4, 0xac, 0x26, 0x21, 0x67, 0x9b, 0x0f, 0x1e, 0xa7, 0xa1, 0xf5, - 0x84, 0xe7, 0x09, 0xae, 0xc0, 0x22, 0x2b, 0x4a, 0x7b, 0x50, 0x4d, 0x8b, 0xf5, 0x69, 0x20, 0x42, - 0xbf, 0x47, 0x55, 0x74, 0x62, 0x47, 0xf1, 0x95, 0xbf, 0x0c, 0x58, 0xc0, 0xb1, 0xeb, 0x20, 0x7a, - 0x24, 0x6a, 0x40, 0x3e, 0x89, 0xb2, 0x8c, 0x15, 0x63, 0xbd, 0xb8, 0xf5, 0x51, 0x35, 0x8d, 0xe2, - 0x25, 0xd4, 0xd9, 0x66, 0x35, 0xc1, 0xc0, 0x97, 0xa9, 0xe8, 0x07, 0x03, 0x3e, 0x60, 0x3c, 0x90, - 0x7e, 0xe8, 0x51, 0x2e, 0x89, 0x64, 0x82, 0xdb, 0x2e, 0xeb, 0xfa, 0xc4, 0xbf, 0xb0, 0xe3, 0xea, - 0xac, 0xcc, 0x4a, 0x76, 0xbd, 0xb8, 0xf5, 0x59, 0xf5, 0x66, 0x05, 0xaa, 0xcd, 0x71, 0x98, 0x56, - 0x84, 0x12, 0xf3, 0xc5, 0xcb, 0xec, 0xa6, 0xeb, 0xca, 0xaf, 0x06, 0x2c, 0xdf, 0x08, 0x80, 0x38, - 0x2c, 0x5d, 0x43, 0x34, 0xae, 0xff, 0x59, 0x2a, 0xc1, 0x58, 0xf8, 0x6b, 0xf9, 0xe1, 0xc5, 0x74, - 0x62, 0xe8, 0x0b, 0xc8, 0x8d, 0x0b, 0xb0, 0x36, 0x49, 0x80, 0x88, 0x29, 0x4e, 0xd2, 0x2a, 0xbf, - 0xcc, 0x82, 0x19, 0xf9, 0x10, 0x82, 0x59, 0x4e, 0xbc, 0xa8, 0x53, 0x05, 0xac, 0x6d, 0xb4, 0x02, - 0x45, 0x87, 0x06, 0x3d, 0x9f, 0x0d, 0xd4, 0x63, 0xad, 0x8c, 0xbe, 0x1a, 0x75, 0xa9, 0xac, 0x90, - 0x33, 0x69, 0x65, 0xa3, 0x2c, 0x65, 0xa3, 0x3d, 0x28, 0x30, 0x2e, 0xed, 0x3e, 0x09, 0xfb, 0xd4, - 0x9a, 0xd5, 0x85, 0xaf, 0x4f, 0xee, 0x8c, 0xdc, 0x53, 0xf1, 0xfb, 0x33, 0x38, 0xcf, 0x62, 0x1b, - 0xb5, 0xa1, 0xe4, 0x88, 0xb0, 0xeb, 0xd2, 0x18, 0x6b, 0x4e, 0x63, 0x3d, 0x99, 0x84, 0xb5, 0xab, - 0x73, 0x12, 0xb8, 0xa2, 0x33, 0x3c, 0xa2, 0x3a, 0xe4, 0x14, 0xb5, 0x20, 0xf4, 0x2c, 0x53, 0x83, - 0xad, 0x4d, 0x41, 0xec, 0x28, 0xf4, 0xf6, 0x67, 0xb0, 0xc9, 0xb4, 0x85, 0xbe, 0x02, 0x88, 0x49, - 0x29, 0x94, 
0xdc, 0x0d, 0x73, 0x7d, 0x85, 0x52, 0x04, 0x54, 0x70, 0x92, 0x03, 0x3a, 0x82, 0x7b, - 0x8a, 0xce, 0x0b, 0x16, 0x48, 0xd1, 0xf7, 0x89, 0x67, 0xe5, 0x35, 0xdc, 0xd3, 0x29, 0x48, 0xed, - 0x27, 0x39, 0xfb, 0x33, 0xb8, 0xc4, 0x46, 0xce, 0xe8, 0x5b, 0x28, 0xc7, 0x04, 0x87, 0xb8, 0x05, - 0x8d, 0x5b, 0x9b, 0x8e, 0xe6, 0x28, 0xf4, 0x82, 0x33, 0xee, 0xda, 0x36, 0x61, 0xd6, 0x21, 0x92, - 0x54, 0x4e, 0x21, 0x9f, 0xf4, 0x0c, 0x1d, 0x40, 0x51, 0xf9, 0xec, 0x81, 0x60, 0x5c, 0x06, 0x96, - 0xa1, 0x67, 0x71, 0x9a, 0x22, 0x76, 0x89, 0x24, 0x6d, 0x95, 0x84, 0xc1, 0x49, 0xcc, 0xa0, 0x62, - 0x43, 0x71, 0xa4, 0x85, 0xa8, 0x9d, 0x86, 0x3e, 0x65, 0x29, 0xe9, 0x0f, 0xf8, 0xdb, 0x00, 0x33, - 0xea, 0xeb, 0x6b, 0xa6, 0x8e, 0x04, 0x2c, 0x91, 0x7e, 0xdf, 0xa7, 0xfd, 0x68, 0xfb, 0x25, 0xf5, - 0x06, 0xc2, 0x27, 0x2e, 0x93, 0x17, 0x7a, 0x79, 0xe6, 0xb7, 0x3e, 0x99, 0x04, 0x5d, 0x1f, 0xa6, - 0x77, 0x86, 0xd9, 0x78, 0x91, 0xa4, 0xfa, 0xd1, 0x2a, 0x94, 0x58, 0x60, 0x7b, 0x82, 0x0b, 0x29, - 0x38, 0xeb, 0xe9, 0x3d, 0xcc, 0xe3, 0x22, 0x0b, 0x0e, 0x12, 0x57, 0xe5, 0x1f, 0x03, 0x0a, 0x97, - 0xf3, 0xf7, 0xfa, 0xd5, 0xbc, 0x93, 0x35, 0xff, 0x6e, 0x40, 0x69, 0x74, 0x49, 0xd0, 0x49, 0x5a, - 0xd9, 0xcf, 0x5e, 0x65, 0xcf, 0xee, 0x46, 0xf1, 0x95, 0x3f, 0x0d, 0x58, 0x78, 0x69, 0x4d, 0xd1, - 0x69, 0x5a, 0x71, 0x9f, 0xbe, 0xe2, 0xb2, 0xdf, 0x91, 0xfa, 0x7e, 0xca, 0xe8, 0xce, 0x5d, 0xb2, - 0x41, 0x0d, 0x30, 0x5d, 0xd2, 0xa5, 0x6e, 0x52, 0xd7, 0xc6, 0x84, 0x77, 0xe8, 0x91, 0xf4, 0x19, - 0xef, 0x7f, 0x4d, 0x2f, 0x4e, 0x88, 0x1b, 0x52, 0x1c, 0x27, 0xa3, 0x1a, 0xdc, 0x0f, 0x24, 0xf1, - 0xa5, 0x2d, 0x99, 0x47, 0xed, 0x90, 0xb3, 0x73, 0x9b, 0x13, 0x2e, 0x74, 0x15, 0x26, 0x7e, 0x47, - 0xdf, 0x75, 0x98, 0x47, 0x8f, 0x39, 0x3b, 0x3f, 0x24, 0x5c, 0xa0, 0x0f, 0x61, 0xfe, 0xa5, 0xd0, - 0xac, 0x0e, 0x2d, 0xc9, 0xd1, 0xa8, 0xfb, 0x30, 0x77, 0xa6, 0x9e, 0xa3, 0xdf, 0x73, 0x65, 0x1c, - 0x1d, 0x50, 0x13, 0x0a, 0xf4, 0x9c, 0x7a, 0x03, 0x97, 0xf8, 0x81, 0x35, 0xa7, 0x69, 0x3f, 0x99, - 0x62, 0xd6, 0x1a, 0x71, 0x0e, 0x1e, 0x66, 0x57, 
0x7e, 0xce, 0x24, 0xfd, 0x7e, 0x2b, 0x25, 0x31, - 0x12, 0x49, 0x5a, 0x57, 0x25, 0xa9, 0x4e, 0x37, 0xa1, 0x69, 0xaa, 0xfc, 0x9b, 0x81, 0x77, 0x53, - 0x97, 0xf3, 0xee, 0x6b, 0xd3, 0x13, 0x21, 0x97, 0x5a, 0x1b, 0x13, 0x47, 0x07, 0x54, 0x86, 0xac, - 0xfa, 0x5b, 0x62, 0x4e, 0x8f, 0x90, 0x32, 0xd1, 0x43, 0xb8, 0xd7, 0x0d, 0x7b, 0xdf, 0x53, 0x69, - 0xeb, 0x88, 0xc0, 0x32, 0x57, 0xb2, 0x0a, 0x2c, 0x72, 0xee, 0x68, 0x1f, 0x7a, 0x04, 0x0b, 0xf4, - 0x7c, 0xe0, 0xb2, 0x1e, 0x93, 0x76, 0x57, 0x84, 0xdc, 0x09, 0xac, 0xdc, 0x4a, 0x76, 0xdd, 0xc0, - 0xf3, 0x89, 0x7b, 0x5b, 0x7b, 0xc7, 0xc7, 0x31, 0x7f, 0xab, 0x71, 0xfc, 0x2f, 0x03, 0xd6, 0x75, - 0x3f, 0x1c, 0x6f, 0xbb, 0xf6, 0xc6, 0x9b, 0xd0, 0xbe, 0x75, 0x55, 0xfb, 0x5b, 0xcc, 0xfd, 0x6f, - 0x06, 0x14, 0x47, 0x3a, 0x83, 0x4e, 0x60, 0xe1, 0x3b, 0xe6, 0x4a, 0xea, 0x53, 0xc7, 0xbe, 0x8d, - 0xf4, 0xf3, 0x09, 0x4a, 0x2b, 0x6a, 0xc1, 0x55, 0x45, 0x33, 0x37, 0x6d, 0x7a, 0x76, 0xf4, 0xc7, - 0x6f, 0x09, 0x72, 0xc1, 0x80, 0x70, 0x9b, 0x39, 0x5a, 0xe9, 0x12, 0x36, 0xd5, 0xb1, 0xe9, 0xa0, - 0xf7, 0x20, 0x2f, 0x7d, 0xd2, 0xa3, 0xea, 0x66, 0x4e, 0xdf, 0xe4, 0xf4, 0xb9, 0xe9, 0x54, 0xfe, - 0x30, 0x60, 0x7e, 0xbc, 0xea, 0xbb, 0x54, 0x9a, 0x71, 0x8b, 0xd2, 0x1e, 0xff, 0x68, 0xc0, 0x62, - 0xfa, 0x3b, 0x10, 0x3d, 0x82, 0x87, 0xf5, 0xbd, 0x3d, 0xdc, 0xd8, 0xab, 0x77, 0x9a, 0xcf, 0x0f, - 0xed, 0x4e, 0xe3, 0xa0, 0xfd, 0x1c, 0xd7, 0x5b, 0xcd, 0xce, 0xa9, 0x7d, 0x7c, 0x78, 0xd4, 0x6e, - 0xec, 0x34, 0xbf, 0x6c, 0x36, 0x76, 0xcb, 0x33, 0x68, 0x15, 0x96, 0xaf, 0x0b, 0xdc, 0x6d, 0xb4, - 0x3a, 0xf5, 0xb2, 0x81, 0xd6, 0xa0, 0x72, 0x5d, 0xc8, 0xce, 0xf1, 0xc1, 0x71, 0xab, 0xde, 0x69, - 0x9e, 0x34, 0xca, 0x99, 0x6d, 0x09, 0xab, 0x4c, 0x4c, 0x18, 0xc0, 0xed, 0x52, 0xfc, 0x1f, 0x6d, - 0x5b, 0x5d, 0xb4, 0x8d, 0x6f, 0x3e, 0xef, 0x33, 0xf9, 0x22, 0xec, 0x2a, 0x91, 0x6b, 0x2a, 0x75, - 0x63, 0xf8, 0x65, 0x60, 0x0c, 0x69, 0x23, 0xfa, 0x4e, 0xd0, 0xa7, 0xbc, 0xd6, 0x1f, 0xfd, 0x50, - 0xd1, 0x35, 0xf5, 0xc5, 0xc7, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x16, 0x7c, 0x5f, 
0x8f, 0xd1, - 0x10, 0x00, 0x00, -} diff --git a/internal/opentelemetry-proto-gen/resource/v1/resource.pb.go b/internal/opentelemetry-proto-gen/resource/v1/resource.pb.go deleted file mode 100644 index 75fedfe4e5..0000000000 --- a/internal/opentelemetry-proto-gen/resource/v1/resource.pb.go +++ /dev/null @@ -1,100 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/resource/v1/resource.proto - -package v1 - -import ( - fmt "fmt" - math "math" - - proto "github.com/gogo/protobuf/proto" - v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Resource information. -type Resource struct { - // Set of labels that describe the resource. - Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, then - // no attributes were dropped. 
- DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Resource) Reset() { *m = Resource{} } -func (m *Resource) String() string { return proto.CompactTextString(m) } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { - return fileDescriptor_446f73eacf88f3f5, []int{0} -} -func (m *Resource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Resource.Unmarshal(m, b) -} -func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Resource.Marshal(b, m, deterministic) -} -func (m *Resource) XXX_Merge(src proto.Message) { - xxx_messageInfo_Resource.Merge(m, src) -} -func (m *Resource) XXX_Size() int { - return xxx_messageInfo_Resource.Size(m) -} -func (m *Resource) XXX_DiscardUnknown() { - xxx_messageInfo_Resource.DiscardUnknown(m) -} - -var xxx_messageInfo_Resource proto.InternalMessageInfo - -func (m *Resource) GetAttributes() []*v1.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Resource) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func init() { - proto.RegisterType((*Resource)(nil), "opentelemetry.proto.resource.v1.Resource") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/resource/v1/resource.proto", fileDescriptor_446f73eacf88f3f5) -} - -var fileDescriptor_446f73eacf88f3f5 = []byte{ - // 227 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcb, 0x2f, 0x48, 0xcd, - 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, - 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, - 0x42, 0xf2, 0x28, 
0xea, 0x21, 0x82, 0x7a, 0x70, 0x35, 0x65, 0x86, 0x52, 0x5a, 0xd8, 0x0c, 0x4c, - 0xce, 0xcf, 0xcd, 0xcd, 0xcf, 0x03, 0x19, 0x07, 0x61, 0x41, 0xf4, 0x29, 0xf5, 0x32, 0x72, 0x71, - 0x04, 0x41, 0xf5, 0x0a, 0xb9, 0x73, 0x71, 0x25, 0x96, 0x94, 0x14, 0x65, 0x26, 0x95, 0x96, 0xa4, - 0x16, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0x61, 0xb3, 0x0e, 0x6a, 0x46, 0x99, - 0xa1, 0x9e, 0x77, 0x6a, 0x65, 0x58, 0x62, 0x4e, 0x69, 0x6a, 0x10, 0x92, 0x56, 0x21, 0x0b, 0x2e, - 0x89, 0x94, 0xa2, 0xfc, 0x82, 0x82, 0xd4, 0x94, 0x78, 0x84, 0x68, 0x7c, 0x72, 0x7e, 0x69, 0x5e, - 0x89, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6f, 0x90, 0x18, 0x54, 0xde, 0x11, 0x2e, 0xed, 0x0c, 0x92, - 0x75, 0x2a, 0xe7, 0x52, 0xca, 0xcc, 0xd7, 0x23, 0xe0, 0x43, 0x27, 0x5e, 0x98, 0x93, 0x03, 0x40, - 0x52, 0x01, 0x8c, 0x51, 0x0e, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x20, 0x77, 0xe9, 0x83, 0x34, - 0xeb, 0x22, 0xbc, 0x8f, 0x62, 0x96, 0x2e, 0x24, 0x30, 0xd2, 0x53, 0xf3, 0xf4, 0xd3, 0x51, 0x02, - 0x39, 0x89, 0x0d, 0x2c, 0x63, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xba, 0x7f, 0x2f, 0x93, 0x8e, - 0x01, 0x00, 0x00, -} diff --git a/internal/opentelemetry-proto-gen/trace/v1/trace.pb.go b/internal/opentelemetry-proto-gen/trace/v1/trace.pb.go deleted file mode 100644 index 8e63f5749f..0000000000 --- a/internal/opentelemetry-proto-gen/trace/v1/trace.pb.go +++ /dev/null @@ -1,815 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/trace/v1/trace.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - v11 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" - v1 "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/resource/v1" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// SpanKind is the type of span. Can be used to specify additional relationships between spans -// in addition to a parent/child relationship. -type Span_SpanKind int32 - -const ( - // Unspecified. Do NOT use as default. - // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. - Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 - // Indicates that the span represents an internal operation within an application, - // as opposed to an operations happening at the boundaries. Default value. - Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. - Span_SPAN_KIND_SERVER Span_SpanKind = 2 - // Indicates that the span describes a request to some remote service. - Span_SPAN_KIND_CLIENT Span_SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. - Span_SPAN_KIND_PRODUCER Span_SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. 
- Span_SPAN_KIND_CONSUMER Span_SpanKind = 5 -) - -var Span_SpanKind_name = map[int32]string{ - 0: "SPAN_KIND_UNSPECIFIED", - 1: "SPAN_KIND_INTERNAL", - 2: "SPAN_KIND_SERVER", - 3: "SPAN_KIND_CLIENT", - 4: "SPAN_KIND_PRODUCER", - 5: "SPAN_KIND_CONSUMER", -} - -var Span_SpanKind_value = map[string]int32{ - "SPAN_KIND_UNSPECIFIED": 0, - "SPAN_KIND_INTERNAL": 1, - "SPAN_KIND_SERVER": 2, - "SPAN_KIND_CLIENT": 3, - "SPAN_KIND_PRODUCER": 4, - "SPAN_KIND_CONSUMER": 5, -} - -func (x Span_SpanKind) String() string { - return proto.EnumName(Span_SpanKind_name, int32(x)) -} - -func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{2, 0} -} - -type Status_DeprecatedStatusCode int32 - -const ( - Status_DEPRECATED_STATUS_CODE_OK Status_DeprecatedStatusCode = 0 - Status_DEPRECATED_STATUS_CODE_CANCELLED Status_DeprecatedStatusCode = 1 - Status_DEPRECATED_STATUS_CODE_UNKNOWN_ERROR Status_DeprecatedStatusCode = 2 - Status_DEPRECATED_STATUS_CODE_INVALID_ARGUMENT Status_DeprecatedStatusCode = 3 - Status_DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED Status_DeprecatedStatusCode = 4 - Status_DEPRECATED_STATUS_CODE_NOT_FOUND Status_DeprecatedStatusCode = 5 - Status_DEPRECATED_STATUS_CODE_ALREADY_EXISTS Status_DeprecatedStatusCode = 6 - Status_DEPRECATED_STATUS_CODE_PERMISSION_DENIED Status_DeprecatedStatusCode = 7 - Status_DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED Status_DeprecatedStatusCode = 8 - Status_DEPRECATED_STATUS_CODE_FAILED_PRECONDITION Status_DeprecatedStatusCode = 9 - Status_DEPRECATED_STATUS_CODE_ABORTED Status_DeprecatedStatusCode = 10 - Status_DEPRECATED_STATUS_CODE_OUT_OF_RANGE Status_DeprecatedStatusCode = 11 - Status_DEPRECATED_STATUS_CODE_UNIMPLEMENTED Status_DeprecatedStatusCode = 12 - Status_DEPRECATED_STATUS_CODE_INTERNAL_ERROR Status_DeprecatedStatusCode = 13 - Status_DEPRECATED_STATUS_CODE_UNAVAILABLE Status_DeprecatedStatusCode = 14 - Status_DEPRECATED_STATUS_CODE_DATA_LOSS Status_DeprecatedStatusCode = 15 - 
Status_DEPRECATED_STATUS_CODE_UNAUTHENTICATED Status_DeprecatedStatusCode = 16 -) - -var Status_DeprecatedStatusCode_name = map[int32]string{ - 0: "DEPRECATED_STATUS_CODE_OK", - 1: "DEPRECATED_STATUS_CODE_CANCELLED", - 2: "DEPRECATED_STATUS_CODE_UNKNOWN_ERROR", - 3: "DEPRECATED_STATUS_CODE_INVALID_ARGUMENT", - 4: "DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED", - 5: "DEPRECATED_STATUS_CODE_NOT_FOUND", - 6: "DEPRECATED_STATUS_CODE_ALREADY_EXISTS", - 7: "DEPRECATED_STATUS_CODE_PERMISSION_DENIED", - 8: "DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED", - 9: "DEPRECATED_STATUS_CODE_FAILED_PRECONDITION", - 10: "DEPRECATED_STATUS_CODE_ABORTED", - 11: "DEPRECATED_STATUS_CODE_OUT_OF_RANGE", - 12: "DEPRECATED_STATUS_CODE_UNIMPLEMENTED", - 13: "DEPRECATED_STATUS_CODE_INTERNAL_ERROR", - 14: "DEPRECATED_STATUS_CODE_UNAVAILABLE", - 15: "DEPRECATED_STATUS_CODE_DATA_LOSS", - 16: "DEPRECATED_STATUS_CODE_UNAUTHENTICATED", -} - -var Status_DeprecatedStatusCode_value = map[string]int32{ - "DEPRECATED_STATUS_CODE_OK": 0, - "DEPRECATED_STATUS_CODE_CANCELLED": 1, - "DEPRECATED_STATUS_CODE_UNKNOWN_ERROR": 2, - "DEPRECATED_STATUS_CODE_INVALID_ARGUMENT": 3, - "DEPRECATED_STATUS_CODE_DEADLINE_EXCEEDED": 4, - "DEPRECATED_STATUS_CODE_NOT_FOUND": 5, - "DEPRECATED_STATUS_CODE_ALREADY_EXISTS": 6, - "DEPRECATED_STATUS_CODE_PERMISSION_DENIED": 7, - "DEPRECATED_STATUS_CODE_RESOURCE_EXHAUSTED": 8, - "DEPRECATED_STATUS_CODE_FAILED_PRECONDITION": 9, - "DEPRECATED_STATUS_CODE_ABORTED": 10, - "DEPRECATED_STATUS_CODE_OUT_OF_RANGE": 11, - "DEPRECATED_STATUS_CODE_UNIMPLEMENTED": 12, - "DEPRECATED_STATUS_CODE_INTERNAL_ERROR": 13, - "DEPRECATED_STATUS_CODE_UNAVAILABLE": 14, - "DEPRECATED_STATUS_CODE_DATA_LOSS": 15, - "DEPRECATED_STATUS_CODE_UNAUTHENTICATED": 16, -} - -func (x Status_DeprecatedStatusCode) String() string { - return proto.EnumName(Status_DeprecatedStatusCode_name, int32(x)) -} - -func (Status_DeprecatedStatusCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{3, 0} -} 
- -// For the semantics of status codes see -// https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#set-status -type Status_StatusCode int32 - -const ( - // The default status. - Status_STATUS_CODE_UNSET Status_StatusCode = 0 - // The Span has been validated by an Application developers or Operator to have - // completed successfully. - Status_STATUS_CODE_OK Status_StatusCode = 1 - // The Span contains an error. - Status_STATUS_CODE_ERROR Status_StatusCode = 2 -) - -var Status_StatusCode_name = map[int32]string{ - 0: "STATUS_CODE_UNSET", - 1: "STATUS_CODE_OK", - 2: "STATUS_CODE_ERROR", -} - -var Status_StatusCode_value = map[string]int32{ - "STATUS_CODE_UNSET": 0, - "STATUS_CODE_OK": 1, - "STATUS_CODE_ERROR": 2, -} - -func (x Status_StatusCode) String() string { - return proto.EnumName(Status_StatusCode_name, int32(x)) -} - -func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{3, 1} -} - -// A collection of InstrumentationLibrarySpans from a Resource. -type ResourceSpans struct { - // The resource for the spans in this message. - // If this field is not set then no resource info is known. - Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // A list of InstrumentationLibrarySpans that originate from a resource. 
- InstrumentationLibrarySpans []*InstrumentationLibrarySpans `protobuf:"bytes,2,rep,name=instrumentation_library_spans,json=instrumentationLibrarySpans,proto3" json:"instrumentation_library_spans,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResourceSpans) Reset() { *m = ResourceSpans{} } -func (m *ResourceSpans) String() string { return proto.CompactTextString(m) } -func (*ResourceSpans) ProtoMessage() {} -func (*ResourceSpans) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{0} -} -func (m *ResourceSpans) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResourceSpans.Unmarshal(m, b) -} -func (m *ResourceSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResourceSpans.Marshal(b, m, deterministic) -} -func (m *ResourceSpans) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceSpans.Merge(m, src) -} -func (m *ResourceSpans) XXX_Size() int { - return xxx_messageInfo_ResourceSpans.Size(m) -} -func (m *ResourceSpans) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceSpans.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceSpans proto.InternalMessageInfo - -func (m *ResourceSpans) GetResource() *v1.Resource { - if m != nil { - return m.Resource - } - return nil -} - -func (m *ResourceSpans) GetInstrumentationLibrarySpans() []*InstrumentationLibrarySpans { - if m != nil { - return m.InstrumentationLibrarySpans - } - return nil -} - -// A collection of Spans produced by an InstrumentationLibrary. -type InstrumentationLibrarySpans struct { - // The instrumentation library information for the spans in this message. - // If this field is not set then no library info is known. 
- InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` - // A list of Spans that originate from an instrumentation library. - Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InstrumentationLibrarySpans) Reset() { *m = InstrumentationLibrarySpans{} } -func (m *InstrumentationLibrarySpans) String() string { return proto.CompactTextString(m) } -func (*InstrumentationLibrarySpans) ProtoMessage() {} -func (*InstrumentationLibrarySpans) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{1} -} -func (m *InstrumentationLibrarySpans) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InstrumentationLibrarySpans.Unmarshal(m, b) -} -func (m *InstrumentationLibrarySpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InstrumentationLibrarySpans.Marshal(b, m, deterministic) -} -func (m *InstrumentationLibrarySpans) XXX_Merge(src proto.Message) { - xxx_messageInfo_InstrumentationLibrarySpans.Merge(m, src) -} -func (m *InstrumentationLibrarySpans) XXX_Size() int { - return xxx_messageInfo_InstrumentationLibrarySpans.Size(m) -} -func (m *InstrumentationLibrarySpans) XXX_DiscardUnknown() { - xxx_messageInfo_InstrumentationLibrarySpans.DiscardUnknown(m) -} - -var xxx_messageInfo_InstrumentationLibrarySpans proto.InternalMessageInfo - -func (m *InstrumentationLibrarySpans) GetInstrumentationLibrary() *v11.InstrumentationLibrary { - if m != nil { - return m.InstrumentationLibrary - } - return nil -} - -func (m *InstrumentationLibrarySpans) GetSpans() []*Span { - if m != nil { - return m.Spans - } - return nil -} - -// Span represents a single operation within a trace. Spans can be -// nested to form a trace tree. 
Spans may also be linked to other spans -// from the same or different trace and form graphs. Often, a trace -// contains a root span that describes the end-to-end latency, and one -// or more subspans for its sub-operations. A trace can also contain -// multiple root spans, or none at all. Spans do not need to be -// contiguous - there may be gaps or overlaps between spans in a trace. -// -// The next available field id is 17. -type Span struct { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes - // is considered invalid. - // - // This field is semantically required. Receiver should generate new - // random trace_id if empty or invalid trace_id was received. - // - // This field is required. - TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes is considered - // invalid. - // - // This field is semantically required. Receiver should generate new - // random span_id if empty or invalid span_id was received. - // - // This field is required. - SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - // trace_state conveys information about request position in multiple distributed tracing graphs. - // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header - // See also https://github.com/w3c/distributed-tracing for more details about this field. - TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. 
- ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` - // A description of the span's operation. - // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is semantically required to be set to non-empty string. - // When null or empty string received - receiver may use string "name" - // as a replacement. There might be smarted algorithms implemented by - // receiver to fix the empty span name. - // - // This field is required. - Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. - Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` - // start_time_unix_nano is the start time of the span. On the client side, this is the time - // kept by the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // end_time_unix_nano is the end time of the span. On the client side, this is the time - // kept by the local machine where the span execution ends. 
On the server side, this - // is the time when the server application handler stops running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` - // attributes is a collection of key/value pairs. The value can be a string, - // an integer, a double or the Boolean values `true` or `false`. Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "abc.com/myattribute": true - // "abc.com/score": 10.239 - Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // events is a collection of Event items. - Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` - // dropped_events_count is the number of dropped events. If the value is 0, then no - // events were dropped. - DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` - // links is a collection of Links, which are references from this span to a span - // in the same or different trace. 
- Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` - // dropped_links_count is the number of dropped links after the maximum size was - // enforced. If this value is 0, then no links were dropped. - DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` - // An optional final status for this span. Semantically when Status isn't set, it means - // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). - Status *Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span) Reset() { *m = Span{} } -func (m *Span) String() string { return proto.CompactTextString(m) } -func (*Span) ProtoMessage() {} -func (*Span) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{2} -} -func (m *Span) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span.Unmarshal(m, b) -} -func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span.Marshal(b, m, deterministic) -} -func (m *Span) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span.Merge(m, src) -} -func (m *Span) XXX_Size() int { - return xxx_messageInfo_Span.Size(m) -} -func (m *Span) XXX_DiscardUnknown() { - xxx_messageInfo_Span.DiscardUnknown(m) -} - -var xxx_messageInfo_Span proto.InternalMessageInfo - -func (m *Span) GetTraceId() []byte { - if m != nil { - return m.TraceId - } - return nil -} - -func (m *Span) GetSpanId() []byte { - if m != nil { - return m.SpanId - } - return nil -} - -func (m *Span) GetTraceState() string { - if m != nil { - return m.TraceState - } - return "" -} - -func (m *Span) GetParentSpanId() []byte { - if m != nil { - return m.ParentSpanId - } - return nil -} - -func (m *Span) GetName() string { - if m != nil { - return m.Name - } - return 
"" -} - -func (m *Span) GetKind() Span_SpanKind { - if m != nil { - return m.Kind - } - return Span_SPAN_KIND_UNSPECIFIED -} - -func (m *Span) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *Span) GetEndTimeUnixNano() uint64 { - if m != nil { - return m.EndTimeUnixNano - } - return 0 -} - -func (m *Span) GetAttributes() []*v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Span) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func (m *Span) GetEvents() []*Span_Event { - if m != nil { - return m.Events - } - return nil -} - -func (m *Span) GetDroppedEventsCount() uint32 { - if m != nil { - return m.DroppedEventsCount - } - return 0 -} - -func (m *Span) GetLinks() []*Span_Link { - if m != nil { - return m.Links - } - return nil -} - -func (m *Span) GetDroppedLinksCount() uint32 { - if m != nil { - return m.DroppedLinksCount - } - return 0 -} - -func (m *Span) GetStatus() *Status { - if m != nil { - return m.Status - } - return nil -} - -// Event is a time-stamped annotation of the span, consisting of user-supplied -// text description and key-value pairs. -type Span_Event struct { - // time_unix_nano is the time the event occurred. - TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // name of the event. - // This field is semantically required to be set to non-empty string. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // attributes is a collection of attribute key/value pairs on the event. - Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. 
- DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Event) Reset() { *m = Span_Event{} } -func (m *Span_Event) String() string { return proto.CompactTextString(m) } -func (*Span_Event) ProtoMessage() {} -func (*Span_Event) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{2, 0} -} -func (m *Span_Event) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Event.Unmarshal(m, b) -} -func (m *Span_Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Event.Marshal(b, m, deterministic) -} -func (m *Span_Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Event.Merge(m, src) -} -func (m *Span_Event) XXX_Size() int { - return xxx_messageInfo_Span_Event.Size(m) -} -func (m *Span_Event) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Event.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Event proto.InternalMessageInfo - -func (m *Span_Event) GetTimeUnixNano() uint64 { - if m != nil { - return m.TimeUnixNano - } - return 0 -} - -func (m *Span_Event) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Span_Event) GetAttributes() []*v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Span_Event) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. 
-type Span_Link struct { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - // A unique identifier for the linked span. The ID is an 8-byte array. - SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - // The trace_state associated with the link. - TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` - // attributes is a collection of attribute key/value pairs on the link. - Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Link) Reset() { *m = Span_Link{} } -func (m *Span_Link) String() string { return proto.CompactTextString(m) } -func (*Span_Link) ProtoMessage() {} -func (*Span_Link) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{2, 1} -} -func (m *Span_Link) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Link.Unmarshal(m, b) -} -func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) -} -func (m *Span_Link) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Link.Merge(m, src) -} -func (m *Span_Link) XXX_Size() int { - return xxx_messageInfo_Span_Link.Size(m) -} -func (m *Span_Link) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Link.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Link 
proto.InternalMessageInfo - -func (m *Span_Link) GetTraceId() []byte { - if m != nil { - return m.TraceId - } - return nil -} - -func (m *Span_Link) GetSpanId() []byte { - if m != nil { - return m.SpanId - } - return nil -} - -func (m *Span_Link) GetTraceState() string { - if m != nil { - return m.TraceState - } - return "" -} - -func (m *Span_Link) GetAttributes() []*v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Span_Link) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -// The Status type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. -type Status struct { - // The deprecated status code. This is an optional field. - // - // This field is deprecated and is replaced by the `code` field below. See backward - // compatibility notes below. According to our stability guarantees this field - // will be removed in 12 months, on Oct 22, 2021. All usage of old senders and - // receivers that do not understand the `code` field MUST be phased out by then. - DeprecatedCode Status_DeprecatedStatusCode `protobuf:"varint,1,opt,name=deprecated_code,json=deprecatedCode,proto3,enum=opentelemetry.proto.trace.v1.Status_DeprecatedStatusCode" json:"deprecated_code,omitempty"` // Deprecated: Do not use. - // A developer-facing human readable error message. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // The status code. 
- Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_5c407ac9c675a601, []int{3} -} -func (m *Status) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Status.Unmarshal(m, b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) -} -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) -} -func (m *Status) XXX_Size() int { - return xxx_messageInfo_Status.Size(m) -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} - -var xxx_messageInfo_Status proto.InternalMessageInfo - -// Deprecated: Do not use. 
-func (m *Status) GetDeprecatedCode() Status_DeprecatedStatusCode { - if m != nil { - return m.DeprecatedCode - } - return Status_DEPRECATED_STATUS_CODE_OK -} - -func (m *Status) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *Status) GetCode() Status_StatusCode { - if m != nil { - return m.Code - } - return Status_STATUS_CODE_UNSET -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) - proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_DeprecatedStatusCode", Status_DeprecatedStatusCode_name, Status_DeprecatedStatusCode_value) - proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_StatusCode", Status_StatusCode_name, Status_StatusCode_value) - proto.RegisterType((*ResourceSpans)(nil), "opentelemetry.proto.trace.v1.ResourceSpans") - proto.RegisterType((*InstrumentationLibrarySpans)(nil), "opentelemetry.proto.trace.v1.InstrumentationLibrarySpans") - proto.RegisterType((*Span)(nil), "opentelemetry.proto.trace.v1.Span") - proto.RegisterType((*Span_Event)(nil), "opentelemetry.proto.trace.v1.Span.Event") - proto.RegisterType((*Span_Link)(nil), "opentelemetry.proto.trace.v1.Span.Link") - proto.RegisterType((*Status)(nil), "opentelemetry.proto.trace.v1.Status") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/trace/v1/trace.proto", fileDescriptor_5c407ac9c675a601) -} - -var fileDescriptor_5c407ac9c675a601 = []byte{ - // 1130 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xc1, 0x6e, 0xdb, 0x46, - 0x10, 0x0d, 0x6d, 0x49, 0x76, 0xc6, 0xb6, 0xcc, 0x6c, 0x9d, 0x84, 0x71, 0x9a, 0x46, 0x50, 0xdd, - 0x44, 0x49, 0x1a, 0xa9, 0x49, 0x51, 0x20, 0x05, 0x1a, 0xb4, 0x34, 0xb9, 0x4e, 0x08, 0xd3, 0xa4, - 0xb0, 0x24, 0xdd, 0xb4, 0x97, 0x05, 0x6d, 0x6e, 0x5d, 0x22, 0xd6, 0x52, 0x20, 0x29, 0x23, 0x39, - 0xf4, 0x43, 0x0a, 0xf4, 0x73, 0x0a, 0xf4, 0x0b, 0x7a, 0xe9, 0xa1, 0x5f, 0xd2, 0x43, 0xb1, 
0x4b, - 0xca, 0xb2, 0x0c, 0x91, 0xce, 0x25, 0x17, 0x83, 0x7c, 0xf3, 0xde, 0xbc, 0x99, 0x9d, 0x59, 0x8b, - 0xd0, 0x4b, 0xc6, 0x8c, 0xe7, 0xec, 0x94, 0x8d, 0x58, 0x9e, 0xbe, 0x1f, 0x8c, 0xd3, 0x24, 0x4f, - 0x06, 0x79, 0x1a, 0x1e, 0xb3, 0xc1, 0xd9, 0xb3, 0xe2, 0xa1, 0x2f, 0x41, 0xf4, 0xe9, 0x1c, 0xb3, - 0x00, 0xfb, 0x05, 0xe1, 0xec, 0xd9, 0xf6, 0xe3, 0x45, 0x79, 0x8e, 0x93, 0xd1, 0x28, 0xe1, 0x22, - 0x51, 0xf1, 0x54, 0x88, 0xb6, 0xfb, 0x8b, 0xb8, 0x29, 0xcb, 0x92, 0x49, 0x5a, 0xd8, 0x4e, 0x9f, - 0x0b, 0x7e, 0xf7, 0x6f, 0x05, 0x36, 0x48, 0x09, 0x79, 0xe3, 0x90, 0x67, 0x08, 0xc3, 0xea, 0x94, - 0xa3, 0x29, 0x1d, 0xa5, 0xb7, 0xf6, 0xfc, 0x51, 0x7f, 0x51, 0x79, 0xe7, 0x89, 0xce, 0x9e, 0xf5, - 0xa7, 0x19, 0xc8, 0xb9, 0x14, 0xfd, 0x06, 0xf7, 0x62, 0x9e, 0xe5, 0xe9, 0x64, 0xc4, 0x78, 0x1e, - 0xe6, 0x71, 0xc2, 0xe9, 0x69, 0x7c, 0x94, 0x86, 0xe9, 0x7b, 0x9a, 0x09, 0x1f, 0x6d, 0xa9, 0xb3, - 0xdc, 0x5b, 0x7b, 0xfe, 0x6d, 0xbf, 0xae, 0xf5, 0xbe, 0x35, 0x9f, 0xc2, 0x2e, 0x32, 0xc8, 0x42, - 0xc9, 0xdd, 0xb8, 0x3a, 0xd8, 0xfd, 0x4b, 0x81, 0xbb, 0x35, 0x62, 0xc4, 0xe1, 0x76, 0x45, 0x79, - 0x65, 0xd3, 0xdf, 0x2c, 0x2c, 0xac, 0x3c, 0xeb, 0xca, 0xca, 0xc8, 0xad, 0xc5, 0x45, 0xa1, 0x17, - 0xd0, 0xbc, 0xd8, 0x76, 0xb7, 0xbe, 0x6d, 0x51, 0x23, 0x29, 0x04, 0xdd, 0x3f, 0x00, 0x1a, 0xe2, - 0x1d, 0xdd, 0x81, 0x55, 0x49, 0xa0, 0x71, 0x24, 0x6b, 0x5c, 0x27, 0x2b, 0xf2, 0xdd, 0x8a, 0xd0, - 0x6d, 0x58, 0x11, 0x64, 0x11, 0x59, 0x92, 0x91, 0x96, 0x78, 0xb5, 0x22, 0x74, 0x1f, 0xd6, 0x0a, - 0x4d, 0x96, 0x87, 0x39, 0xd3, 0x96, 0x3b, 0x4a, 0xef, 0x3a, 0x01, 0x09, 0x79, 0x02, 0x41, 0x3b, - 0xd0, 0x1e, 0x87, 0x29, 0xe3, 0x39, 0x9d, 0x26, 0x68, 0xc8, 0x04, 0xeb, 0x05, 0xea, 0x15, 0x69, - 0x10, 0x34, 0x78, 0x38, 0x62, 0x5a, 0x53, 0xea, 0xe5, 0x33, 0xfa, 0x1e, 0x1a, 0x6f, 0x63, 0x1e, - 0x69, 0xad, 0x8e, 0xd2, 0x6b, 0x3f, 0x7f, 0x72, 0x75, 0x43, 0xf2, 0xcf, 0x7e, 0xcc, 0x23, 0x22, - 0x85, 0x68, 0x00, 0x5b, 0x59, 0x1e, 0xa6, 0x39, 0xcd, 0xe3, 0x11, 0xa3, 0x13, 0x1e, 0xbf, 0xa3, - 0x3c, 0xe4, 0x89, 0xb6, 0xd2, 
0x51, 0x7a, 0x2d, 0x72, 0x43, 0xc6, 0xfc, 0x78, 0xc4, 0x02, 0x1e, - 0xbf, 0x73, 0x42, 0x9e, 0xa0, 0x27, 0x80, 0x18, 0x8f, 0x2e, 0xd3, 0x57, 0x25, 0x7d, 0x93, 0xf1, - 0x68, 0x8e, 0xfc, 0x0a, 0x20, 0xcc, 0xf3, 0x34, 0x3e, 0x9a, 0xe4, 0x2c, 0xd3, 0xae, 0xcb, 0x53, - 0x7f, 0x78, 0xc5, 0x4c, 0xf7, 0xd9, 0xfb, 0xc3, 0xf0, 0x74, 0xc2, 0xc8, 0x05, 0x29, 0x7a, 0x01, - 0x5a, 0x94, 0x26, 0xe3, 0x31, 0x8b, 0xe8, 0x0c, 0xa5, 0xc7, 0xc9, 0x84, 0xe7, 0x1a, 0x74, 0x94, - 0xde, 0x06, 0xb9, 0x55, 0xc6, 0xf5, 0xf3, 0xb0, 0x21, 0xa2, 0xe8, 0x07, 0x68, 0xb1, 0x33, 0xc6, - 0xf3, 0x4c, 0x5b, 0x93, 0xf6, 0xbd, 0x0f, 0x38, 0x23, 0x2c, 0x04, 0xa4, 0xd4, 0xa1, 0xaf, 0x60, - 0x6b, 0xea, 0x5d, 0x20, 0xa5, 0xef, 0xba, 0xf4, 0x45, 0x65, 0x4c, 0x6a, 0x4a, 0xcf, 0x97, 0xd0, - 0x3c, 0x8d, 0xf9, 0xdb, 0x4c, 0xdb, 0xa8, 0xe9, 0x78, 0xde, 0xd2, 0x8e, 0xf9, 0x5b, 0x52, 0xa8, - 0x50, 0x1f, 0x3e, 0x99, 0x1a, 0x4a, 0xa0, 0xf4, 0x6b, 0x4b, 0xbf, 0x1b, 0x65, 0x48, 0x08, 0x4a, - 0xbb, 0xef, 0xa0, 0x25, 0x36, 0x6b, 0x92, 0x69, 0x9b, 0xf2, 0xd6, 0xec, 0x5c, 0xe1, 0x27, 0xb9, - 0xa4, 0xd4, 0x6c, 0xff, 0xa9, 0x40, 0x53, 0x16, 0x2f, 0xd6, 0xf0, 0xd2, 0x58, 0x15, 0x39, 0xd6, - 0xf5, 0xfc, 0xe2, 0x4c, 0xa7, 0x6b, 0xb8, 0x74, 0x61, 0x0d, 0xe7, 0xe7, 0xbc, 0xfc, 0x71, 0xe6, - 0xdc, 0xa8, 0x9b, 0xf3, 0xf6, 0xbf, 0x0a, 0x34, 0xc4, 0x99, 0x7c, 0x9c, 0x1b, 0x3a, 0xdf, 0x60, - 0xe3, 0xe3, 0x34, 0xd8, 0xac, 0x6b, 0xb0, 0xfb, 0xbb, 0x02, 0xab, 0xd3, 0xcb, 0x8b, 0xee, 0xc0, - 0x4d, 0x6f, 0xa8, 0x3b, 0x74, 0xdf, 0x72, 0x4c, 0x1a, 0x38, 0xde, 0x10, 0x1b, 0xd6, 0x9e, 0x85, - 0x4d, 0xf5, 0x1a, 0xba, 0x05, 0x68, 0x16, 0xb2, 0x1c, 0x1f, 0x13, 0x47, 0xb7, 0x55, 0x05, 0x6d, - 0x81, 0x3a, 0xc3, 0x3d, 0x4c, 0x0e, 0x31, 0x51, 0x97, 0xe6, 0x51, 0xc3, 0xb6, 0xb0, 0xe3, 0xab, - 0xcb, 0xf3, 0x39, 0x86, 0xc4, 0x35, 0x03, 0x03, 0x13, 0xb5, 0x31, 0x8f, 0x1b, 0xae, 0xe3, 0x05, - 0x07, 0x98, 0xa8, 0xcd, 0xee, 0x7f, 0x2b, 0xd0, 0x2a, 0xd6, 0x0a, 0xfd, 0x02, 0x9b, 0x11, 0x1b, - 0xa7, 0xec, 0x38, 0xcc, 0x59, 0x44, 0x8f, 0x93, 0xa8, 0xf8, 0x01, 
0x6b, 0x5f, 0xf5, 0x23, 0x53, - 0xc8, 0xfb, 0xe6, 0xb9, 0xb6, 0x00, 0x8c, 0x24, 0x62, 0xbb, 0x4b, 0x9a, 0x42, 0xda, 0xb3, 0xac, - 0x02, 0x43, 0x1a, 0xac, 0x8c, 0x58, 0x96, 0x85, 0x27, 0xd3, 0x4d, 0x9c, 0xbe, 0x22, 0x03, 0x1a, - 0xd2, 0x76, 0x59, 0xda, 0x0e, 0x3e, 0xc8, 0x76, 0x66, 0x46, 0xa4, 0xb8, 0xfb, 0x4f, 0x13, 0xb6, - 0x16, 0xd5, 0x82, 0xee, 0xc1, 0x1d, 0x13, 0x0f, 0x09, 0x36, 0x74, 0x1f, 0x9b, 0xd4, 0xf3, 0x75, - 0x3f, 0xf0, 0xa8, 0xe1, 0x9a, 0x98, 0xba, 0xfb, 0xea, 0x35, 0xb4, 0x03, 0x9d, 0x8a, 0xb0, 0xa1, - 0x3b, 0x06, 0xb6, 0x6d, 0x6c, 0xaa, 0x0a, 0xea, 0xc1, 0x4e, 0x05, 0x2b, 0x70, 0xf6, 0x1d, 0xf7, - 0x47, 0x87, 0x62, 0x42, 0x5c, 0x31, 0x9f, 0x27, 0xf0, 0xb0, 0x82, 0x69, 0x39, 0x87, 0xba, 0x6d, - 0x99, 0x54, 0x27, 0xaf, 0x82, 0x83, 0x62, 0x6c, 0x5f, 0x42, 0xaf, 0x82, 0x6c, 0x62, 0xdd, 0xb4, - 0x2d, 0x07, 0x53, 0xfc, 0xc6, 0xc0, 0xd8, 0xc4, 0xa6, 0xda, 0xa8, 0x29, 0xd5, 0x71, 0x7d, 0xba, - 0xe7, 0x06, 0x8e, 0xa9, 0x36, 0xd1, 0x23, 0xf8, 0xa2, 0x82, 0xa5, 0xdb, 0x04, 0xeb, 0xe6, 0x4f, - 0x14, 0xbf, 0xb1, 0x3c, 0xdf, 0x53, 0x5b, 0x35, 0xf6, 0x43, 0x4c, 0x0e, 0x2c, 0xcf, 0xb3, 0x5c, - 0x87, 0x9a, 0xd8, 0x11, 0x7b, 0xba, 0x82, 0x9e, 0xc2, 0xa3, 0x0a, 0x36, 0xc1, 0x9e, 0x1b, 0x10, - 0x43, 0x14, 0xfb, 0x5a, 0x0f, 0x3c, 0x1f, 0x9b, 0xea, 0x2a, 0xea, 0xc3, 0xe3, 0x0a, 0xfa, 0x9e, - 0x6e, 0xd9, 0x58, 0xac, 0x29, 0x36, 0x5c, 0xc7, 0xb4, 0x7c, 0xcb, 0x75, 0xd4, 0xeb, 0xa8, 0x0b, - 0x9f, 0x55, 0xd5, 0xbd, 0xeb, 0x12, 0x91, 0x13, 0xd0, 0x43, 0xf8, 0xbc, 0x6a, 0x96, 0x81, 0x4f, - 0xdd, 0x3d, 0x4a, 0x74, 0xe7, 0x15, 0x56, 0xd7, 0x6a, 0xe7, 0x65, 0x1d, 0x0c, 0x6d, 0x2c, 0x06, - 0x80, 0x4d, 0x75, 0xbd, 0xe6, 0xb8, 0xa6, 0x57, 0xb1, 0x1c, 0xed, 0x06, 0x7a, 0x00, 0xdd, 0xca, - 0xa4, 0xfa, 0xa1, 0x6e, 0xd9, 0xfa, 0xae, 0x8d, 0xd5, 0x76, 0xcd, 0x9c, 0x4c, 0xdd, 0xd7, 0xa9, - 0xed, 0x7a, 0x9e, 0xba, 0x89, 0x1e, 0xc3, 0x83, 0xea, 0x6c, 0x81, 0xff, 0x1a, 0x3b, 0xbe, 0x25, - 0x63, 0xaa, 0xda, 0x75, 0x00, 0x2e, 0x6c, 0xf4, 0x4d, 0xb8, 0x31, 0x4f, 0xf7, 0xb0, 0xaf, 0x5e, - 0x43, 
0x08, 0xda, 0x97, 0xb6, 0x5b, 0xb9, 0x4c, 0x2d, 0x97, 0x74, 0x97, 0xc3, 0xfd, 0x38, 0xa9, - 0xbd, 0x67, 0xbb, 0xe0, 0x8b, 0xa7, 0xa1, 0x00, 0x87, 0xca, 0xcf, 0x2f, 0x4f, 0xe2, 0xfc, 0xd7, - 0xc9, 0x91, 0xf8, 0x67, 0x39, 0x10, 0xb2, 0xa7, 0xb3, 0x8f, 0xe5, 0xb9, 0x2c, 0x4f, 0x8b, 0x4f, - 0xe7, 0x13, 0xc6, 0x07, 0x27, 0xb3, 0xaf, 0xf6, 0xa3, 0x96, 0x84, 0xbf, 0xfe, 0x3f, 0x00, 0x00, - 0xff, 0xff, 0x63, 0x4b, 0xfa, 0x64, 0xdc, 0x0b, 0x00, 0x00, -} diff --git a/route/errors.go b/route/errors.go index ff697d9546..c901f8be32 100644 --- a/route/errors.go +++ b/route/errors.go @@ -4,6 +4,8 @@ import ( "fmt" "net/http" "runtime/debug" + + husky "github.com/honeycombio/husky/otlp" ) type handlerError struct { @@ -33,7 +35,7 @@ var ( ErrUpstreamUnavailable = handlerError{nil, "upstream target unavailable", http.StatusServiceUnavailable, true, true} ErrReqToEvent = handlerError{nil, "failed to parse event", http.StatusBadRequest, false, true} ErrBatchToEvent = handlerError{nil, "failed to parse event within batch", http.StatusBadRequest, false, true} - ErrInvalidContentType = handlerError{nil, "invalid content-type - only 'application/protobuf' is supported", http.StatusNotImplemented, false, true} + ErrInvalidContentType = handlerError{nil, husky.ErrInvalidContentType.Message, husky.ErrInvalidContentType.HTTPStatusCode, false, true} ) func (r *Router) handlerReturnWithError(w http.ResponseWriter, he handlerError, err error) { diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 11bd45d032..4e98729ebf 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -1,63 +1,52 @@ package route import ( - "bytes" - "compress/gzip" "context" - "encoding/binary" - "encoding/hex" "errors" - "fmt" - "io" - "io/ioutil" "net/http" "time" - "github.com/golang/protobuf/proto" + huskyotlp "github.com/honeycombio/husky/otlp" "github.com/honeycombio/refinery/types" - "github.com/klauspost/compress/zstd" - 
"google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1" - common "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" - trace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" + collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" ) func (router *Router) postOTLP(w http.ResponseWriter, req *http.Request) { - defer req.Body.Close() - contentType := req.Header.Get("content-type") - if contentType != "application/protobuf" && contentType != "application/x-protobuf" { - router.handlerReturnWithError(w, ErrInvalidContentType, errors.New("invalid content-type")) + ri := huskyotlp.GetRequestInfoFromHttpHeaders(req.Header) + if err := ri.ValidateHeaders(); err != nil { + if errors.Is(err, huskyotlp.ErrInvalidContentType) { + router.handlerReturnWithError(w, ErrInvalidContentType, err) + } else { + router.handlerReturnWithError(w, ErrAuthNeeded, err) + } return } - apiKey, datasetName, err := getAPIKeyDatasetAndTokenFromHttpHeaders(req) + batch, err := huskyotlp.TranslateHttpTraceRequest(req.Body, ri) if err != nil { - router.handlerReturnWithError(w, ErrAuthNeeded, err) + router.handlerReturnWithError(w, ErrUpstreamFailed, err) return } - request, cleanup, err := parseOTLPBody(req, router.zstdDecoders) - defer cleanup() - if err != nil { - router.handlerReturnWithError(w, ErrPostBody, err) - } - - if err := processTraceRequest(req.Context(), router, request, apiKey, datasetName); err != nil { + if err := processTraceRequest(req.Context(), router, batch, ri.ApiKey, ri.Dataset); err != nil { router.handlerReturnWithError(w, ErrUpstreamFailed, err) } } func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) { - apiKey, datasetName, err 
:= getAPIKeyDatasetAndTokenFromMetadata(ctx) + ri := huskyotlp.GetRequestInfoFromGrpcMetadata(ctx) + if err := ri.ValidateHeaders(); err != nil { + return nil, huskyotlp.AsGRPCError(err) + } + + batch, err := huskyotlp.TranslateGrpcTraceRequest(req) if err != nil { - return nil, status.Error(codes.Unauthenticated, err.Error()) + return nil, huskyotlp.AsGRPCError(err) } - if err := processTraceRequest(ctx, router, req, apiKey, datasetName); err != nil { - return nil, status.Error(codes.Internal, err.Error()) + if err := processTraceRequest(ctx, router, batch, ri.ApiKey, ri.Dataset); err != nil { + return nil, huskyotlp.AsGRPCError(err) } return &collectortrace.ExportTraceServiceResponse{}, nil @@ -66,7 +55,7 @@ func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTrac func processTraceRequest( ctx context.Context, router *Router, - request *collectortrace.ExportTraceServiceRequest, + batch []map[string]interface{}, apiKey string, datasetName string) error { @@ -79,328 +68,27 @@ func processTraceRequest( return err } - for _, resourceSpan := range request.ResourceSpans { - resourceAttrs := make(map[string]interface{}) - - if resourceSpan.Resource != nil { - addAttributesToMap(resourceAttrs, resourceSpan.Resource.Attributes) - } - - for _, librarySpan := range resourceSpan.InstrumentationLibrarySpans { - library := librarySpan.InstrumentationLibrary - if library != nil { - if len(library.Name) > 0 { - resourceAttrs["library.name"] = library.Name - } - if len(library.Version) > 0 { - resourceAttrs["library.version"] = library.Version - } - } - - for _, span := range librarySpan.GetSpans() { - traceID := bytesToTraceID(span.TraceId) - spanID := hex.EncodeToString(span.SpanId) - timestamp := time.Unix(0, int64(span.StartTimeUnixNano)).UTC() - - spanKind := getSpanKind(span.Kind) - eventAttrs := map[string]interface{}{ - "trace.trace_id": traceID, - "trace.span_id": spanID, - "type": spanKind, - "span.kind": spanKind, - "name": span.Name, - 
"duration_ms": float64(span.EndTimeUnixNano-span.StartTimeUnixNano) / float64(time.Millisecond), - "status_code": int32(getSpanStatusCode(span.Status)), - } - if span.ParentSpanId != nil { - eventAttrs["trace.parent_id"] = hex.EncodeToString(span.ParentSpanId) - } - if getSpanStatusCode(span.Status) == trace.Status_STATUS_CODE_ERROR { - eventAttrs["error"] = true - } - if span.Status != nil && len(span.Status.Message) > 0 { - eventAttrs["status_message"] = span.Status.Message - } - if span.Attributes != nil { - addAttributesToMap(eventAttrs, span.Attributes) - } - - sampleRate, err := getSampleRateFromAttributes(eventAttrs) - if err != nil { - debugLog.WithField("error", err.Error()).WithField("sampleRate", eventAttrs["sampleRate"]).Logf("error parsing sampleRate") - } - - // copy resource attributes to event attributes - for k, v := range resourceAttrs { - eventAttrs[k] = v - } - - events := make([]*types.Event, 0, 1+len(span.Events)+len(span.Links)) - events = append(events, &types.Event{ - Context: ctx, - APIHost: apiHost, - APIKey: apiKey, - Dataset: datasetName, - SampleRate: uint(sampleRate), - Timestamp: timestamp, - Data: eventAttrs, - }) - - for _, sevent := range span.Events { - timestamp := time.Unix(0, int64(sevent.TimeUnixNano)).UTC() - attrs := map[string]interface{}{ - "trace.trace_id": traceID, - "trace.parent_id": spanID, - "name": sevent.Name, - "parent_name": span.Name, - "meta.annotation_type": "span_event", - } - - if sevent.Attributes != nil { - addAttributesToMap(attrs, sevent.Attributes) - } - for k, v := range resourceAttrs { - attrs[k] = v - } - sampleRate, err := getSampleRateFromAttributes(attrs) - if err != nil { - debugLog. - WithField("error", err.Error()). - WithField("sampleRate", attrs["sampleRate"]). 
- Logf("error parsing sampleRate") - } - events = append(events, &types.Event{ - Context: ctx, - APIHost: apiHost, - APIKey: apiKey, - Dataset: datasetName, - SampleRate: uint(sampleRate), - Timestamp: timestamp, - Data: attrs, - }) - } - - for _, slink := range span.Links { - attrs := map[string]interface{}{ - "trace.trace_id": traceID, - "trace.parent_id": spanID, - "trace.link.trace_id": bytesToTraceID(slink.TraceId), - "trace.link.span_id": hex.EncodeToString(slink.SpanId), - "parent_name": span.Name, - "meta.annotation_type": "link", - } - - if slink.Attributes != nil { - addAttributesToMap(attrs, slink.Attributes) - } - for k, v := range resourceAttrs { - attrs[k] = v - } - sampleRate, err := getSampleRateFromAttributes(attrs) - if err != nil { - debugLog. - WithField("error", err.Error()). - WithField("sampleRate", attrs["sampleRate"]). - Logf("error parsing sampleRate") - } - events = append(events, &types.Event{ - Context: ctx, - APIHost: apiHost, - APIKey: apiKey, - Dataset: datasetName, - SampleRate: uint(sampleRate), - Timestamp: time.Time{}, //links don't have timestamps, so use empty time - Data: attrs, - }) - } - - for _, evt := range events { - err = router.processEvent(evt, requestID) - if err != nil { - router.Logger.Error().Logf("Error processing event: " + err.Error()) - } - } - - } + for _, ev := range batch { + attrs := ev["data"].(map[string]interface{}) + timestamp := ev["time"].(time.Time) + sampleRate, err := getSampleRateFromAttributes(attrs) + if err != nil { + debugLog.WithField("error", err.Error()).WithField("sampleRate", attrs["sampleRate"]).Logf("error parsing sampleRate") } - } - - return nil -} -func addAttributesToMap(attrs map[string]interface{}, attributes []*common.KeyValue) { - for _, attr := range attributes { - if attr.Key == "" { - continue + event := &types.Event{ + Context: ctx, + APIHost: apiHost, + APIKey: apiKey, + Dataset: datasetName, + SampleRate: uint(sampleRate), + Timestamp: timestamp, + Data: attrs, } - switch 
attr.Value.Value.(type) { - case *common.AnyValue_StringValue: - attrs[attr.Key] = attr.Value.GetStringValue() - case *common.AnyValue_BoolValue: - attrs[attr.Key] = attr.Value.GetBoolValue() - case *common.AnyValue_DoubleValue: - attrs[attr.Key] = attr.Value.GetDoubleValue() - case *common.AnyValue_IntValue: - attrs[attr.Key] = attr.Value.GetIntValue() + if err = router.processEvent(event, requestID); err != nil { + router.Logger.Error().Logf("Error processing event: " + err.Error()) } } -} - -func getSpanKind(kind trace.Span_SpanKind) string { - switch kind { - case trace.Span_SPAN_KIND_CLIENT: - return "client" - case trace.Span_SPAN_KIND_SERVER: - return "server" - case trace.Span_SPAN_KIND_PRODUCER: - return "producer" - case trace.Span_SPAN_KIND_CONSUMER: - return "consumer" - case trace.Span_SPAN_KIND_INTERNAL: - return "internal" - case trace.Span_SPAN_KIND_UNSPECIFIED: - fallthrough - default: - return "unspecified" - } -} -// bytesToTraceID returns an ID suitable for use for spans and traces. Before -// encoding the bytes as a hex string, we want to handle cases where we are -// given 128-bit IDs with zero padding, e.g. 0000000000000000f798a1e7f33c8af6. -// To do this, we borrow a strategy from Jaeger [1] wherein we split the byte -// sequence into two parts. The leftmost part could contain all zeros. We use -// that to determine whether to return a 64-bit hex encoded string or a 128-bit -// one. -// -// [1]: https://github.com/jaegertracing/jaeger/blob/cd19b64413eca0f06b61d92fe29bebce1321d0b0/model/ids.go#L81 -func bytesToTraceID(traceID []byte) string { - // binary.BigEndian.Uint64() does a bounds check on traceID which will - // cause a panic if traceID is fewer than 8 bytes. In this case, we don't - // need to check for zero padding on the high part anyway, so just return a - // hex string. 
- if len(traceID) < traceIDShortLength { - return fmt.Sprintf("%x", traceID) - } - var low uint64 - if len(traceID) == traceIDLongLength { - low = binary.BigEndian.Uint64(traceID[traceIDShortLength:]) - if high := binary.BigEndian.Uint64(traceID[:traceIDShortLength]); high != 0 { - return fmt.Sprintf("%016x%016x", high, low) - } - } else { - low = binary.BigEndian.Uint64(traceID) - } - - return fmt.Sprintf("%016x", low) -} - -// getSpanStatusCode checks the value of both the deprecated code and code fields -// on the span status and using the rules specified in the backward compatibility -// notes in the protobuf definitions. See: -// -// https://github.com/open-telemetry/opentelemetry-proto/blob/59c488bfb8fb6d0458ad6425758b70259ff4a2bd/opentelemetry/proto/trace/v1/trace.proto#L230 -func getSpanStatusCode(status *trace.Status) trace.Status_StatusCode { - if status == nil { - return trace.Status_STATUS_CODE_UNSET - } - if status.Code == trace.Status_STATUS_CODE_UNSET { - if status.DeprecatedCode == trace.Status_DEPRECATED_STATUS_CODE_OK { - return trace.Status_STATUS_CODE_UNSET - } - return trace.Status_STATUS_CODE_ERROR - } - return status.Code -} - -func getAPIKeyDatasetAndTokenFromMetadata(ctx context.Context) ( - apiKey string, - datasetName string, - err error) { - if md, ok := metadata.FromIncomingContext(ctx); ok { - apiKey = getValueFromMetadata(md, "x-honeycomb-team") - datasetName = getValueFromMetadata(md, "x-honeycomb-dataset") - } - - if err := validateHeaders(apiKey, datasetName); err != nil { - return "", "", err - } - return apiKey, datasetName, nil -} - -func getValueFromMetadata(md metadata.MD, key string) string { - if vals := md.Get(key); len(vals) > 0 { - return vals[0] - } - return "" -} - -func getAPIKeyDatasetAndTokenFromHttpHeaders(r *http.Request) ( - apiKey string, - datasetName string, - err error) { - apiKey = r.Header.Get("x-honeycomb-team") - datasetName = r.Header.Get("x-honeycomb-dataset") - - if err := 
validateHeaders(apiKey, datasetName); err != nil { - return "", "", err - } - return apiKey, datasetName, nil -} - -func validateHeaders(apiKey string, datasetName string) error { - if apiKey == "" { - return errors.New("missing x-honeycomb-team header") - } - if datasetName == "" { - return errors.New("missing x-honeycomb-team header") - } return nil } - -func parseOTLPBody(r *http.Request, zstdDecoders chan *zstd.Decoder) (request *collectortrace.ExportTraceServiceRequest, cleanup func(), err error) { - cleanup = func() { /* empty cleanup */ } - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - return nil, cleanup, err - } - bodyReader := bytes.NewReader(bodyBytes) - - var reader io.Reader - switch r.Header.Get("Content-Encoding") { - case "gzip": - var err error - reader, err = gzip.NewReader(bodyReader) - if err != nil { - return nil, cleanup, err - } - case "zstd": - zReader := <-zstdDecoders - cleanup = func() { - zReader.Reset(nil) - zstdDecoders <- zReader - } - - err = zReader.Reset(bodyReader) - if err != nil { - return nil, cleanup, err - } - - reader = zReader - default: - reader = bodyReader - } - - bytes, err := ioutil.ReadAll(reader) - if err != nil { - return nil, cleanup, err - } - - otlpRequet := &collectortrace.ExportTraceServiceRequest{} - err = proto.Unmarshal(bytes, otlpRequet) - if err != nil { - return nil, cleanup, err - } - - return otlpRequet, cleanup, nil -} diff --git a/route/otlp_trace_test.go b/route/otlp_trace_test.go index 2354b29031..9a9381bfce 100644 --- a/route/otlp_trace_test.go +++ b/route/otlp_trace_test.go @@ -12,15 +12,16 @@ import ( "time" "github.com/golang/protobuf/proto" + huskyotlp "github.com/honeycombio/husky/otlp" "github.com/honeycombio/refinery/config" - collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1" - common 
"github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/common/v1" - trace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/trace/v1" "github.com/honeycombio/refinery/logger" "github.com/honeycombio/refinery/metrics" "github.com/honeycombio/refinery/transmit" "github.com/klauspost/compress/zstd" "github.com/stretchr/testify/assert" + collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" + common "go.opentelemetry.io/proto/otlp/common/v1" + trace "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/grpc/metadata" ) @@ -128,7 +129,7 @@ func TestOTLPHandler(t *testing.T) { spanEvent := mockTransmission.Events[0] // assert.Equal(t, time.Unix(0, int64(12345)).UTC(), spanEvent.Timestamp) - assert.Equal(t, bytesToTraceID(traceID), spanEvent.Data["trace.trace_id"]) + assert.Equal(t, huskyotlp.BytesToTraceID(traceID), spanEvent.Data["trace.trace_id"]) assert.Equal(t, hex.EncodeToString(spanID), spanEvent.Data["trace.span_id"]) assert.Equal(t, "span_link", spanEvent.Data["span.name"]) assert.Equal(t, "span_with_event", spanEvent.Data["parent.name"]) @@ -174,9 +175,9 @@ func TestOTLPHandler(t *testing.T) { assert.Equal(t, 2, len(mockTransmission.Events)) spanLink := mockTransmission.Events[1] - assert.Equal(t, bytesToTraceID(traceID), spanLink.Data["trace.trace_id"]) + assert.Equal(t, huskyotlp.BytesToTraceID(traceID), spanLink.Data["trace.trace_id"]) assert.Equal(t, hex.EncodeToString(spanID), spanLink.Data["trace.span_id"]) - assert.Equal(t, bytesToTraceID(linkTraceID), spanLink.Data["trace.link.trace_id"]) + assert.Equal(t, huskyotlp.BytesToTraceID(linkTraceID), spanLink.Data["trace.link.trace_id"]) assert.Equal(t, hex.EncodeToString(linkSpanID), spanLink.Data["trace.link.span_id"]) assert.Equal(t, "link", spanLink.Data["meta.annotation_type"]) assert.Equal(t, "link_attr_val", spanLink.Data["link_attr_key"]) diff --git 
a/route/route.go b/route/route.go index 6597318cc1..218c3085ac 100644 --- a/route/route.go +++ b/route/route.go @@ -35,7 +35,7 @@ import ( "github.com/honeycombio/refinery/transmit" "github.com/honeycombio/refinery/types" - collectortrace "github.com/honeycombio/refinery/internal/opentelemetry-proto-gen/collector/trace/v1" + collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" ) const ( @@ -77,6 +77,9 @@ type Router struct { server *http.Server grpcServer *grpc.Server doneWG sync.WaitGroup + + // used to identify Router as a OTLP TraceServer + collectortrace.UnimplementedTraceServiceServer } type BatchResponse struct { From d9d748b4f039b2dadd31a0768d53938d4f4f1fc3 Mon Sep 17 00:00:00 2001 From: Robb Kidd Date: Mon, 29 Nov 2021 18:16:40 -0500 Subject: [PATCH 097/351] update CHANGELOG for v1.7.0 (#360) Signed-off-by: Robb Kidd --- CHANGELOG.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ca6e581bb7..3c1ec08555 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,17 @@ # Refinery Changelog +## 1.7.0 2021-11-29 + +### Enhancements + +- Replace internal duplicated code with Husky (#341) [@MikeGoldsmith](https://github.com/MikeGoldsmith) + - Also fixes segfaults caused by nil appearing in OTLP data as described in (#358) +- Improves histogram buckets over the default set (#355) [@bdarfler](https://github.com/bdarfler) + +### Maintenance + +- Update dependabot to monthly (#356) [@vreynolds](https://github.com/vreynolds) + ## 1.6.1 2021-11-10 - Revert "Use alpine as base image (#343)" (#352) From 6302fae55d5762929fc0a3d14fb482e15e56d78b Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Mon, 6 Dec 2021 15:44:03 +0000 Subject: [PATCH 098/351] Bump husky to v0.4.0 (#361) --- go.mod | 4 +-- go.sum | 8 ++--- route/otlp_trace.go | 26 +++++----------- route/route.go | 40 ------------------------ route/route_test.go | 75 
--------------------------------------------- 5 files changed, 14 insertions(+), 139 deletions(-) diff --git a/go.mod b/go.mod index fbc797e057..36de63a7b7 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.2.0 + github.com/honeycombio/husky v0.4.0 github.com/honeycombio/libhoney-go v1.15.6 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 @@ -28,7 +28,7 @@ require ( github.com/spf13/viper v1.9.0 github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 - go.opentelemetry.io/proto/otlp v0.11.0 + go.opentelemetry.io/proto/otlp v0.9.0 google.golang.org/grpc v1.42.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect diff --git a/go.sum b/go.sum index eafe20de69..21ac7d7784 100644 --- a/go.sum +++ b/go.sum @@ -228,8 +228,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.2.0 h1:vjuUU9HtQnNLOk+VLUG7AQ7b/1oO7gL13N10nFTUaXs= -github.com/honeycombio/husky v0.2.0/go.mod h1:OKDdF3gAoP6GtQsMgFJKUqApY+x6T0mWv1S+VHpbH8A= +github.com/honeycombio/husky v0.4.0 h1:wTp7gXe0GTFkHi4kf1kaC5JT6ApHKhHxd0XgC4429Nw= +github.com/honeycombio/husky v0.4.0/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= 
github.com/honeycombio/libhoney-go v1.15.6 h1:zbwfdo74Gsedmu6OT/oAHv4pfKNoseTXRMA/4e5XWew= github.com/honeycombio/libhoney-go v1.15.6/go.mod h1:8NyBoM746bz+nw3yQzQF8gtJO/z4mkr/MD5C4r4uC2Y= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -368,8 +368,8 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= -go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= +go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= +go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 4e98729ebf..df6a2ecf59 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -4,7 +4,6 @@ import ( "context" "errors" "net/http" - "time" huskyotlp "github.com/honeycombio/husky/otlp" "github.com/honeycombio/refinery/types" @@ -23,13 +22,13 @@ func (router *Router) postOTLP(w http.ResponseWriter, req *http.Request) { return } - batch, err := huskyotlp.TranslateHttpTraceRequest(req.Body, ri) + result, err := huskyotlp.TranslateTraceRequestFromReader(req.Body, ri) if err != nil { router.handlerReturnWithError(w, ErrUpstreamFailed, err) return } - if err := 
processTraceRequest(req.Context(), router, batch, ri.ApiKey, ri.Dataset); err != nil { + if err := processTraceRequest(req.Context(), router, result.Events, ri.ApiKey, ri.Dataset); err != nil { router.handlerReturnWithError(w, ErrUpstreamFailed, err) } } @@ -40,12 +39,12 @@ func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTrac return nil, huskyotlp.AsGRPCError(err) } - batch, err := huskyotlp.TranslateGrpcTraceRequest(req) + result, err := huskyotlp.TranslateTraceRequest(req) if err != nil { return nil, huskyotlp.AsGRPCError(err) } - if err := processTraceRequest(ctx, router, batch, ri.ApiKey, ri.Dataset); err != nil { + if err := processTraceRequest(ctx, router, result.Events, ri.ApiKey, ri.Dataset); err != nil { return nil, huskyotlp.AsGRPCError(err) } @@ -55,13 +54,11 @@ func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTrac func processTraceRequest( ctx context.Context, router *Router, - batch []map[string]interface{}, + batch []huskyotlp.Event, apiKey string, datasetName string) error { var requestID types.RequestIDContextKey - debugLog := router.iopLogger.Debug().WithField("request_id", requestID) - apiHost, err := router.Config.GetHoneycombAPI() if err != nil { router.Logger.Error().Logf("Unable to retrieve APIHost from config while processing OTLP batch") @@ -69,21 +66,14 @@ func processTraceRequest( } for _, ev := range batch { - attrs := ev["data"].(map[string]interface{}) - timestamp := ev["time"].(time.Time) - sampleRate, err := getSampleRateFromAttributes(attrs) - if err != nil { - debugLog.WithField("error", err.Error()).WithField("sampleRate", attrs["sampleRate"]).Logf("error parsing sampleRate") - } - event := &types.Event{ Context: ctx, APIHost: apiHost, APIKey: apiKey, Dataset: datasetName, - SampleRate: uint(sampleRate), - Timestamp: timestamp, - Data: attrs, + SampleRate: uint(ev.SampleRate), + Timestamp: ev.Timestamp, + Data: ev.Attributes, } if err = router.processEvent(event, requestID); 
err != nil { router.Logger.Error().Logf("Error processing event: " + err.Error()) diff --git a/route/route.go b/route/route.go index 218c3085ac..d0f7ee8005 100644 --- a/route/route.go +++ b/route/route.go @@ -626,43 +626,3 @@ func getFirstValueFromMetadata(key string, md metadata.MD) string { } return "" } - -func getSampleRateFromAttributes(attrs map[string]interface{}) (int, error) { - var sampleRateKey string - if attrs["sampleRate"] != nil { - sampleRateKey = "sampleRate" - } else if attrs["SampleRate"] != nil { - sampleRateKey = "SampleRate" - } - if len(sampleRateKey) == 0 || attrs[sampleRateKey] == nil { - return defaultSampleRate, nil - } - var sampleRate int - var err error - switch v := attrs[sampleRateKey].(type) { - case string: - var i int64 - i, err = strconv.ParseInt(v, 10, 32) - sampleRate = int(i) - case int: - if v > math.MaxInt32 { - sampleRate = math.MaxInt32 - } else { - sampleRate = v - } - case int32: - sampleRate = int(v) - case int64: - if v > math.MaxInt32 { - sampleRate = math.MaxInt32 - } else { - sampleRate = int(v) - } - default: - err = fmt.Errorf("unrecognised sampleRate datatype - %T", sampleRate) - sampleRate = defaultSampleRate - } - // remove sampleRate from event fields - delete(attrs, sampleRateKey) - return sampleRate, err -} diff --git a/route/route_test.go b/route/route_test.go index 2080396a87..47b90364fe 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "io/ioutil" - "math" "net/http" "net/http/httptest" "strings" @@ -304,80 +303,6 @@ func TestGetAPIKeyAndDatasetFromMetadataCaseInsensitive(t *testing.T) { } } -func TestGetSampleRateFromAttributes(t *testing.T) { - const ( - defaultSampleRate = 1 - ) - tests := []struct { - name string - attrKey string - attrValue interface{} - expectedValue int - }{ - { - name: "missing attr gets default value", - attrKey: "", - attrValue: nil, - expectedValue: defaultSampleRate, - }, - { - name: "can parse integer value", - attrKey: 
"sampleRate", - attrValue: 5, - expectedValue: 5, - }, - { - name: "can parse string value", - attrKey: "sampleRate", - attrValue: "5", - expectedValue: 5, - }, - { - name: "can parse int64 value (less than int32 max)", - attrKey: "sampleRate", - attrValue: int64(100), - expectedValue: 100, - }, - { - name: "can parse int64 value (greater than int32 max)", - attrKey: "sampleRate", - attrValue: int64(math.MaxInt32 + 100), - expectedValue: math.MaxInt32, - }, - { - name: "does not parse float, gets default value", - attrKey: "sampleRate", - attrValue: 0.25, - expectedValue: defaultSampleRate, - }, - { - name: "does not parse bool, gets default value", - attrKey: "sampleRate", - attrValue: true, - expectedValue: defaultSampleRate, - }, - { - name: "does not parse struct, gets default value", - attrKey: "sampleRate", - attrValue: struct{}{}, - expectedValue: defaultSampleRate, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - attrs := map[string]interface{}{ - tt.attrKey: tt.attrValue, - } - - sampleRate, _ := getSampleRateFromAttributes(attrs) - if sampleRate != tt.expectedValue { - t.Errorf("got: %d\n\twant: %d", sampleRate, tt.expectedValue) - } - }) - } -} - func TestDebugTrace(t *testing.T) { req, _ := http.NewRequest("GET", "/debug/trace/123abcdef", nil) req = mux.SetURLVars(req, map[string]string{"traceID": "123abcdef"}) From 5715a32d28cbc968b72fd248964b0feca9119944 Mon Sep 17 00:00:00 2001 From: JamieDanielson Date: Tue, 7 Dec 2021 12:10:02 -0500 Subject: [PATCH 099/351] feat: Make MaxBatchSize configurable (#365) * Make MaxBatchSize configurable * Make MaxBatchSize uint in Config Co-authored-by: Mike Goldsmith --- app/app_test.go | 3 ++- cmd/refinery/main.go | 4 ++-- config/config.go | 3 +++ config/file_config.go | 9 +++++++++ config/mock.go | 8 ++++++++ config_complete.toml | 3 +++ 6 files changed, 27 insertions(+), 3 deletions(-) diff --git a/app/app_test.go b/app/app_test.go index 22019fcc29..8c643d47ac 100644 --- 
a/app/app_test.go +++ b/app/app_test.go @@ -105,6 +105,7 @@ func newStartedApp( c := &config.MockConfig{ GetSendDelayVal: 0, GetTraceTimeoutVal: 10 * time.Millisecond, + GetMaxBatchSizeVal: 500, GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1}, SendTickerVal: 2 * time.Millisecond, PeerManagementType: "file", @@ -160,7 +161,7 @@ func newStartedApp( sdPeer, _ := statsd.New(statsd.Prefix("refinery.peer")) peerClient, err := libhoney.NewClient(libhoney.ClientConfig{ Transmission: &transmission.Honeycomb{ - MaxBatchSize: 500, + MaxBatchSize: c.GetMaxBatchSize(), BatchTimeout: libhoney.DefaultBatchTimeout, MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches, PendingWorkCapacity: uint(c.GetPeerBufferSize()), diff --git a/cmd/refinery/main.go b/cmd/refinery/main.go index 58552db8e4..532929f970 100644 --- a/cmd/refinery/main.go +++ b/cmd/refinery/main.go @@ -134,7 +134,7 @@ func main() { userAgentAddition := "refinery/" + version upstreamClient, err := libhoney.NewClient(libhoney.ClientConfig{ Transmission: &transmission.Honeycomb{ - MaxBatchSize: 500, + MaxBatchSize: c.GetMaxBatchSize(), BatchTimeout: libhoney.DefaultBatchTimeout, MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches, PendingWorkCapacity: uint(c.GetUpstreamBufferSize()), @@ -152,7 +152,7 @@ func main() { peerClient, err := libhoney.NewClient(libhoney.ClientConfig{ Transmission: &transmission.Honeycomb{ - MaxBatchSize: 500, + MaxBatchSize: c.GetMaxBatchSize(), BatchTimeout: libhoney.DefaultBatchTimeout, MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches, PendingWorkCapacity: uint(c.GetPeerBufferSize()), diff --git a/config/config.go b/config/config.go index a24beffd0e..b364375ee5 100644 --- a/config/config.go +++ b/config/config.go @@ -72,6 +72,9 @@ type Config interface { // duration. 
GetTraceTimeout() (time.Duration, error) + // GetMaxBatchSize is the number of events to be included in the batch for sending + GetMaxBatchSize() uint + // GetOtherConfig attempts to fill the passed in struct with the contents of // a subsection of the config. This is used by optional configurations to // allow different implementations of necessary interfaces configure diff --git a/config/file_config.go b/config/file_config.go index 587e356b51..702b708851 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -38,6 +38,7 @@ type configContents struct { Metrics string `validate:"required,oneof= prometheus honeycomb"` SendDelay time.Duration `validate:"required"` TraceTimeout time.Duration `validate:"required"` + MaxBatchSize uint `validate:"required"` SendTicker time.Duration `validate:"required"` UpstreamBufferSize int `validate:"required"` PeerBufferSize int `validate:"required"` @@ -113,6 +114,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("Metrics", "honeycomb") c.SetDefault("SendDelay", 2*time.Second) c.SetDefault("TraceTimeout", 60*time.Second) + c.SetDefault("MaxBatchSize", 500) c.SetDefault("SendTicker", 100*time.Millisecond) c.SetDefault("UpstreamBufferSize", libhoney.DefaultPendingWorkCapacity) c.SetDefault("PeerBufferSize", libhoney.DefaultPendingWorkCapacity) @@ -644,6 +646,13 @@ func (f *fileConfig) GetTraceTimeout() (time.Duration, error) { return f.conf.TraceTimeout, nil } +func (f *fileConfig) GetMaxBatchSize() uint { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.MaxBatchSize +} + func (f *fileConfig) GetOtherConfig(name string, iface interface{}) error { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config/mock.go b/config/mock.go index 7bf07c2a0a..a4025c1042 100644 --- a/config/mock.go +++ b/config/mock.go @@ -56,6 +56,7 @@ type MockConfig struct { GetSendDelayVal time.Duration GetTraceTimeoutErr error GetTraceTimeoutVal time.Duration + GetMaxBatchSizeVal uint 
GetUpstreamBufferSizeVal int GetPeerBufferSizeVal int SendTickerVal time.Duration @@ -221,6 +222,13 @@ func (m *MockConfig) GetTraceTimeout() (time.Duration, error) { return m.GetTraceTimeoutVal, m.GetTraceTimeoutErr } +func (m *MockConfig) GetMaxBatchSize() uint { + m.Mux.RLock() + defer m.Mux.RUnlock() + + return m.GetMaxBatchSizeVal +} + // TODO: allow per-dataset mock values func (m *MockConfig) GetSamplerConfigForDataset(dataset string) (interface{}, error) { m.Mux.RLock() diff --git a/config_complete.toml b/config_complete.toml index 70978294a7..87fe76e793 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -67,6 +67,9 @@ SendDelay = "2s" # Eligible for live reload. TraceTimeout = "60s" +# MaxBatchSize is the number of events to be included in the batch for sending +MaxBatchSize = 500 + # SendTicker is a short timer; it determines the duration to use to check for traces to send SendTicker = "100ms" From fbd59d16ecaa37c6751ac5a077795c3e8ed3d01c Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 8 Dec 2021 09:30:04 +0000 Subject: [PATCH 100/351] Bump husky to v0.5.0 (#366) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 36de63a7b7..23a36ada7c 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.4.0 + github.com/honeycombio/husky v0.5.0 github.com/honeycombio/libhoney-go v1.15.6 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 diff --git a/go.sum b/go.sum index 21ac7d7784..f2f5127294 100644 --- a/go.sum +++ b/go.sum @@ -228,8 +228,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn 
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.4.0 h1:wTp7gXe0GTFkHi4kf1kaC5JT6ApHKhHxd0XgC4429Nw= -github.com/honeycombio/husky v0.4.0/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= +github.com/honeycombio/husky v0.5.0 h1:JEuZwoF0kS87DpwyAveEPNSvrvb0dhorF7r/K9HWzrY= +github.com/honeycombio/husky v0.5.0/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= github.com/honeycombio/libhoney-go v1.15.6 h1:zbwfdo74Gsedmu6OT/oAHv4pfKNoseTXRMA/4e5XWew= github.com/honeycombio/libhoney-go v1.15.6/go.mod h1:8NyBoM746bz+nw3yQzQF8gtJO/z4mkr/MD5C4r4uC2Y= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= From 7a010c6595cf10a4b9a153dfde0cad50bfbb3112 Mon Sep 17 00:00:00 2001 From: JamieDanielson Date: Wed, 8 Dec 2021 13:54:45 -0500 Subject: [PATCH 101/351] prep v1.8.0 (#367) --- CHANGELOG.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c1ec08555..b18709bb4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ # Refinery Changelog +## 1.8.0 2021-12-08 + +### Enhancements + +- Make MaxBatchSize configurable (#365) | [@JamieDanielson](https://github.com/JamieDanielson) + +### Maintenance + +- Bump husky to v0.5.0 (#366) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) +- Bump husky to v0.4.0 (#361) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) + ## 1.7.0 2021-11-29 ### Enhancements From e2ed21d490c6287f79767d03a7709afd52507707 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Dec 2021 14:32:57 -0700 Subject: [PATCH 102/351] Bump github.com/prometheus/client_golang from 0.9.4 to 1.11.0 (#357) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 0.9.4 to 1.11.0. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/master/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v0.9.4...v1.11.0) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 46 ++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 41 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 23a36ada7c..1807e5011e 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/klauspost/compress v1.13.6 github.com/leodido/go-urn v1.2.0 // indirect github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v0.9.4 + github.com/prometheus/client_golang v1.11.0 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.8.1 github.com/spf13/viper v1.9.0 diff --git a/go.sum b/go.sum index f2f5127294..b86231dbaa 100644 --- a/go.sum +++ b/go.sum @@ -48,18 +48,24 @@ github.com/DataDog/zstd v1.4.8 h1:Rpmta4xZ/MgZnriKNd24iZMhGpP5dvUcs/uqfBapKZY= github.com/DataDog/zstd v1.4.8/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/OneOfOne/xxhash v1.2.2/go.mod 
h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -114,7 +120,11 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= 
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= @@ -236,19 +246,23 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= 
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -288,6 +302,7 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -301,17 +316,24 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod 
h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.4 h1:Y8E/JaaPbmFSW2V81Ab/d8yZFYQQGbni1b1jPcG9Y6A= -github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 
h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -320,6 +342,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod 
h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -429,6 +453,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -492,6 +517,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -504,6 +530,7 @@ golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7w 
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -518,6 +545,8 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -525,6 +554,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -534,6 +564,7 @@ golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -758,7 +789,10 @@ gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 94cc5a2c7609d8e1f9f7889fb9c1de92790fdd30 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Tue, 28 Dec 2021 14:02:56 -0700 Subject: [PATCH 103/351] gh: add re-triage workflow (#368) --- .github/workflows/re-triage.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .github/workflows/re-triage.yml diff --git a/.github/workflows/re-triage.yml b/.github/workflows/re-triage.yml new file mode 100644 index 0000000000..a98366047e --- /dev/null +++ b/.github/workflows/re-triage.yml @@ -0,0 +1,12 @@ +name: Re-triage issues with new comments +on: + issue_comment: + types: [created] +jobs: + re-triage: + runs-on: ubuntu-latest + name: Re-triage issues with new comments + steps: + - uses: honeycombio/oss-management-actions/re-triage@v1 + with: + ghprojects-token: ${{ secrets.GHPROJECTS_TOKEN }} From a2e5100c737eab58b43b716c33a70ae395fb8e0e Mon Sep 17 00:00:00 2001 From: Liz Fong-Jones Date: Thu, 6 Jan 2022 06:04:27 -0500 Subject: [PATCH 104/351] deps: bump libhoney & golang (#373) --- .circleci/config.yml | 8 ++++---- go.mod | 7 +++++-- go.sum | 26 ++++++++++++++------------ 3 files changed, 23 insertions(+), 18 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index b95cb462ca..0b3d38d81e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -27,7 +27,7 @@ commands: jobs: test: docker: - - image: cimg/go:1.16 + - image: cimg/go:1.17 - image: redis:6 steps: - checkout @@ -51,7 +51,7 @@ jobs: build_binaries: docker: - - image: cimg/go:1.16 + - image: cimg/go:1.17 steps: - checkout - go-build: @@ -124,7 +124,7 @@ jobs: build_docker: docker: - 
- image: cimg/go:1.16 + - image: cimg/go:1.17 steps: - run: go install github.com/google/ko@latest - checkout @@ -135,7 +135,7 @@ jobs: publish_docker: docker: - - image: cimg/go:1.16 + - image: cimg/go:1.17 steps: - run: go install github.com/google/ko@latest - checkout diff --git a/go.mod b/go.mod index 1807e5011e..540fabce80 100644 --- a/go.mod +++ b/go.mod @@ -12,11 +12,11 @@ require ( github.com/go-playground/validator v9.31.0+incompatible github.com/golang/protobuf v1.5.2 github.com/gomodule/redigo v1.8.5 - github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d + github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 github.com/honeycombio/husky v0.5.0 - github.com/honeycombio/libhoney-go v1.15.6 + github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.13.6 @@ -29,6 +29,9 @@ require ( github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.9.0 + golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect + golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0 // indirect + golang.org/x/text v0.3.7 // indirect google.golang.org/grpc v1.42.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect diff --git a/go.sum b/go.sum index b86231dbaa..1d9ec41d33 100644 --- a/go.sum +++ b/go.sum @@ -44,8 +44,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/zstd v1.4.8 h1:Rpmta4xZ/MgZnriKNd24iZMhGpP5dvUcs/uqfBapKZY= -github.com/DataDog/zstd v1.4.8/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= +github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -208,8 +208,8 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d h1:mksP7mUlZu0fpgMVMfDnaVvErqRL05HM3Kk+rBkZK54= -github.com/gorilla/mux v1.6.3-0.20190108142930-08e7f807d38d/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway 
v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= @@ -240,8 +240,8 @@ github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+D github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= github.com/honeycombio/husky v0.5.0 h1:JEuZwoF0kS87DpwyAveEPNSvrvb0dhorF7r/K9HWzrY= github.com/honeycombio/husky v0.5.0/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= -github.com/honeycombio/libhoney-go v1.15.6 h1:zbwfdo74Gsedmu6OT/oAHv4pfKNoseTXRMA/4e5XWew= -github.com/honeycombio/libhoney-go v1.15.6/go.mod h1:8NyBoM746bz+nw3yQzQF8gtJO/z4mkr/MD5C4r4uC2Y= +github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= +github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= @@ -258,7 +258,6 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= 
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -370,8 +369,8 @@ github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/vmihailenco/msgpack/v4 v4.3.11 h1:Q47CePddpNGNhk4GCnAx9DDtASi2rasatE0cd26cZoE= github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/msgpack/v5 v5.3.4 h1:qMKAwOV+meBw2Y8k9cVwAy7qErtYCwBzZ2ellBfvnqc= -github.com/vmihailenco/msgpack/v5 v5.3.4/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= @@ -480,8 +479,9 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 
h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210913180222-943fd674d43e h1:+b/22bPvDYt4NPDcy4xAGCmON713ONAWFeY3Z7I3tR8= +golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -570,8 +570,9 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0 h1:xrCZDmdtoloIiooiA9q0OQb9r8HejIHYoHGhGCe1pGg= +golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -580,8 +581,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 5bb46f5e9fdf87e6e26955923562aa528cc4fcf7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jan 2022 11:54:29 +0000 Subject: [PATCH 105/351] Bump github.com/honeycombio/husky from 0.5.0 to 0.6.0 (#370) Bumps [github.com/honeycombio/husky](https://github.com/honeycombio/husky) from 0.5.0 to 0.6.0. - [Release notes](https://github.com/honeycombio/husky/releases) - [Changelog](https://github.com/honeycombio/husky/blob/main/CHANGELOG.md) - [Commits](https://github.com/honeycombio/husky/compare/v0.5.0...v0.6.0) --- updated-dependencies: - dependency-name: github.com/honeycombio/husky dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 540fabce80..70391561fd 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.5.0 + github.com/honeycombio/husky v0.6.0 github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 diff --git a/go.sum b/go.sum index 1d9ec41d33..39d459b54a 100644 --- a/go.sum +++ b/go.sum @@ -238,8 +238,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.5.0 h1:JEuZwoF0kS87DpwyAveEPNSvrvb0dhorF7r/K9HWzrY= -github.com/honeycombio/husky v0.5.0/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= +github.com/honeycombio/husky v0.6.0 h1:VufNrLZoVMqDZrj8hHIF6izh/6LbpYfPPaKeGttXMII= +github.com/honeycombio/husky v0.6.0/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= From 447dacd83b9a7762143dd32d1ef736124f76923f Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Thu, 6 Jan 2022 15:42:44 +0000 Subject: [PATCH 106/351] Prepare v1.8.1 release (#376) * prepare v1.8.1 release * fix changelog entry user --- CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b18709bb4d..1964778750 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Refinery Changelog +## 1.8.1 2022-01-06 + +### Maintenance + +- Add re-triage workflow (#368) | [@vreynolds](https://github.com/vreynolds) +- Bump libhoney & golang (#373) | [@lizthegrey](https://github.com/lizthegrey) +- Bump github.com/honeycombio/husky from 0.5.0 to 0.6.0 (#370) +- Bump github.com/prometheus/client_golang from 0.9.4 to 1.11.0 (#357) + ## 1.8.0 2021-12-08 ### Enhancements From 0333a07592ea9a40630f0b497113178a26e5b545 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Jan 2022 16:44:23 -0700 Subject: [PATCH 107/351] Bump google.golang.org/grpc from 1.42.0 to 1.43.0 (#372) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.42.0 to 1.43.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.42.0...v1.43.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 70391561fd..0021c09a1b 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0 // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/grpc v1.42.0 + google.golang.org/grpc v1.43.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect ) diff --git a/go.sum b/go.sum index 39d459b54a..9166614f27 100644 --- a/go.sum +++ b/go.sum @@ -759,8 +759,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= From a971b13c090a74c9025aa26eb9bafa9da8b69b8a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Jan 2022 18:47:26 -0700 Subject: [PATCH 
108/351] Bump github.com/spf13/viper from 1.9.0 to 1.10.1 (#375) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.9.0 to 1.10.1. - [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.9.0...v1.10.1) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 +-- go.sum | 102 ++++++++++++++++++++++++++++++++++++++++++++------------- 2 files changed, 80 insertions(+), 26 deletions(-) diff --git a/go.mod b/go.mod index 0021c09a1b..0ecac786a7 100644 --- a/go.mod +++ b/go.mod @@ -25,13 +25,11 @@ require ( github.com/prometheus/client_golang v1.11.0 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.8.1 - github.com/spf13/viper v1.9.0 + github.com/spf13/viper v1.10.1 github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.9.0 golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect - golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0 // indirect - golang.org/x/text v0.3.7 // indirect google.golang.org/grpc v1.43.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect diff --git a/go.sum b/go.sum index 9166614f27..9b82099f72 100644 --- a/go.sum +++ b/go.sum @@ -23,6 +23,9 @@ cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSU cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go 
v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -31,7 +34,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -44,6 +47,7 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= 
github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -55,6 +59,7 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -63,13 +68,17 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -78,7 +87,9 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go 
v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -92,7 +103,9 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= @@ -113,6 +126,7 @@ 
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpm github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -139,6 +153,7 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -208,19 +223,25 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= 
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack 
v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -233,21 +254,23 @@ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= -github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= 
github.com/honeycombio/husky v0.6.0 h1:VufNrLZoVMqDZrj8hHIF6izh/6LbpYfPPaKeGttXMII= github.com/honeycombio/husky v0.6.0/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -272,27 +295,31 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod 
h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns 
v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= -github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -303,6 +330,7 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= 
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -316,6 +344,7 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= @@ -325,11 +354,13 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0 
h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -338,7 +369,7 @@ github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqn github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= +github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -346,6 +377,7 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.8.1 
h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= @@ -354,8 +386,8 @@ github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmq github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk= -github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= +github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk= +github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -367,6 +399,7 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/vmihailenco/msgpack/v4 v4.3.11 h1:Q47CePddpNGNhk4GCnAx9DDtASi2rasatE0cd26cZoE= github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= @@ -380,9 +413,9 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -397,7 +430,6 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.6.0/go.mod 
h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -441,9 +473,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -479,7 +511,9 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210913180222-943fd674d43e h1:+b/22bPvDYt4NPDcy4xAGCmON713ONAWFeY3Z7I3tR8= golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -497,6 +531,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -511,7 +547,6 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -556,6 +591,7 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -570,9 +606,14 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0 h1:xrCZDmdtoloIiooiA9q0OQb9r8HejIHYoHGhGCe1pGg= -golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486 h1:5hpz5aRr+W1erYCL5JRhSUBJRph7l9XkNveoExlrKYk= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -672,7 +713,12 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod 
h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -732,8 +778,17 @@ google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKr google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71 h1:z+ErRPu0+KS02Td3fOAgdX+lnPDh/VyaABEJPD4JRQs= google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -759,6 +814,7 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -786,8 +842,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= -gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= 
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From bfda2835d18f70e814a3f2f6c53051fa56a1102a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 15 Jan 2022 16:16:30 -0700 Subject: [PATCH 109/351] Bump github.com/gomodule/redigo from 1.8.5 to 1.8.8 (#374) Bumps [github.com/gomodule/redigo](https://github.com/gomodule/redigo) from 1.8.5 to 1.8.8. - [Release notes](https://github.com/gomodule/redigo/releases) - [Commits](https://github.com/gomodule/redigo/compare/v1.8.5...v1.8.8) --- updated-dependencies: - dependency-name: github.com/gomodule/redigo dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0ecac786a7..11b70e3df0 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/go-playground/universal-translator v0.17.0 // indirect github.com/go-playground/validator v9.31.0+incompatible github.com/golang/protobuf v1.5.2 - github.com/gomodule/redigo v1.8.5 + github.com/gomodule/redigo v1.8.8 github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 diff --git a/go.sum b/go.sum index 9b82099f72..8e415b6989 100644 --- a/go.sum +++ b/go.sum @@ -182,8 +182,8 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx 
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomodule/redigo v1.8.5 h1:nRAxCa+SVsyjSBrtZmG/cqb6VbTmuRzpg/PoTFlpumc= -github.com/gomodule/redigo v1.8.5/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/gomodule/redigo v1.8.8 h1:f6cXq6RRfiyrOJEV7p3JhLDlmawGBVBBP1MggY8Mo4E= +github.com/gomodule/redigo v1.8.8/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= From b02dbb101f3837f0ba386d76618e1aef463f59ba Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Tue, 18 Jan 2022 12:05:09 -0700 Subject: [PATCH 110/351] docs: update rules example (#378) - rule conditions are ANDed not ORed --- config/config_test.go | 2 +- rules_complete.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index f2c0c6f825..787ab79b09 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -217,7 +217,7 @@ func TestReadRulesConfig(t *testing.T) { rule = r.Rule[1] assert.Equal(t, 1, rule.SampleRate) - assert.Equal(t, "500 errors or slow", rule.Name) + assert.Equal(t, "keep slow 500 errors", rule.Name) assert.Len(t, rule.Condition, 2) default: diff --git a/rules_complete.toml b/rules_complete.toml index acb738e850..e3711d989a 100644 --- a/rules_complete.toml +++ b/rules_complete.toml @@ -216,7 +216,7 @@ SampleRate = 1 value = "/health-check" [[dataset4.rule]] - 
name = "500 errors or slow" + name = "keep slow 500 errors" SampleRate = 1 [[dataset4.rule.condition]] field = "status_code" @@ -228,7 +228,7 @@ SampleRate = 1 value = 1000.789 [[dataset4.rule]] - name = "dynamic sample 200 responses" + name = "dynamically sample 200 responses" [[dataset4.rule.condition]] field = "status_code" operator = "=" From 1d4cfb02fd2991b6c48f8a1594e79607394d42f9 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 26 Jan 2022 18:06:43 +0000 Subject: [PATCH 111/351] Add retries when connecting to redis during init (#382) --- internal/peer/redis.go | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/internal/peer/redis.go b/internal/peer/redis.go index aa88bb06e7..71c8a9a876 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -59,7 +59,26 @@ func newRedisPeers(c config.Config) (Peers, error) { IdleTimeout: 5 * time.Minute, Wait: true, Dial: func() (redis.Conn, error) { - return redis.Dial("tcp", redisHost, options...) + // if redis is started at the same time as refinery, connecting to redis can + // fail and cause refinery to error out. + // Instead, we will try to connect to redis for up to 10 seconds with + // a 1 second delay between attempts to allow the redis process to init + var ( + conn redis.Conn + err error + ) + for timeout := time.After(10 * time.Second); ; { + select { + case <-timeout: + return nil, err + default: + conn, err = redis.Dial("tcp", redisHost, options...) 
+ if err == nil { + return conn, nil + } + time.Sleep(time.Second) + } + } }, } From 033ba0fa426c331da8c1106c923d61418358d0fc Mon Sep 17 00:00:00 2001 From: Jason Harley Date: Tue, 1 Feb 2022 05:32:12 -0500 Subject: [PATCH 112/351] Properly set meta.refinery.local_hostname field (#387) Co-authored-by: Mike Goldsmth --- .gitignore | 2 ++ collect/collect.go | 12 ++++++++++++ transmit/transmit.go | 8 -------- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/.gitignore b/.gitignore index 76638ae4d0..91efd2d6de 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,5 @@ refinery !/cmd/refinery test_redimem !/cmd/test_redimem + +dockerize* diff --git a/collect/collect.go b/collect/collect.go index b4cffd3e52..ec2bb31b78 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -69,6 +69,8 @@ type InMemCollector struct { incoming chan *types.Span fromPeer chan *types.Span reload chan struct{} + + hostname string } // traceSentRecord is the bit we leave behind when sending a trace to remember @@ -113,6 +115,13 @@ func (i *InMemCollector) Start() error { i.fromPeer = make(chan *types.Span, imcConfig.CacheCapacity*3) i.reload = make(chan struct{}, 1) i.datasetSamplers = make(map[string]sample.Sampler) + + if i.Config.GetAddHostMetadataToTrace() { + if hostname, err := os.Hostname(); err == nil && hostname != "" { + i.hostname = hostname + } + } + // spin up one collector because this is a single threaded collector go i.collect() @@ -467,6 +476,9 @@ func (i *InMemCollector) send(trace *types.Trace) { field := i.Config.GetDryRunFieldName() sp.Data[field] = shouldSend } + if i.hostname != "" { + sp.Data["meta.refinery.local_hostname"] = i.hostname + } // if spans are already sampled, take that in to account when computing // the final rate sp.SampleRate *= trace.SampleRate diff --git a/transmit/transmit.go b/transmit/transmit.go index fa46711211..38df5cffa9 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -2,7 +2,6 @@ package transmit import ( 
"context" - "os" "sync" libhoney "github.com/honeycombio/libhoney-go" @@ -55,13 +54,6 @@ func (d *DefaultTransmission) Start() error { return err } - if d.Config.GetAddHostMetadataToTrace() { - if hostname, err := os.Hostname(); err == nil && hostname != "" { - // add hostname to spans - d.LibhClient.AddField("meta.refinery.local_hostname", hostname) - } - } - d.builder = d.LibhClient.NewBuilder() d.builder.APIHost = upstreamAPI From b30df6558035ebc58f6d0747baaf699e726fd08d Mon Sep 17 00:00:00 2001 From: "Steven E. Harris" Date: Tue, 1 Feb 2022 05:34:21 -0500 Subject: [PATCH 113/351] Honor env. variable to set gRPC listener address (#386) --- config/config_test.go | 31 +++++++++++++++++++++++++------ config/file_config.go | 1 + 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index 787ab79b09..b67568d538 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -12,10 +12,28 @@ import ( "github.com/stretchr/testify/assert" ) +func TestGRPCListenAddrEnvVar(t *testing.T) { + const address = "127.0.0.1:4317" + const envVarName = "REFINERY_GRPC_LISTEN_ADDRESS" + os.Setenv(envVarName, address) + defer os.Unsetenv(envVarName) + + c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {}) + + if err != nil { + t.Error(err) + } + + if a, _ := c.GetGRPCListenAddr(); a != address { + t.Error("received", a, "expected", address) + } +} + func TestRedisHostEnvVar(t *testing.T) { - host := "redis.magic:1337" - os.Setenv("REFINERY_REDIS_HOST", host) - defer os.Unsetenv("REFINERY_REDIS_HOST") + const host = "redis.magic:1337" + const envVarName = "REFINERY_REDIS_HOST" + os.Setenv(envVarName, host) + defer os.Unsetenv(envVarName) c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {}) @@ -29,9 +47,10 @@ func TestRedisHostEnvVar(t *testing.T) { } func TestRedisPasswordEnvVar(t *testing.T) { - password := ***REMOVED*** - 
os.Setenv("REFINERY_REDIS_PASSWORD", password) - defer os.Unsetenv("REFINERY_REDIS_PASSWORD") + const password = ***REMOVED*** + const envVarName = "REFINERY_REDIS_PASSWORD" + os.Setenv(envVarName, password) + defer os.Unsetenv(envVarName) c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {}) diff --git a/config/file_config.go b/config/file_config.go index 702b708851..a7a7ec1eb6 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -94,6 +94,7 @@ type PeerManagementConfig struct { func NewConfig(config, rules string, errorCallback func(error)) (Config, error) { c := viper.New() + c.BindEnv("GRPCListenAddr", "REFINERY_GRPC_LISTEN_ADDRESS") c.BindEnv("PeerManagement.RedisHost", "REFINERY_REDIS_HOST") c.BindEnv("PeerManagement.RedisPassword", "REFINERY_REDIS_PASSWORD") c.BindEnv("HoneycombLogger.LoggerAPIKey", "REFINERY_HONEYCOMB_API_KEY") From 4f9db1d36b17eae6b6fb76ca254cb018c72e9f2f Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 2 Feb 2022 09:59:23 +0000 Subject: [PATCH 114/351] prepare v1.9.0 release (#388) --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1964778750..3397e383a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Refinery Changelog +## 1.9.0 2022-02-01 + +### Enhancements + +- Honor env. 
variable to set gRPC listener address (#386) | [@seh](https://github.com/seh) +- Add retries when connecting to redis during init (#382) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) + +### Fixes + +- Properly set meta.refinery.local_hostname field (#387) | [@jharley](https://github.com/jharley) + +### Maintenance + +- docs: update rules example (#378) | [@vreynolds](https://github.com/vreynolds) +- Bump github.com/gomodule/redigo from 1.8.5 to 1.8.8 (#374) +- Bump github.com/spf13/viper from 1.9.0 to 1.10.1 (#375) +- Bump google.golang.org/grpc from 1.42.0 to 1.43.0 (#372) + ## 1.8.1 2022-01-06 ### Maintenance From 6813f9d25971564dae225b720f5b0e8bd7104a70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miche=C3=A1l=20Looney?= Date: Fri, 4 Feb 2022 18:16:53 +0000 Subject: [PATCH 115/351] Fixed "honeeycomb" typo in log output when reloading config (#394) --- metrics/honeycomb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metrics/honeycomb.go b/metrics/honeycomb.go index de226a0f7a..c89d731168 100644 --- a/metrics/honeycomb.go +++ b/metrics/honeycomb.go @@ -87,7 +87,7 @@ func (h *HoneycombMetrics) Start() error { } func (h *HoneycombMetrics) reloadBuilder() { - h.Logger.Debug().Logf("reloading config for honeeycomb metrics reporter") + h.Logger.Debug().Logf("reloading config for honeycomb metrics reporter") mc, err := h.Config.GetHoneycombMetricsConfig() if err != nil { // complain about this both to STDOUT and to the previously configured From 15c2392665e007d48035d2131ed425c6994c0e83 Mon Sep 17 00:00:00 2001 From: Jason Harley Date: Tue, 8 Feb 2022 11:33:59 -0500 Subject: [PATCH 116/351] build: add ARM64 (aarch64) RPM artifact (#395) --- .circleci/config.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 0b3d38d81e..af51ac8ddc 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ 
-82,6 +82,9 @@ jobs: - run: name: build_rpm_amd64 command: ./build-pkg.sh -m amd64 -v "${CIRCLE_TAG:-${CIRCLE_SHA1:0:7}}" -t rpm && mv *.rpm ~/artifacts + - run: + name: build_rpm_arm64 + command: ./build-pkg.sh -m arm64 -v "${CIRCLE_TAG:-${CIRCLE_SHA1:0:7}}" -t rpm && mv *.rpm ~/artifacts - run: name: copy_binaries command: cp $GOPATH/bin/refinery-* ~/artifacts @@ -197,4 +200,3 @@ workflows: only: /^v.*/ branches: ignore: /.*/ - From ff5ba5495fcb6136356acbfdd4bb4f401bbdf971 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Thu, 10 Feb 2022 10:59:02 -0700 Subject: [PATCH 117/351] fix: deadlock when reloading configs (#398) This manifested in k8s because we get double notified about config changes. Previously, onChange would acquire a read lock, and then each callback would also acquire a read lock when reading whatever config value they care about. onChange also acquires a write lock when unmarshalling. With additional goroutines calling onChange (e.g. in k8s), we were almost guaranteed to have a write lock in between two recursive read locks. From Go RLock docs: // It should not be used for recursive read locking; a blocked Lock // call excludes new readers from acquiring the lock. See the // documentation on the RWMutex type. Removing the read lock around callbacks - every config accessor function already has a read lock. 
--- config/file_config.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/config/file_config.go b/config/file_config.go index a7a7ec1eb6..8d95899dc1 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -206,9 +206,6 @@ func (f *fileConfig) onChange(in fsnotify.Event) { f.unmarshal() - f.mux.RLock() - defer f.mux.RUnlock() - for _, c := range f.callbacks { c() } From 36089933693aada1d01eded7ffa7021c98853de0 Mon Sep 17 00:00:00 2001 From: ecobrien29 <46940457+ecobrien29@users.noreply.github.com> Date: Thu, 10 Feb 2022 10:26:30 -0800 Subject: [PATCH 118/351] added username in config for redis auth (#397) --- README.md | 4 ++-- config/config.go | 4 ++++ config/config_test.go | 17 +++++++++++++++++ config/file_config.go | 9 +++++++++ config/mock.go | 8 ++++++++ config_complete.toml | 6 ++++++ internal/peer/redis.go | 7 ++++++- 7 files changed, 52 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 7385f2a82f..74db7a164d 100644 --- a/README.md +++ b/README.md @@ -61,8 +61,8 @@ To enable the redis-based config: When launched in redis-config mode, Refinery needs a redis host to use for managing the list of peers in the Refinery cluster. This hostname and port can be specified in one of two ways: -- set the `REFINERY_REDIS_HOST` environment variable (and optionally the `REFINERY_REDIS_PASSWORD` environment variable) -- set the `RedisHost` field in the config file (and optionally the `RedisPassword` field in the config file) +- set the `REFINERY_REDIS_HOST` environment variable (and optionally the `REFINERY_REDIS_USERNAME` and `REFINERY_REDIS_PASSWORD` environment variables) +- set the `RedisHost` field in the config file (and optionally the `RedisUsername` and `RedisPassword` fields in the config file) The Redis host should be a hostname and a port, for example `redis.mydomain.com:6379`. The example config file has `localhost:6379` which obviously will not work with more than one host. 
When TLS is required to connect to the Redis instance, set the `UseTLS` config to `true`. diff --git a/config/config.go b/config/config.go index b364375ee5..989ae9f815 100644 --- a/config/config.go +++ b/config/config.go @@ -45,6 +45,10 @@ type Config interface { // management. GetRedisHost() (string, error) + // GetRedisUsername returns the username of a Redis instance to use for peer + // management. + GetRedisUsername() (string, error) + // GetRedisPassword returns the password of a Redis instance to use for peer // management. GetRedisPassword() (string, error) diff --git a/config/config_test.go b/config/config_test.go index b67568d538..b2910a8457 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -46,6 +46,23 @@ func TestRedisHostEnvVar(t *testing.T) { } } +func TestRedisUsernameEnvVar(t *testing.T) { + const username = "admin" + const envVarName = "REFINERY_REDIS_USERNAME" + os.Setenv(envVarName, username) + defer os.Unsetenv(envVarName) + + c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {}) + + if err != nil { + t.Error(err) + } + + if d, _ := c.GetRedisUsername(); d != username { + t.Error("received", d, "expected", username) + } +} + func TestRedisPasswordEnvVar(t *testing.T) { const password = ***REMOVED*** const envVarName = "REFINERY_REDIS_PASSWORD" diff --git a/config/file_config.go b/config/file_config.go index 8d95899dc1..bf6e19dc91 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -82,6 +82,7 @@ type PeerManagementConfig struct { Type string `validate:"required,oneof= file redis"` Peers []string `validate:"dive,url"` RedisHost string + RedisUsername string RedisPassword string UseTLS bool UseTLSInsecure bool @@ -96,6 +97,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.BindEnv("GRPCListenAddr", "REFINERY_GRPC_LISTEN_ADDRESS") c.BindEnv("PeerManagement.RedisHost", "REFINERY_REDIS_HOST") + c.BindEnv("PeerManagement.RedisUsername", "REFINERY_REDIS_USERNAME") 
c.BindEnv("PeerManagement.RedisPassword", "REFINERY_REDIS_PASSWORD") c.BindEnv("HoneycombLogger.LoggerAPIKey", "REFINERY_HONEYCOMB_API_KEY") c.BindEnv("HoneycombMetrics.MetricsAPIKey", "REFINERY_HONEYCOMB_API_KEY") @@ -411,6 +413,13 @@ func (f *fileConfig) GetRedisHost() (string, error) { return f.config.GetString("PeerManagement.RedisHost"), nil } +func (f *fileConfig) GetRedisUsername() (string, error) { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.config.GetString("PeerManagement.RedisUsername"), nil +} + func (f *fileConfig) GetRedisPassword() (string, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config/mock.go b/config/mock.go index a4025c1042..35a70a5846 100644 --- a/config/mock.go +++ b/config/mock.go @@ -38,6 +38,8 @@ type MockConfig struct { GetPeersVal []string GetRedisHostErr error GetRedisHostVal string + GetRedisUsernameErr error + GetRedisUsernameVal string GetRedisPasswordErr error GetRedisPasswordVal string GetUseTLSErr error @@ -173,6 +175,12 @@ func (m *MockConfig) GetRedisHost() (string, error) { return m.GetRedisHostVal, m.GetRedisHostErr } +func (m *MockConfig) GetRedisUsername() (string, error) { + m.Mux.RLock() + defer m.Mux.RUnlock() + + return m.GetRedisUsernameVal, m.GetRedisUsernameErr +} func (m *MockConfig) GetRedisPassword() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index 87fe76e793..f8de3b3674 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -152,6 +152,12 @@ Metrics = "honeycomb" # Not eligible for live reload. # RedisHost = "localhost:6379" +# RedisUsername is the username used to connect to redis for peer cluster membership management. +# If the environment variable 'REFINERY_REDIS_USERNAME' is set it takes +# precedence and this value is ignored. +# Not eligible for live reload. +# RedisUsername = "" + # RedisPassword is the password used to connect to redis for peer cluster membership management. 
# If the environment variable 'REFINERY_REDIS_PASSWORD' is set it takes # precedence and this value is ignored. diff --git a/internal/peer/redis.go b/internal/peer/redis.go index 71c8a9a876..4507e17843 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -65,7 +65,7 @@ func newRedisPeers(c config.Config) (Peers, error) { // a 1 second delay between attempts to allow the redis process to init var ( conn redis.Conn - err error + err error ) for timeout := time.After(10 * time.Second); ; { select { @@ -190,6 +190,11 @@ func buildOptions(c config.Config) []redis.DialOption { redis.DialDatabase(0), // TODO enable multiple databases for multiple samproxies } + username, _ := c.GetRedisUsername() + if username != "" { + options = append(options, redis.DialUsername(username)) + } + password, _ := c.GetRedisPassword() if password != "" { options = append(options, redis.DialPassword(password)) From 230392064e2f51d21b6ac1cbd2c355c20add22c4 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Thu, 10 Feb 2022 12:16:40 -0700 Subject: [PATCH 119/351] prepare 1.10.0 release (#399) --- CHANGELOG.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3397e383a0..cee33b1588 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,17 @@ # Refinery Changelog +## 1.10.0 2022-02-10 + +### Enhancements + +- added username in config for redis auth (#397) | [@ecobrien29](https://github.com/ecobrien29) +- build: add ARM64 (aarch64) RPM artifact (#395) | [@jharley](https://github.com/jharley) + +### Fixes + +- fix: deadlock when reloading configs (#398) | [@vreynolds](https://github.com/vreynolds) +- Fixed "honeeycomb" typo in log output when reloading config (#394) | [@looneym](https://github.com/looneym) + ## 1.9.0 2022-02-01 ### Enhancements From d73365fd55468cb4b0d3b633b86e9da95e08348e Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Thu, 10 Feb 2022 12:41:19 -0700 Subject: 
[PATCH 120/351] docs: add helm charts step to releasing (#400) --- RELEASING.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/RELEASING.md b/RELEASING.md index 2e245e277d..3339caea7b 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -1,7 +1,8 @@ # Release Process 1. Add release entry to [changelog](./CHANGELOG.md) -3. Open a PR with the above, and merge that into main -4. Create new tag on merged commit with the new version (e.g. `v1.4.1`) -5. Push the tag upstream (this will kick off the release pipeline in CI) -6. Copy change log entry for newest version into draft GitHub release created as part of CI publish steps +2. Open a PR with the above, and merge that into main +3. Create new tag on merged commit with the new version (e.g. `v1.4.1`) +4. Push the tag upstream (this will kick off the release pipeline in CI) +5. Copy change log entry for newest version into draft GitHub release created as part of CI publish steps +6. Update the `appVersion` and any relevant chart changes in [helm-charts](https://github.com/honeycombio/helm-charts/tree/main/charts/refinery) From 54f719e1dbf72eba4da4fb27fe712754f382b47d Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Thu, 17 Feb 2022 20:05:30 +0000 Subject: [PATCH 121/351] Add Environment & Services support (#403) Co-authored-by: Vera Reynolds --- app/app_test.go | 172 ++++++++++++++++++++++++++++++++--- collect/collect.go | 26 ++++-- collect/collect_test.go | 21 ++++- config/config.go | 2 + config/config_test.go | 4 + config/file_config.go | 9 ++ config/mock.go | 8 ++ config_complete.toml | 7 ++ go.mod | 2 +- go.sum | 4 +- route/otlp_trace.go | 48 +++++----- route/otlp_trace_test.go | 80 +++++++++++++++-- route/route.go | 190 ++++++++++++++++++++++++++++++++++----- route/route_test.go | 69 ++++++++++++++ types/event.go | 34 +++++-- 15 files changed, 595 insertions(+), 81 deletions(-) diff --git a/app/app_test.go b/app/app_test.go index 8c643d47ac..405ab633d9 100644 --- 
a/app/app_test.go +++ b/app/app_test.go @@ -37,6 +37,9 @@ import ( "github.com/honeycombio/refinery/transmit" ) +const legacyAPIKey = "***REMOVED***" +const nonLegacyAPIKey = "***REMOVED***" + type countingWriterSender struct { transmission.WriterSender @@ -113,7 +116,7 @@ func newStartedApp( GetPeerBufferSizeVal: 10000, GetListenAddrVal: "127.0.0.1:" + strconv.Itoa(basePort), GetPeerListenAddrVal: "127.0.0.1:" + strconv.Itoa(basePort+1), - GetAPIKeysVal: []string{"KEY"}, + GetAPIKeysVal: []string{legacyAPIKey, nonLegacyAPIKey}, GetHoneycombAPIVal: "http://api.honeycomb.io", GetInMemoryCollectorCacheCapacityVal: config.InMemoryCollectorCacheCapacity{CacheCapacity: 10000}, AddHostMetadataToTrace: enableHostMetadata, @@ -227,7 +230,48 @@ func TestAppIntegration(t *testing.T) { "http://localhost:10000/1/batch/dataset", strings.NewReader(`[{"data":{"trace.trace_id":"1","foo":"bar"}}]`), ) - req.Header.Set("X-Honeycomb-Team", "KEY") + req.Header.Set("X-Honeycomb-Team", legacyAPIKey) + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultTransport.RoundTrip(req) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + resp.Body.Close() + + err = startstop.Stop(graph.Objects(), nil) + assert.NoError(t, err) + + // Wait for span to be sent. 
+ deadline := time.After(time.Second) + for { + if out.Len() > 62 { + break + } + select { + case <-deadline: + t.Error("timed out waiting for output") + return + case <-time.After(time.Millisecond): + } + } + assert.Equal(t, `{"data":{"foo":"bar","trace.trace_id":"1"},"dataset":"dataset"}`+"\n", out.String()) +} + +func TestAppIntegrationWithNonLegacyKey(t *testing.T) { + t.Parallel() + + var out bytes.Buffer + a, graph := newStartedApp(t, &transmission.WriterSender{W: &out}, 10500, nil, false) + a.IncomingRouter.SetEnvironmentCache(time.Second, func(s string) (string, error) {return "test", nil}) + a.PeerRouter.SetEnvironmentCache(time.Second, func(s string) (string, error) {return "test", nil}) + + // Send a root span, it should be sent in short order. + req := httptest.NewRequest( + "POST", + "http://localhost:10500/1/batch/dataset", + strings.NewReader(`[{"data":{"trace.trace_id":"1","foo":"bar"}}]`), + ) + req.Header.Set("X-Honeycomb-Team", nonLegacyAPIKey) req.Header.Set("Content-Type", "application/json") resp, err := http.DefaultTransport.RoundTrip(req) @@ -284,7 +328,7 @@ func TestPeerRouting(t *testing.T) { nil, ) assert.NoError(t, err) - req.Header.Set("X-Honeycomb-Team", "KEY") + req.Header.Set("X-Honeycomb-Team", legacyAPIKey) req.Header.Set("Content-Type", "application/json") blob := `[` + string(spans[0]) + `]` @@ -295,7 +339,7 @@ func TestPeerRouting(t *testing.T) { }, 2*time.Second, 2*time.Millisecond) expectedEvent := &transmission.Event{ - APIKey: "KEY", + APIKey: legacyAPIKey, Dataset: "dataset", SampleRate: 2, APIHost: "http://api.honeycomb.io", @@ -330,7 +374,7 @@ func TestPeerRouting(t *testing.T) { nil, ) assert.NoError(t, err) - req.Header.Set("X-Honeycomb-Team", "KEY") + req.Header.Set("X-Honeycomb-Team", legacyAPIKey) req.Header.Set("Content-Type", "application/json") req.Body = ioutil.NopCloser(strings.NewReader(blob)) @@ -354,7 +398,7 @@ func TestHostMetadataSpanAdditions(t *testing.T) { "http://localhost:14000/1/batch/dataset", 
strings.NewReader(`[{"data":{"foo":"bar","trace.trace_id":"1"}}]`), ) - req.Header.Set("X-Honeycomb-Team", "KEY") + req.Header.Set("X-Honeycomb-Team", legacyAPIKey) req.Header.Set("Content-Type", "application/json") resp, err := http.DefaultTransport.RoundTrip(req) @@ -415,7 +459,7 @@ func TestEventsEndpoint(t *testing.T) { bytes.NewReader(blob), ) assert.NoError(t, err) - req.Header.Set("X-Honeycomb-Team", "KEY") + req.Header.Set("X-Honeycomb-Team", legacyAPIKey) req.Header.Set("Content-Type", "application/json") req.Header.Set("Content-Encoding", "zstd") req.Header.Set("X-Honeycomb-Event-Time", now.Format(time.RFC3339Nano)) @@ -429,7 +473,7 @@ func TestEventsEndpoint(t *testing.T) { assert.Equal( t, &transmission.Event{ - APIKey: "KEY", + APIKey: legacyAPIKey, Dataset: "dataset", SampleRate: 10, APIHost: "http://api.honeycomb.io", @@ -457,7 +501,111 @@ func TestEventsEndpoint(t *testing.T) { buf, ) assert.NoError(t, err) - req.Header.Set("X-Honeycomb-Team", "KEY") + req.Header.Set("X-Honeycomb-Team", legacyAPIKey) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Content-Encoding", "gzip") + req.Header.Set("X-Honeycomb-Event-Time", now.Format(time.RFC3339Nano)) + req.Header.Set("X-Honeycomb-Samplerate", "10") + + post(t, req) + assert.Eventually(t, func() bool { + return len(senders[1].Events()) == 1 + }, 2*time.Second, 2*time.Millisecond) + + assert.Equal( + t, + &transmission.Event{ + APIKey: legacyAPIKey, + Dataset: "dataset", + SampleRate: 10, + APIHost: "http://api.honeycomb.io", + Timestamp: now, + Data: map[string]interface{}{ + "trace.trace_id": "1", + "foo": "bar", + }, + }, + senders[1].Events()[0], + ) +} + +func TestEventsEndpointWithNonLegacyKey(t *testing.T) { + t.Parallel() + + peers := &testPeers{ + peers: []string{ + "http://localhost:15001", + "http://localhost:15003", + }, + } + + var apps [2]*App + var addrs [2]string + var senders [2]*transmission.MockSender + for i := range apps { + basePort := 15000 + (i * 2) + 
senders[i] = &transmission.MockSender{} + app, graph := newStartedApp(t, senders[i], basePort, peers, false) + app.IncomingRouter.SetEnvironmentCache(time.Second, func(s string) (string, error) { return "test", nil}) + app.PeerRouter.SetEnvironmentCache(time.Second, func(s string) (string, error) { return "test", nil}) + apps[i] = app + defer startstop.Stop(graph.Objects(), nil) + + addrs[i] = "localhost:" + strconv.Itoa(basePort) + } + + // Deliver to host 1, it should be passed to host 0 and emitted there. + zEnc, _ := zstd.NewWriter(nil) + blob := zEnc.EncodeAll([]byte(`{"foo":"bar","trace.trace_id":"1"}`), nil) + req, err := http.NewRequest( + "POST", + "http://localhost:15002/1/events/dataset", + bytes.NewReader(blob), + ) + assert.NoError(t, err) + req.Header.Set("X-Honeycomb-Team", nonLegacyAPIKey) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Content-Encoding", "zstd") + req.Header.Set("X-Honeycomb-Event-Time", now.Format(time.RFC3339Nano)) + req.Header.Set("X-Honeycomb-Samplerate", "10") + + post(t, req) + assert.Eventually(t, func() bool { + return len(senders[0].Events()) == 1 + }, 2*time.Second, 2*time.Millisecond) + + assert.Equal( + t, + &transmission.Event{ + APIKey: nonLegacyAPIKey, + Dataset: "dataset", + SampleRate: 10, + APIHost: "http://api.honeycomb.io", + Timestamp: now, + Data: map[string]interface{}{ + "trace.trace_id": "1", + "foo": "bar", + }, + }, + senders[0].Events()[0], + ) + + // Repeat, but deliver to host 1 on the peer channel, it should not be + // passed to host 0. 
+ + blob = blob[:0] + buf := bytes.NewBuffer(blob) + gz := gzip.NewWriter(buf) + gz.Write([]byte(`{"foo":"bar","trace.trace_id":"1"}`)) + gz.Close() + + req, err = http.NewRequest( + "POST", + "http://localhost:15003/1/events/dataset", + buf, + ) + assert.NoError(t, err) + req.Header.Set("X-Honeycomb-Team", nonLegacyAPIKey) req.Header.Set("Content-Type", "application/json") req.Header.Set("Content-Encoding", "gzip") req.Header.Set("X-Honeycomb-Event-Time", now.Format(time.RFC3339Nano)) @@ -471,7 +619,7 @@ func TestEventsEndpoint(t *testing.T) { assert.Equal( t, &transmission.Event{ - APIKey: "KEY", + APIKey: nonLegacyAPIKey, Dataset: "dataset", SampleRate: 10, APIHost: "http://api.honeycomb.io", @@ -553,7 +701,7 @@ func BenchmarkTraces(b *testing.B) { nil, ) assert.NoError(b, err) - req.Header.Set("X-Honeycomb-Team", "KEY") + req.Header.Set("X-Honeycomb-Team", legacyAPIKey) req.Header.Set("Content-Type", "application/json") b.Run("single", func(b *testing.B) { @@ -656,7 +804,7 @@ func BenchmarkDistributedTraces(b *testing.B) { nil, ) assert.NoError(b, err) - req.Header.Set("X-Honeycomb-Team", "KEY") + req.Header.Set("X-Honeycomb-Team", legacyAPIKey) req.Header.Set("Content-Type", "application/json") b.Run("single", func(b *testing.B) { diff --git a/collect/collect.go b/collect/collect.go index ec2bb31b78..07bd273721 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -17,6 +17,7 @@ import ( "github.com/honeycombio/refinery/sample" "github.com/honeycombio/refinery/transmit" "github.com/honeycombio/refinery/types" + "github.com/sirupsen/logrus" ) var ErrWouldBlock = errors.New("not adding span, channel buffer is full") @@ -437,10 +438,21 @@ func (i *InMemCollector) send(trace *types.Trace) { var sampler sample.Sampler var found bool - if sampler, found = i.datasetSamplers[trace.Dataset]; !found { - sampler = i.SamplerFactory.GetSamplerImplementationForDataset(trace.Dataset) - // save sampler for later - 
i.datasetSamplers[trace.Dataset] = sampler + // get sampler key (dataset for legacy keys, environment for new keys) + samplerKey, isLegacyKey := trace.GetSamplerKey() + logFields := logrus.Fields{ + "trace_id": trace.TraceID, + } + if isLegacyKey { + logFields["dataset"] = samplerKey + } else { + logFields["environment"] = samplerKey + } + + // use sampler key to find sampler, crete and cache if not found + if sampler, found = i.datasetSamplers[samplerKey]; !found { + sampler = i.SamplerFactory.GetSamplerImplementationForDataset(samplerKey) + i.datasetSamplers[samplerKey] = sampler } // make sampling decision and update the trace @@ -458,16 +470,16 @@ func (i *InMemCollector) send(trace *types.Trace) { // if we're supposed to drop this trace, and dry run mode is not enabled, then we're done. if !shouldSend && !i.Config.GetIsDryRun() { i.Metrics.Increment("trace_send_dropped") - i.Logger.Info().WithString("trace_id", trace.TraceID).WithString("dataset", trace.Dataset).Logf("Dropping trace because of sampling, trace to dataset") + i.Logger.Info().WithFields(logFields).Logf("Dropping trace because of sampling") return } i.Metrics.Increment("trace_send_kept") // ok, we're not dropping this trace; send all the spans if i.Config.GetIsDryRun() && !shouldSend { - i.Logger.Info().WithString("trace_id", trace.TraceID).WithString("dataset", trace.Dataset).Logf("Trace would have been dropped, but dry run mode is enabled") + i.Logger.Info().WithFields(logFields).Logf("Trace would have been dropped, but dry run mode is enabled") } - i.Logger.Info().WithString("trace_id", trace.TraceID).WithString("dataset", trace.Dataset).Logf("Sending trace to dataset") + i.Logger.Info().WithFields(logFields).Logf("Sending trace") for _, sp := range trace.GetSpans() { if sp.SampleRate < 1 { sp.SampleRate = 1 diff --git a/collect/collect_test.go b/collect/collect_test.go index 2d8b62aeff..d0ddfbf684 100644 --- a/collect/collect_test.go +++ b/collect/collect_test.go @@ -21,6 +21,8 @@ import ( 
"github.com/honeycombio/refinery/types" ) +const legacyAPIKey = "***REMOVED***" + // TestAddRootSpan tests that adding a root span winds up with a trace object in // the cache and that that trace gets sent func TestAddRootSpan(t *testing.T) { @@ -62,6 +64,7 @@ func TestAddRootSpan(t *testing.T) { TraceID: traceID1, Event: types.Event{ Dataset: "aoeu", + APIKey: legacyAPIKey, }, } coll.AddSpan(span) @@ -81,6 +84,7 @@ func TestAddRootSpan(t *testing.T) { TraceID: traceID2, Event: types.Event{ Dataset: "aoeu", + APIKey: legacyAPIKey, }, } coll.AddSpanFromPeer(span) @@ -138,6 +142,7 @@ func TestAddSpan(t *testing.T) { Data: map[string]interface{}{ "trace.parent_id": "unused", }, + APIKey: legacyAPIKey, }, } coll.AddSpanFromPeer(span) @@ -150,6 +155,7 @@ func TestAddSpan(t *testing.T) { Event: types.Event{ Dataset: "aoeu", Data: map[string]interface{}{}, + APIKey: legacyAPIKey, }, } coll.AddSpan(rootSpan) @@ -216,7 +222,8 @@ func TestDryRunMode(t *testing.T) { span := &types.Span{ TraceID: traceID1, Event: types.Event{ - Data: map[string]interface{}{}, + Data: map[string]interface{}{}, + APIKey: legacyAPIKey, }, } coll.AddSpan(span) @@ -239,6 +246,7 @@ func TestDryRunMode(t *testing.T) { Data: map[string]interface{}{ "trace.parent_id": "unused", }, + APIKey: legacyAPIKey, }, } coll.AddSpanFromPeer(span) @@ -248,7 +256,8 @@ func TestDryRunMode(t *testing.T) { span = &types.Span{ TraceID: traceID2, Event: types.Event{ - Data: map[string]interface{}{}, + Data: map[string]interface{}{}, + APIKey: legacyAPIKey, }, } coll.AddSpanFromPeer(span) @@ -264,7 +273,8 @@ func TestDryRunMode(t *testing.T) { span = &types.Span{ TraceID: traceID3, Event: types.Event{ - Data: map[string]interface{}{}, + Data: map[string]interface{}{}, + APIKey: legacyAPIKey, }, } coll.AddSpan(span) @@ -314,6 +324,7 @@ func TestCacheSizeReload(t *testing.T) { Data: map[string]interface{}{ "trace.parent_id": "1", }, + APIKey: legacyAPIKey, } coll.AddSpan(&types.Span{TraceID: "1", Event: 
event}) @@ -388,6 +399,7 @@ func TestSampleConfigReload(t *testing.T) { TraceID: "1", Event: types.Event{ Dataset: dataset, + APIKey: legacyAPIKey, }, } @@ -415,6 +427,7 @@ func TestSampleConfigReload(t *testing.T) { TraceID: "2", Event: types.Event{ Dataset: dataset, + APIKey: legacyAPIKey, }, } @@ -469,6 +482,7 @@ func TestMaxAlloc(t *testing.T) { "trace.parent_id": "unused", "id": i, }, + APIKey: legacyAPIKey, }, } coll.AddSpan(span) @@ -555,6 +569,7 @@ func TestAddSpanNoBlock(t *testing.T) { TraceID: "1", Event: types.Event{ Dataset: "aoeu", + APIKey: legacyAPIKey, }, } diff --git a/config/config.go b/config/config.go index 989ae9f815..4028535348 100644 --- a/config/config.go +++ b/config/config.go @@ -137,4 +137,6 @@ type Config interface { GetDryRunFieldName() string GetAddHostMetadataToTrace() bool + + GetEnvironmentCacheTTL() time.Duration } diff --git a/config/config_test.go b/config/config_test.go index b2910a8457..fb2d7e2a46 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -208,6 +208,10 @@ func TestReadDefaults(t *testing.T) { t.Error("received", d, "expected", false) } + if d := c.GetEnvironmentCacheTTL(); d != time.Hour { + t.Error("received", d, "expected", time.Hour) + } + d, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist") assert.NoError(t, err) assert.IsType(t, &DeterministicSamplerConfig{}, d) diff --git a/config/file_config.go b/config/file_config.go index bf6e19dc91..13484922b3 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -48,6 +48,7 @@ type configContents struct { PeerManagement PeerManagementConfig `validate:"required"` InMemCollector InMemoryCollectorCacheCapacity `validate:"required"` AddHostMetadataToTrace bool + EnvironmentCacheTTL time.Duration } type InMemoryCollectorCacheCapacity struct { @@ -125,6 +126,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("HoneycombLogger.LoggerSamplerEnabled", false) 
c.SetDefault("HoneycombLogger.LoggerSamplerThroughput", 5) c.SetDefault("AddHostMetadataToTrace", false) + c.SetDefault("EnvironmentCacheTTL", time.Hour) c.SetConfigFile(config) err := c.ReadInConfig() @@ -727,3 +729,10 @@ func (f *fileConfig) GetAddHostMetadataToTrace() bool { return f.conf.AddHostMetadataToTrace } + +func (f *fileConfig) GetEnvironmentCacheTTL() time.Duration { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.EnvironmentCacheTTL +} diff --git a/config/mock.go b/config/mock.go index 35a70a5846..76793eabb6 100644 --- a/config/mock.go +++ b/config/mock.go @@ -70,6 +70,7 @@ type MockConfig struct { DryRun bool DryRunFieldName string AddHostMetadataToTrace bool + EnvironmentCacheTTL time.Duration Mux sync.RWMutex } @@ -320,3 +321,10 @@ func (m *MockConfig) GetAddHostMetadataToTrace() bool { return m.AddHostMetadataToTrace } + +func (f *MockConfig) GetEnvironmentCacheTTL() time.Duration { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.EnvironmentCacheTTL +} diff --git a/config_complete.toml b/config_complete.toml index f8de3b3674..0401a4c902 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -98,6 +98,13 @@ PeerBufferSize = 10000 # Not eligible for live reload AddHostMetadataToTrace = false +# EnvironmentCacheTTL is the amount of time a cache entry will live that associates +# an API key with an environment name. +# Cache misses lookup the environment name using HoneycombAPI config value. +# Default is 1 hour ("1h"). +# Not eligible for live reload. 
+EnvironmentCacheTTL = "1h" + ############################ ## Implementation Choices ## ############################ diff --git a/go.mod b/go.mod index 11b70e3df0..b31953fbba 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.6.0 + github.com/honeycombio/husky v0.9.0 github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 diff --git a/go.sum b/go.sum index 8e415b6989..6cfaaba06d 100644 --- a/go.sum +++ b/go.sum @@ -259,8 +259,8 @@ github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.6.0 h1:VufNrLZoVMqDZrj8hHIF6izh/6LbpYfPPaKeGttXMII= -github.com/honeycombio/husky v0.6.0/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= +github.com/honeycombio/husky v0.9.0 h1:TppxWwGCZb54qwHuPRAkxhht4b3btFcM2OvV1/Zs3/s= +github.com/honeycombio/husky v0.9.0/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= diff --git a/route/otlp_trace.go b/route/otlp_trace.go index df6a2ecf59..6b75b29acd 
100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -13,7 +13,7 @@ import ( func (router *Router) postOTLP(w http.ResponseWriter, req *http.Request) { ri := huskyotlp.GetRequestInfoFromHttpHeaders(req.Header) - if err := ri.ValidateHeaders(); err != nil { + if err := ri.ValidateTracesHeaders(); err != nil { if errors.Is(err, huskyotlp.ErrInvalidContentType) { router.handlerReturnWithError(w, ErrInvalidContentType, err) } else { @@ -28,23 +28,23 @@ func (router *Router) postOTLP(w http.ResponseWriter, req *http.Request) { return } - if err := processTraceRequest(req.Context(), router, result.Events, ri.ApiKey, ri.Dataset); err != nil { + if err := processTraceRequest(req.Context(), router, result.Batches, ri.ApiKey); err != nil { router.handlerReturnWithError(w, ErrUpstreamFailed, err) } } func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) { ri := huskyotlp.GetRequestInfoFromGrpcMetadata(ctx) - if err := ri.ValidateHeaders(); err != nil { + if err := ri.ValidateTracesHeaders(); err != nil { return nil, huskyotlp.AsGRPCError(err) } - result, err := huskyotlp.TranslateTraceRequest(req) + result, err := huskyotlp.TranslateTraceRequest(req, ri) if err != nil { return nil, huskyotlp.AsGRPCError(err) } - if err := processTraceRequest(ctx, router, result.Events, ri.ApiKey, ri.Dataset); err != nil { + if err := processTraceRequest(ctx, router, result.Batches, ri.ApiKey); err != nil { return nil, huskyotlp.AsGRPCError(err) } @@ -54,9 +54,8 @@ func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTrac func processTraceRequest( ctx context.Context, router *Router, - batch []huskyotlp.Event, - apiKey string, - datasetName string) error { + batches []huskyotlp.Batch, + apiKey string) error { var requestID types.RequestIDContextKey apiHost, err := router.Config.GetHoneycombAPI() @@ -65,18 +64,27 @@ func processTraceRequest( return err } - for _, ev := 
range batch { - event := &types.Event{ - Context: ctx, - APIHost: apiHost, - APIKey: apiKey, - Dataset: datasetName, - SampleRate: uint(ev.SampleRate), - Timestamp: ev.Timestamp, - Data: ev.Attributes, - } - if err = router.processEvent(event, requestID); err != nil { - router.Logger.Error().Logf("Error processing event: " + err.Error()) + // get environment name - will be empty for legacy keys + environment, err := router.getEnvironmentName(apiKey) + if err != nil { + return nil + } + + for _, batch := range batches { + for _, ev := range batch.Events { + event := &types.Event{ + Context: ctx, + APIHost: apiHost, + APIKey: apiKey, + Dataset: batch.Dataset, + Environment: environment, + SampleRate: uint(ev.SampleRate), + Timestamp: ev.Timestamp, + Data: ev.Attributes, + } + if err = router.processEvent(event, requestID); err != nil { + router.Logger.Error().Logf("Error processing event: " + err.Error()) + } } } diff --git a/route/otlp_trace_test.go b/route/otlp_trace_test.go index 9a9381bfce..a3f13eb877 100644 --- a/route/otlp_trace_test.go +++ b/route/otlp_trace_test.go @@ -21,12 +21,15 @@ import ( "github.com/stretchr/testify/assert" collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" common "go.opentelemetry.io/proto/otlp/common/v1" + resource "go.opentelemetry.io/proto/otlp/resource/v1" trace "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/grpc/metadata" ) +const legacyAPIKey = "***REMOVED***" + func TestOTLPHandler(t *testing.T) { - md := metadata.New(map[string]string{"x-honeycomb-team": "meow", "x-honeycomb-dataset": "ds"}) + md := metadata.New(map[string]string{"x-honeycomb-team": legacyAPIKey, "x-honeycomb-dataset": "ds"}) ctx := metadata.NewIncomingContext(context.Background(), md) mockMetrics := metrics.MockMetrics{} @@ -45,8 +48,9 @@ func TestOTLPHandler(t *testing.T) { Logger: &logger.MockLogger{}, incomingOrPeer: "incoming", }, - Logger: &logger.MockLogger{}, - zstdDecoders: decoders, + Logger: 
&logger.MockLogger{}, + zstdDecoders: decoders, + environmentCache: newEnvironmentCache(time.Second, nil), } conf := &config.MockConfig{ @@ -200,7 +204,7 @@ func TestOTLPHandler(t *testing.T) { request, _ := http.NewRequest("POST", "/v1/traces", strings.NewReader(string(body))) request.Header = http.Header{} request.Header.Set("content-type", "application/protobuf") - request.Header.Set("x-honeycomb-team", "apikey") + request.Header.Set("x-honeycomb-team", legacyAPIKey) request.Header.Set("x-honeycomb-dataset", "dataset") w := httptest.NewRecorder() @@ -236,7 +240,7 @@ func TestOTLPHandler(t *testing.T) { request.Header = http.Header{} request.Header.Set("content-type", "application/protobuf") request.Header.Set("content-encoding", "gzip") - request.Header.Set("x-honeycomb-team", "apikey") + request.Header.Set("x-honeycomb-team", legacyAPIKey) request.Header.Set("x-honeycomb-dataset", "dataset") w := httptest.NewRecorder() @@ -275,7 +279,7 @@ func TestOTLPHandler(t *testing.T) { request.Header = http.Header{} request.Header.Set("content-type", "application/protobuf") request.Header.Set("content-encoding", "zstd") - request.Header.Set("x-honeycomb-team", "apikey") + request.Header.Set("x-honeycomb-team", legacyAPIKey) request.Header.Set("x-honeycomb-dataset", "dataset") w := httptest.NewRecorder() @@ -290,7 +294,7 @@ func TestOTLPHandler(t *testing.T) { request, _ := http.NewRequest("POST", "/v1/traces", strings.NewReader("{}")) request.Header = http.Header{} request.Header.Set("content-type", "application/json") - request.Header.Set("x-honeycomb-team", "apikey") + request.Header.Set("x-honeycomb-team", legacyAPIKey) request.Header.Set("x-honeycomb-dataset", "dataset") w := httptest.NewRecorder() @@ -301,6 +305,68 @@ func TestOTLPHandler(t *testing.T) { assert.Equal(t, 0, len(mockTransmission.Events)) mockTransmission.Flush() }) + + t.Run("events created with legacy keys use dataset header", func(t *testing.T) { + md := 
metadata.New(map[string]string{"x-honeycomb-team": legacyAPIKey, "x-honeycomb-dataset": "my-dataset"}) + ctx := metadata.NewIncomingContext(context.Background(), md) + + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + Resource: &resource.Resource{ + Attributes: []*common.KeyValue{ + {Key: "service.name", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "my-service"}}}, + }, + }, + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: []*trace.Span{{ + Name: "my-span", + }}, + }}, + }}, + } + _, err := router.Export(ctx, req) + if err != nil { + t.Errorf(`Unexpected error: %s`, err) + } + assert.Equal(t, 1, len(mockTransmission.Events)) + event := mockTransmission.Events[0] + assert.Equal(t, "my-dataset", event.Dataset) + assert.Equal(t, "", event.Environment) + mockTransmission.Flush() + }) + + t.Run("events created with non-legacy keys lookup and use envionment name", func(t *testing.T) { + apiKey := "my-api-key" + md := metadata.New(map[string]string{"x-honeycomb-team": apiKey}) + ctx := metadata.NewIncomingContext(context.Background(), md) + + // add cached environment lookup + router.environmentCache.addItem(apiKey, "local", time.Minute) + + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + Resource: &resource.Resource{ + Attributes: []*common.KeyValue{ + {Key: "service.name", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "my-service"}}}, + }, + }, + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + Spans: []*trace.Span{{ + Name: "my-span", + }}, + }}, + }}, + } + _, err := router.Export(ctx, req) + if err != nil { + t.Errorf(`Unexpected error: %s`, err) + } + assert.Equal(t, 1, len(mockTransmission.Events)) + event := mockTransmission.Events[0] + assert.Equal(t, "my-service", event.Dataset) + assert.Equal(t, "local", event.Environment) + mockTransmission.Flush() + }) } func 
helperOTLPRequestSpansWithoutStatus() []*trace.Span { diff --git a/route/route.go b/route/route.go index d0f7ee8005..ab39b05c03 100644 --- a/route/route.go +++ b/route/route.go @@ -12,6 +12,7 @@ import ( "math" "net" "net/http" + "net/url" "strconv" "sync" "time" @@ -80,6 +81,8 @@ type Router struct { // used to identify Router as a OTLP TraceServer collectortrace.UnimplementedTraceServiceServer + + environmentCache *environmentCache } type BatchResponse struct { @@ -123,6 +126,7 @@ func (r *Router) LnS(incomingOrPeer string) { Timeout: time.Second * 10, Transport: r.HTTPTransport, } + r.environmentCache = newEnvironmentCache(r.Config.GetEnvironmentCacheTTL(), r.lookupEnvironment) var err error r.zstdDecoders, err = makeDecoders(numZstdDecoders) @@ -309,6 +313,13 @@ func (r *Router) requestToEvent(req *http.Request, reqBod []byte) (*types.Event, if err != nil { return nil, err } + + // get environment name - will be empty for legacy keys + environment, err := r.getEnvironmentName(apiKey) + if err != nil { + return nil, err + } + data := map[string]interface{}{} err = unmarshal(req, bytes.NewReader(reqBod), &data) if err != nil { @@ -316,13 +327,14 @@ func (r *Router) requestToEvent(req *http.Request, reqBod []byte) (*types.Event, } return &types.Event{ - Context: req.Context(), - APIHost: apiHost, - APIKey: apiKey, - Dataset: dataset, - SampleRate: uint(sampleRate), - Timestamp: eventTime, - Data: data, + Context: req.Context(), + APIHost: apiHost, + APIKey: apiKey, + Dataset: dataset, + Environment: environment, + SampleRate: uint(sampleRate), + Timestamp: eventTime, + Data: data, }, nil } @@ -353,9 +365,20 @@ func (r *Router) batch(w http.ResponseWriter, req *http.Request) { return } + apiKey := req.Header.Get(types.APIKeyHeader) + if apiKey == "" { + apiKey = req.Header.Get(types.APIKeyHeaderShort) + } + + // get environment name - will be empty for legacy keys + environment, err := r.getEnvironmentName(apiKey) + if err != nil { + r.handlerReturnWithError(w, 
ErrReqToEvent, err) + } + batchedResponses := make([]*BatchResponse, 0, len(batchedEvents)) for _, bev := range batchedEvents { - ev, err := r.batchedEventToEvent(req, bev) + ev, err := r.batchedEventToEvent(req, bev, apiKey, environment) if err != nil { batchedResponses = append( batchedResponses, @@ -395,7 +418,8 @@ func (r *Router) processEvent(ev *types.Event, reqID interface{}) error { debugLog := r.iopLogger.Debug(). WithField("request_id", reqID). WithString("api_host", ev.APIHost). - WithString("dataset", ev.Dataset) + WithString("dataset", ev.Dataset). + WithString("environment", ev.Environment) // extract trace ID, route to self or peer, pass on to collector // TODO make trace ID field configurable @@ -491,12 +515,7 @@ func (r *Router) getMaybeCompressedBody(req *http.Request) (io.Reader, error) { return reader, nil } -func (r *Router) batchedEventToEvent(req *http.Request, bev batchedEvent) (*types.Event, error) { - apiKey := req.Header.Get(types.APIKeyHeader) - if apiKey == "" { - apiKey = req.Header.Get(types.APIKeyHeaderShort) - } - +func (r *Router) batchedEventToEvent(req *http.Request, bev batchedEvent, apiKey string, environment string) (*types.Event, error) { sampleRate := bev.SampleRate if sampleRate == 0 { sampleRate = 1 @@ -511,13 +530,14 @@ func (r *Router) batchedEventToEvent(req *http.Request, bev batchedEvent) (*type return nil, err } return &types.Event{ - Context: req.Context(), - APIHost: apiHost, - APIKey: apiKey, - Dataset: dataset, - SampleRate: uint(sampleRate), - Timestamp: eventTime, - Data: bev.Data, + Context: req.Context(), + APIHost: apiHost, + APIKey: apiKey, + Dataset: dataset, + Environment: environment, + SampleRate: uint(sampleRate), + Timestamp: eventTime, + Data: bev.Data, }, nil } @@ -626,3 +646,129 @@ func getFirstValueFromMetadata(key string, md metadata.MD) string { } return "" } + +type environmentCache struct { + mutex sync.RWMutex + items map[string]*cacheItem + ttl time.Duration + getFn func(string) (string, 
error) +} + +func (r *Router) SetEnvironmentCache(ttl time.Duration, getFn func(string)(string, error)) { + r.environmentCache = newEnvironmentCache(ttl, getFn) +} + +func newEnvironmentCache(ttl time.Duration, getFn func(string)(string, error)) *environmentCache { + return &environmentCache{ + items: make(map[string]*cacheItem), + ttl: ttl, + getFn: getFn, + } +} + +type cacheItem struct { + expiresAt time.Time + value string +} + +// get queries the cached items, returning cache hits that have not expired. +// Cache missed use the configured getFn to populate the cache. +func (c *environmentCache) get(key string) (string, error) { + if item, ok := c.items[key]; ok { + if time.Now().Before(item.expiresAt) { + return item.value, nil + } + } + + // get write lock early so we don't execute getFn in parallel so the + // the result will be cached before the next lock is aquired to prevent + // subsequent calls to getFn for the same key + c.mutex.Lock() + defer c.mutex.Unlock() + + // check if the cache has been populated while waiting for a write lock + if item, ok := c.items[key]; ok { + if time.Now().Before(item.expiresAt) { + return item.value, nil + } + } + + val, err := c.getFn(key) + if err != nil { + return "", err + } + + c.addItem(key, val, c.ttl) + return val, nil +} + +// addItem create a new cache entry in the environment cache. 
+// This is not thread-safe, and should only be used in tests +func (c *environmentCache) addItem(key string, value string, ttl time.Duration) { + c.items[key] = &cacheItem{ + expiresAt: time.Now().Add(ttl), + value: value, + } +} + +type SlugInfo struct { + Slug string `json:"slug"` +} + +type AuthInfo struct { + APIKeyAccess map[string]bool `json:"api_key_access"` + Team SlugInfo `json:"team"` + Environment SlugInfo `json:"environment"` +} + +func (r *Router) getEnvironmentName(apiKey string) (string, error) { + if apiKey == "" || types.IsLegacyAPIKey(apiKey) { + return "", nil + } + + env, err := r.environmentCache.get(apiKey) + if err != nil { + return "", err + } + return env, nil +} + +func (r *Router) lookupEnvironment(apiKey string) (string, error) { + apiEndpoint, err := r.Config.GetHoneycombAPI() + if err != nil { + return "", fmt.Errorf("failed to read Honeycomb API config value. %w", err) + } + authURL, err := url.Parse(apiEndpoint) + if err != nil { + return "", fmt.Errorf("failed to parse Honeycomb API URL config value. %w", err) + } + + authURL.Path = "/1/auth" + req, err := http.NewRequest("GET", authURL.String(), nil) + if err != nil { + return "", fmt.Errorf("failed to create AuthInfo request. %w", err) + } + + req.Header.Set("x-Honeycomb-team", apiKey) + + r.Logger.Debug().WithString("api_key", apiKey).WithString("endpoint", authURL.String()).Logf("Attempting to get environment name using API key") + resp, err := r.proxyClient.Do(req) + if err != nil { + return "", fmt.Errorf("failed sending AuthInfo request to Honeycomb API. 
%w", err) + } + defer resp.Body.Close() + + switch { + case resp.StatusCode == http.StatusUnauthorized: + return "", fmt.Errorf("received 401 response for AuthInfo request from Honeycomb API - check your API key") + case resp.StatusCode > 299: + return "", fmt.Errorf("received %d response for AuthInfo request from Honeycomb API", resp.StatusCode) + } + + authinfo := AuthInfo{} + if err := json.NewDecoder(resp.Body).Decode(&authinfo); err != nil { + return "", fmt.Errorf("failed to JSON decode of AuthInfo response from Honeycomb API") + } + r.Logger.Debug().WithString("environment", authinfo.Environment.Slug).Logf("Got environment") + return authinfo.Environment.Slug, nil +} diff --git a/route/route_test.go b/route/route_test.go index 47b90364fe..c0932cbc27 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -3,6 +3,7 @@ package route import ( "bytes" "compress/gzip" + "errors" "fmt" "io" "io/ioutil" @@ -356,3 +357,71 @@ type TestShard struct { func (s *TestShard) Equals(other sharder.Shard) bool { return true } func (s *TestShard) GetAddress() string { return s.addr } + +func TestEnvironmentCache(t *testing.T) { + t.Run("calls getFn on cache miss", func(t *testing.T) { + cache := newEnvironmentCache(time.Second, func(key string) (string, error) { + if key != "key" { + t.Errorf("expected %s - got %s", "key", key) + } + return "test", nil + }) + + val, err := cache.get("key") + if err != nil { + t.Errorf("got error calling getOrSet - %e", err) + } + if val != "test" { + t.Errorf("expected %s - got %s", "test", val) + } + }) + + t.Run("does not call getFn on cache hit", func(t *testing.T) { + cache := newEnvironmentCache(time.Second, func(key string) (string, error) { + t.Errorf("should not have called getFn") + return "", nil + }) + cache.addItem("key", "value", time.Second) + + val, err := cache.get("key") + if err != nil { + t.Errorf("got error calling getOrSet - %e", err) + } + if val != "value" { + t.Errorf("expected %s - got %s", "value", val) + } + }) 
+ + t.Run("ignores expired items", func(t *testing.T) { + called := false + cache := newEnvironmentCache(time.Millisecond, func(key string) (string, error) { + called = true + return "value", nil + }) + cache.addItem("key", "value", time.Millisecond) + time.Sleep(time.Millisecond * 5) + + val, err := cache.get("key") + if err != nil { + t.Errorf("got error calling getOrSet - %e", err) + } + if val != "value" { + t.Errorf("expected %s - got %s", "value", val) + } + if !called { + t.Errorf("expected to call getFn") + } + }) + + t.Run("errors returned from getFn are propagated", func(t *testing.T) { + expectedErr := errors.New("error") + cache := newEnvironmentCache(time.Second, func(key string) (string, error) { + return "", expectedErr + }) + + _, err := cache.get("key") + if err != expectedErr { + t.Errorf("expected %e - got %e", expectedErr, err) + } + }) +} diff --git a/types/event.go b/types/event.go index 0a1115155c..5eea293c1b 100644 --- a/types/event.go +++ b/types/event.go @@ -19,13 +19,14 @@ type RequestIDContextKey struct{} // event is not part of a trace - it's an event that showed up with no trace ID type Event struct { - Context context.Context - APIHost string - APIKey string - Dataset string - SampleRate uint - Timestamp time.Time - Data map[string]interface{} + Context context.Context + APIHost string + APIKey string + Dataset string + Environment string + SampleRate uint + Timestamp time.Time + Data map[string]interface{} } // Trace isn't something that shows up on the wire; it gets created within @@ -64,7 +65,22 @@ func (t *Trace) AddSpan(sp *Span) { // GetSpans returns the list of spans in this trace func (t *Trace) GetSpans() []*Span { return t.spans +} + +func (t *Trace) GetSamplerKey() (string, bool) { + if IsLegacyAPIKey(t.APIKey) { + return t.Dataset, true + } + env := "" + for _, sp := range t.GetSpans() { + if sp.Event.Environment != "" { + env = sp.Event.Environment + break + } + } + + return env, false } // Span is an event that shows up 
with a trace ID, so will be part of a Trace @@ -72,3 +88,7 @@ type Span struct { Event TraceID string } + +func IsLegacyAPIKey(apiKey string) bool { + return len(apiKey) == 32 +} From 653d9226eed6841a444aafe85a2f5e6a26bfda8d Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Fri, 18 Feb 2022 00:29:04 +0000 Subject: [PATCH 122/351] prepare v1.11.0 release (#405) --- CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cee33b1588..9c047c3173 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Refinery Changelog +## 1.11.0 2022-02-17 + +### Enhancements + +- Add Environment & Services support (#403) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) + +### Maintenance + +- docs: add helm charts step to releasing (#400) | [@vreynolds](https://github.com/vreynolds) + ## 1.10.0 2022-02-10 ### Enhancements From c7acf30cb07a5d75fad8efb5c6debf487a3bc9f4 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Tue, 22 Feb 2022 19:10:50 +0000 Subject: [PATCH 123/351] update aws-client orb to latest (#409) --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index af51ac8ddc..d3ed5bfac4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,7 +1,7 @@ version: 2.1 orbs: - aws-cli: circleci/aws-cli@1.3.0 + aws-cli: circleci/aws-cli@2.1.0 commands: go-build: From 89c37da501ebfb8a0876a8da9ca507c2a14fbbf7 Mon Sep 17 00:00:00 2001 From: JamieDanielson Date: Thu, 24 Feb 2022 14:06:46 -0500 Subject: [PATCH 124/351] feat: add support for env name from auth (#410) * add support for env name from auth * use only env name in auth call and cache closes [Support non-slugified environment names](https://github.com/honeycombio/refinery/issues/407) #407 * Environment name will be returned in auth call in addition to slug. * Environment slug is no longer supported in the rules file. 
If it becomes an issue and there is high demand for it, we can consider adding it as an option later. * Using the environment name instead of the slug is more intuitive for end users, as they set the environment name intentionally, not the slug. This can now be used in the rules config instead of worrying about the structure of the slugified version of the environment. --- route/route.go | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/route/route.go b/route/route.go index ab39b05c03..93f74e4155 100644 --- a/route/route.go +++ b/route/route.go @@ -650,18 +650,18 @@ func getFirstValueFromMetadata(key string, md metadata.MD) string { type environmentCache struct { mutex sync.RWMutex items map[string]*cacheItem - ttl time.Duration + ttl time.Duration getFn func(string) (string, error) } -func (r *Router) SetEnvironmentCache(ttl time.Duration, getFn func(string)(string, error)) { +func (r *Router) SetEnvironmentCache(ttl time.Duration, getFn func(string) (string, error)) { r.environmentCache = newEnvironmentCache(ttl, getFn) } -func newEnvironmentCache(ttl time.Duration, getFn func(string)(string, error)) *environmentCache { +func newEnvironmentCache(ttl time.Duration, getFn func(string) (string, error)) *environmentCache { return &environmentCache{ items: make(map[string]*cacheItem), - ttl: ttl, + ttl: ttl, getFn: getFn, } } @@ -681,7 +681,7 @@ func (c *environmentCache) get(key string) (string, error) { } // get write lock early so we don't execute getFn in parallel so the - // the result will be cached before the next lock is aquired to prevent + // the result will be cached before the next lock is aquired to prevent // subsequent calls to getFn for the same key c.mutex.Lock() defer c.mutex.Unlock() @@ -711,14 +711,19 @@ func (c *environmentCache) addItem(key string, value string, ttl time.Duration) } } -type SlugInfo struct { +type TeamInfo struct { Slug string `json:"slug"` } +type EnvironmentInfo struct { + Slug string 
`json:"slug"` + Name string `json:"name"` +} + type AuthInfo struct { APIKeyAccess map[string]bool `json:"api_key_access"` - Team SlugInfo `json:"team"` - Environment SlugInfo `json:"environment"` + Team TeamInfo `json:"team"` + Environment EnvironmentInfo `json:"environment"` } func (r *Router) getEnvironmentName(apiKey string) (string, error) { @@ -769,6 +774,6 @@ func (r *Router) lookupEnvironment(apiKey string) (string, error) { if err := json.NewDecoder(resp.Body).Decode(&authinfo); err != nil { return "", fmt.Errorf("failed to JSON decode of AuthInfo response from Honeycomb API") } - r.Logger.Debug().WithString("environment", authinfo.Environment.Slug).Logf("Got environment") - return authinfo.Environment.Slug, nil + r.Logger.Debug().WithString("environment", authinfo.Environment.Name).Logf("Got environment") + return authinfo.Environment.Name, nil } From 85d44328076eab25da0ff4d23140bd988e626f31 Mon Sep 17 00:00:00 2001 From: JamieDanielson Date: Thu, 24 Feb 2022 16:52:04 -0500 Subject: [PATCH 125/351] Prep v1.12.0 (#412) --- CHANGELOG.md | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c047c3173..b9f3876e37 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,22 @@ # Refinery Changelog +## 1.12.0 2022-02-24 + +### Enhancements + +- feat: add support for env name from auth (#410) | [@JamieDanielson](https://github.com/JamieDanielson) + +### Maintenance + +- update aws-client orb to latest (#409) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) + ## 1.11.0 2022-02-17 -### Enhancements +### Enhancements + +**Note: Environment & Services Support requires v1.12.0 and higher** + +Do **not** use this version with Environment & Services. 
- Add Environment & Services support (#403) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) From 14531d209a2db07cb7f3212873b56ec816df6064 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Thu, 24 Mar 2022 14:43:33 -0400 Subject: [PATCH 126/351] fix: error log event metadata (#422) aligned metadata attached to events with what we attach to error logs --- app/app_test.go | 25 +++++++++++++++++++++++++ transmit/transmit.go | 15 ++++++++++----- 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/app/app_test.go b/app/app_test.go index 405ab633d9..5c464ca5f6 100644 --- a/app/app_test.go +++ b/app/app_test.go @@ -363,6 +363,11 @@ func TestPeerRouting(t *testing.T) { "long": "this is a test of the emergency broadcast system", "foo": "bar", }, + Metadata: map[string]string{ + "api_host": "http://api.honeycomb.io", + "dataset": "dataset", + "environment": "", + }, } assert.Equal(t, expectedEvent, senders[0].Events()[0]) @@ -482,6 +487,11 @@ func TestEventsEndpoint(t *testing.T) { "trace.trace_id": "1", "foo": "bar", }, + Metadata: map[string]string{ + "api_host": "http://api.honeycomb.io", + "dataset": "dataset", + "environment": "", + }, }, senders[0].Events()[0], ) @@ -524,6 +534,11 @@ func TestEventsEndpoint(t *testing.T) { "trace.trace_id": "1", "foo": "bar", }, + Metadata: map[string]string{ + "api_host": "http://api.honeycomb.io", + "dataset": "dataset", + "environment": "", + }, }, senders[1].Events()[0], ) @@ -586,6 +601,11 @@ func TestEventsEndpointWithNonLegacyKey(t *testing.T) { "trace.trace_id": "1", "foo": "bar", }, + Metadata: map[string]string{ + "api_host": "http://api.honeycomb.io", + "dataset": "dataset", + "environment": "test", + }, }, senders[0].Events()[0], ) @@ -628,6 +648,11 @@ func TestEventsEndpointWithNonLegacyKey(t *testing.T) { "trace.trace_id": "1", "foo": "bar", }, + Metadata: map[string]string{ + "api_host": "http://api.honeycomb.io", + "dataset": "dataset", + "environment": "test", + }, }, 
senders[1].Events()[0], ) diff --git a/transmit/transmit.go b/transmit/transmit.go index 38df5cffa9..30309c7a47 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -97,6 +97,12 @@ func (d *DefaultTransmission) EnqueueEvent(ev *types.Event) { libhEv.Dataset = ev.Dataset libhEv.SampleRate = ev.SampleRate libhEv.Timestamp = ev.Timestamp + // metadata is used to make error logs more helpful when processing libhoney responses + libhEv.Metadata = map[string]string{ + "api_host": ev.APIHost, + "dataset": ev.Dataset, + "environment": ev.Environment, + } for k, v := range ev.Data { libhEv.AddField(k, v) @@ -110,6 +116,7 @@ func (d *DefaultTransmission) EnqueueEvent(ev *types.Event) { WithField("request_id", ev.Context.Value(types.RequestIDContextKey{})). WithString("dataset", ev.Dataset). WithString("api_host", ev.APIHost). + WithString("environment", ev.Environment). Logf("failed to enqueue event") } } @@ -141,19 +148,17 @@ func (d *DefaultTransmission) processResponses( select { case r := <-responses: if r.Err != nil || r.StatusCode > 202 { - var apiHost, dataset, evType, target string + var apiHost, dataset, environment string if metadata, ok := r.Metadata.(map[string]string); ok { apiHost = metadata["api_host"] dataset = metadata["dataset"] - evType = metadata["type"] - target = metadata["target"] + environment = metadata["environment"] } log := d.Logger.Error().WithFields(map[string]interface{}{ "status_code": r.StatusCode, "api_host": apiHost, "dataset": dataset, - "event_type": evType, - "target": target, + "environment": environment, }) if r.Err != nil { log = log.WithField("error", r.Err.Error()) From d0ecf072f0e038182b55a15f39d0631a4c79a599 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Fri, 25 Mar 2022 12:23:30 +0530 Subject: [PATCH 127/351] Changes in exported payload --- .circleci/config.yml | 8 +-- .github/ISSUE_TEMPLATE/bug_report.md | 4 +- .github/ISSUE_TEMPLATE/feature_request.md | 2 +- .github/ISSUE_TEMPLATE/question-discussion.md | 2 +- 
.../security-vulnerability-report.md | 4 +- .github/PULL_REQUEST_TEMPLATE.md | 2 +- .github/workflows/stale.yml | 4 +- .gitignore | 4 +- CHANGELOG.md | 20 +++--- CONTRIBUTING.md | 2 +- Dockerfile | 6 +- README.md | 58 ++++++++--------- app/app.go | 12 ++-- app/app_test.go | 41 ++++++------ build-docker.sh | 2 +- build-pkg.sh | 16 ++--- cmd/test_redimem/main.go | 2 +- cmd/{refinery => tracing-proxy}/main.go | 52 +++++++-------- collect/cache/cache.go | 6 +- collect/cache/cache_test.go | 7 ++- collect/collect.go | 14 ++--- collect/collect_benchmark_test.go | 15 ++--- collect/collect_test.go | 15 ++--- config.toml | 6 +- config/config.go | 10 +-- config/config_test.go | 45 ++++++------- config/config_test_reload_error_test.go | 3 +- config/file_config.go | 18 +++--- config_complete.toml | 25 ++++---- go.mod | 14 ++++- go.sum | 23 ++++--- internal/peer/file.go | 2 +- internal/peer/file_test.go | 3 +- internal/peer/peers.go | 2 +- internal/peer/peers_test.go | 3 +- internal/peer/redis.go | 12 ++-- internal/redimem/redimem.go | 2 +- logger/honeycomb.go | 20 +++--- logger/logger.go | 2 +- logger/logger_test.go | 3 +- logger/logrus.go | 2 +- logger/mock.go | 2 +- metrics/honeycomb.go | 24 +++---- metrics/metrics.go | 2 +- metrics/prometheus.go | 4 +- metrics/prometheus_test.go | 5 +- refinery.service | 6 +- refinery.upstart | 10 +-- route/errors.go | 2 +- route/errors_test.go | 3 +- route/middleware.go | 4 +- route/otlp_trace.go | 63 +++++++++++-------- route/otlp_trace_test.go | 10 +-- route/route.go | 18 +++--- route/route_test.go | 12 ++-- rules_complete.toml | 4 +- sample/deterministic.go | 6 +- sample/deterministic_test.go | 7 ++- sample/dynamic.go | 8 +-- sample/dynamic_ema.go | 8 +-- sample/dynamic_ema_test.go | 9 +-- sample/dynamic_test.go | 9 +-- sample/rules.go | 8 +-- sample/rules_test.go | 9 +-- sample/sample.go | 8 +-- sample/sample_test.go | 7 ++- sample/totalthroughput.go | 8 +-- sample/totalthroughput_test.go | 9 +-- sample/trace_key.go | 2 +- 
sample/trace_key_test.go | 3 +- service/debug/debug_service.go | 2 +- sharder/deterministic.go | 8 +-- sharder/deterministic_test.go | 7 ++- sharder/sharder.go | 6 +- sharder/single.go | 2 +- transmit/mock.go | 2 +- transmit/transmit.go | 20 +++--- transmit/transmit_test.go | 15 ++--- types/event.go | 22 ++++--- 79 files changed, 452 insertions(+), 395 deletions(-) rename cmd/{refinery => tracing-proxy}/main.go (81%) diff --git a/.circleci/config.yml b/.circleci/config.yml index 0b3d38d81e..ccf629c3d4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -21,8 +21,8 @@ commands: GOOS=<< parameters.os >> \ GOARCH=<< parameters.arch >> \ go build -ldflags "-X main.BuildID=${CIRCLE_TAG:-${CIRCLE_SHA1:0:7}}" \ - -o $GOPATH/bin/refinery-<< parameters.os >>-<< parameters.arch >> \ - ./cmd/refinery + -o $GOPATH/bin/tracing-proxy-<< parameters.os >>-<< parameters.arch >> \ + ./cmd/tracing-proxy jobs: test: @@ -84,7 +84,7 @@ jobs: command: ./build-pkg.sh -m amd64 -v "${CIRCLE_TAG:-${CIRCLE_SHA1:0:7}}" -t rpm && mv *.rpm ~/artifacts - run: name: copy_binaries - command: cp $GOPATH/bin/refinery-* ~/artifacts + command: cp $GOPATH/bin/tracing-proxy-* ~/artifacts - run: echo "finished builds" && find ~/artifacts -ls - persist_to_workspace: root: ~/ @@ -120,7 +120,7 @@ jobs: command: | version=${CIRCLE_TAG:1} if [[ -z "$version" ]] ; then version=${CIRCLE_SHA1:0:7}; fi - aws s3 sync ~/artifacts s3://honeycomb-builds/honeycombio/refinery/$version/ + aws s3 sync ~/artifacts s3://honeycomb-builds/honeycombio/tracing-proxy/$version/ build_docker: docker: diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 9cd88bd5fe..5e73f80fc7 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -12,13 +12,13 @@ Thank you for taking the time to report bugs! We love code snippets and links to repositories that reproduce the issue, but understand if you don't have the time to add them. 
We'll do our best with the info you provide, and might ask follow-up questions. -Please see our [OSS process document](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md#) to get an idea of how we operate. +Please see our [OSS process document](https://github.com/jirs5/home/blob/main/honeycomb-oss-lifecycle-and-practices.md#) to get an idea of how we operate. ---> **Versions** - Go: -- Refinery: +- tracing-proxy: **Steps to reproduce** diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 457405f3e6..f8ecf25263 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -10,7 +10,7 @@ assignees: '' **Is your feature request related to a problem? Please describe.** diff --git a/.github/ISSUE_TEMPLATE/question-discussion.md b/.github/ISSUE_TEMPLATE/question-discussion.md index 63cc4a175c..3aa8920cb0 100644 --- a/.github/ISSUE_TEMPLATE/question-discussion.md +++ b/.github/ISSUE_TEMPLATE/question-discussion.md @@ -10,5 +10,5 @@ assignees: '' diff --git a/.github/ISSUE_TEMPLATE/security-vulnerability-report.md b/.github/ISSUE_TEMPLATE/security-vulnerability-report.md index 41337972a3..9efc7f698c 100644 --- a/.github/ISSUE_TEMPLATE/security-vulnerability-report.md +++ b/.github/ISSUE_TEMPLATE/security-vulnerability-report.md @@ -10,12 +10,12 @@ assignees: '' **Versions** - Go: -- Refinery: +- tracing-proxy: **Description** diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index fd8fd3b3f1..429aebf7fd 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -7,7 +7,7 @@ Please make sure to: - Add unit tests - Mention any relevant issues in the PR description (e.g. "Fixes #123") -Please see our [OSS process document](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md#) to get an idea of how we operate. 
+Please see our [OSS process document](https://github.com/jirs5/home/blob/main/honeycomb-oss-lifecycle-and-practices.md#) to get an idea of how we operate. --> ## Which problem is this PR solving? diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index d4a449e250..5a4d4373c9 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -17,8 +17,8 @@ jobs: start-date: '2021-09-01T00:00:00Z' stale-issue-message: 'Marking this issue as stale because it has been open 14 days with no activity. Please add a comment if this is still an ongoing issue; otherwise this issue will be automatically closed in 7 days.' stale-pr-message: 'Marking this PR as stale because it has been open 30 days with no activity. Please add a comment if this PR is still relevant; otherwise this PR will be automatically closed in 7 days.' - close-issue-message: 'Closing this issue due to inactivity. Please see our [Honeycomb OSS Lifecyle and Practices](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md).' - close-pr-message: 'Closing this PR due to inactivity. Please see our [Honeycomb OSS Lifecyle and Practices](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md).' + close-issue-message: 'Closing this issue due to inactivity. Please see our [Honeycomb OSS Lifecyle and Practices](https://github.com/jirs5/home/blob/main/honeycomb-oss-lifecycle-and-practices.md).' + close-pr-message: 'Closing this PR due to inactivity. Please see our [Honeycomb OSS Lifecyle and Practices](https://github.com/jirs5/home/blob/main/honeycomb-oss-lifecycle-and-practices.md).' 
days-before-issue-stale: 14 days-before-pr-stale: 30 days-before-issue-close: 7 diff --git a/.gitignore b/.gitignore index 76638ae4d0..7513609170 100644 --- a/.gitignore +++ b/.gitignore @@ -11,7 +11,7 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out -refinery -!/cmd/refinery +tracing-proxy +!/cmd/tracing-proxy test_redimem !/cmd/test_redimem diff --git a/CHANGELOG.md b/CHANGELOG.md index 1964778750..844acf48f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,11 @@ -# Refinery Changelog +# tracing-proxy Changelog ## 1.8.1 2022-01-06 ### Maintenance - Add re-triage workflow (#368) | [@vreynolds](https://github.com/vreynolds) -- Bump libhoney & golang (#373) | [@lizthegrey](https://github.com/lizthegrey) +- Bump libtrace & golang (#373) | [@lizthegrey](https://github.com/lizthegrey) - Bump github.com/honeycombio/husky from 0.5.0 to 0.6.0 (#370) - Bump github.com/prometheus/client_golang from 0.9.4 to 1.11.0 (#357) @@ -42,7 +42,7 @@ ### Fixes -- bump libhoney-go to v1.15.6 +- bump libtrace-go to v1.15.6 - empower apply-labels action to apply labels (#344) - Bump github.com/honeycombio/libhoney-go from 1.15.4 to 1.15.5 (#327) - Re-add missing docker login when publishing (#338) @@ -104,7 +104,7 @@ ### Added -- Add support for OTLP over HTTP/protobuf [#279](https://github.com/honeycombio/refinery/pull/279) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) +- Add support for OTLP over HTTP/protobuf [#279](https://github.com/jirs5/tracing-proxy/pull/279) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) ### Maintenance @@ -120,11 +120,11 @@ ### Added -- Add support to "does-not-contain" operator on RulesBasedSampler [#267](https://github.com/honeycombio/refinery/pull/267) | [@tr-fteixeira](https://github.com/tr-fteixeira) +- Add support to "does-not-contain" 
operator on RulesBasedSampler [#267](https://github.com/jirs5/tracing-proxy/pull/267) | [@tr-fteixeira](https://github.com/tr-fteixeira) ### Fixes -- Ensure span links and events generate events and get resource attrs [#264](https://github.com/honeycombio/refinery/pull/264) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) +- Ensure span links and events generate events and get resource attrs [#264](https://github.com/jirs5/tracing-proxy/pull/264) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) ## 1.2.1 @@ -137,7 +137,7 @@ ### Added - Add `UseTLSInsecure` config option to skip TLS verification with Redis (#254) | [@beanieboi](https://github.com/beanieboi) -- Add `AddHostMetadataToTrace` config option to add Refinery hostname information to spans (#250) | [@jharley](https://github.com/jharley) +- Add `AddHostMetadataToTrace` config option to add tracing-proxy hostname information to spans (#250) | [@jharley](https://github.com/jharley) - Additional config validation: verify that sample rate trace field key is specified, if needed (#248) | [@paulosman](https://github.com/paulosman) ### Changed @@ -163,7 +163,7 @@ ### Fixes -- Refinery startup issues in v1.1.0 +- tracing-proxy startup issues in v1.1.0 ## 1.1.0 @@ -175,7 +175,7 @@ ### Fixes -- Pass along upstream and peer metrics configs to libhoney (#227) +- Pass along upstream and peer metrics configs to libtrace (#227) - Guard against nil pointer dereference when processing OTLP span.Status (#223) - Fix YAML config parsing (#220) @@ -185,4 +185,4 @@ ## 1.0.0 -Initial GA release of Refinery +Initial GA release of tracing-proxy diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0cc0b86141..9af4c28183 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,3 +1,3 @@ # Contributing Guide -Please see our [general guide for OSS lifecycle and 
practices.](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md) +Please see our [general guide for OSS lifecycle and practices.](https://github.com/jirs5/home/blob/main/honeycomb-oss-lifecycle-and-practices.md) diff --git a/Dockerfile b/Dockerfile index 0474fa666f..ca4bb18cc0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,11 +17,11 @@ RUN CGO_ENABLED=0 \ GOOS=linux \ GOARCH=amd64 \ go build -ldflags "-X main.BuildID=${BUILD_ID}" \ - -o refinery \ - ./cmd/refinery + -o tracing-proxy \ + ./cmd/tracing-proxy FROM scratch COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ -COPY --from=builder /app/refinery /usr/bin/refinery +COPY --from=builder /app/tracing-proxy /usr/bin/tracing-proxy diff --git a/README.md b/README.md index 7385f2a82f..4a4848fc64 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,31 @@ -# Refinery - the Honeycomb Sampling Proxy +# tracing-proxy - the Honeycomb Sampling Proxy -![refinery](https://user-images.githubusercontent.com/6510988/94976958-8cadba80-04cb-11eb-9883-6e8ea554a081.png) +![tracing-proxy](https://user-images.githubusercontent.com/6510988/94976958-8cadba80-04cb-11eb-9883-6e8ea554a081.png) -[![OSS Lifecycle](https://img.shields.io/osslifecycle/honeycombio/refinery?color=success)](https://github.com/honeycombio/home/blob/main/honeycomb-oss-lifecycle-and-practices.md) -[![Build Status](https://circleci.com/gh/honeycombio/refinery.svg?style=shield)](https://circleci.com/gh/honeycombio/refinery) +[![OSS Lifecycle](https://img.shields.io/osslifecycle/honeycombio/tracing-proxy?color=success)](https://github.com/jirs5/home/blob/main/honeycomb-oss-lifecycle-and-practices.md) +[![Build Status](https://circleci.com/gh/honeycombio/tracing-proxy.svg?style=shield)](https://circleci.com/gh/honeycombio/tracing-proxy) ## Purpose -Refinery is a trace-aware sampling proxy. 
It collects spans emitted by your application, gathers them into traces, and examines them as a whole. This enables Refinery to make an intelligent sampling decision (whether to keep or discard) based on the entire trace. Buffering the spans allows you to use fields that might be present in different spans within the trace to influence the sampling decision. For example, the root span might have HTTP status code, whereas another span might have information on whether the request was served from a cache. Using Refinery, you can choose to keep only traces that had a 500 status code and were also served from a cache. +tracing-proxy is a trace-aware sampling proxy. It collects spans emitted by your application, gathers them into traces, and examines them as a whole. This enables tracing-proxy to make an intelligent sampling decision (whether to keep or discard) based on the entire trace. Buffering the spans allows you to use fields that might be present in different spans within the trace to influence the sampling decision. For example, the root span might have HTTP status code, whereas another span might have information on whether the request was served from a cache. Using tracing-proxy, you can choose to keep only traces that had a 500 status code and were also served from a cache. -## Setting up Refinery +## Setting up tracing-proxy -Refinery is designed to sit within your infrastructure where all sources of Honeycomb events (aka spans if you're doing tracing) can reach it. -A standard deployment will have a cluster of two or more Refinery processes accessible via a separate load balancer. -Refinery processes must be able to communicate with each other to concentrate traces on single servers. +tracing-proxy is designed to sit within your infrastructure where all sources of Honeycomb events (aka spans if you're doing tracing) can reach it. +A standard deployment will have a cluster of two or more tracing-proxy processes accessible via a separate load balancer. 
+tracing-proxy processes must be able to communicate with each other to concentrate traces on single servers. Within your application (or other Honeycomb event sources) you would configure the `API Host` to be http(s)://load-balancer/. Everything else remains the same (api key, dataset name, etc. - all that lives with the originating client). ### Minimum configuration -The Refinery cluster should have at least 2 servers with 2GB RAM and access to 2 cores each. +The tracing-proxy cluster should have at least 2 servers with 2GB RAM and access to 2 cores each. Additional RAM and CPU can be used by increasing configuration values to have a larger `CacheCapacity`. The cluster should be monitored for panics caused by running out of memory and scaled up (with either more servers or more RAM per server) when they occur. ### Builds -Refinery is built by [CircleCI](https://circleci.com/gh/honeycombio/refinery). Released versions of Refinery are available via Github under the Releases tab. +tracing-proxy is built by [CircleCI](https://circleci.com/gh/honeycombio/tracing-proxy). Released versions of tracing-proxy are available via Github under the Releases tab. ## Configuration @@ -36,43 +36,43 @@ There are a few vital configuration options; read through this list and make sur ### File-based Config -- API Keys: Refinery itself needs to be configured with a list of your API keys. This lets it respond with a 401/Unauthorized if an unexpected API key is used. You can configure Refinery to accept all API keys by setting it to `*` but then you will lose the authentication feedback to your application. Refinery will accept all events even if those events will eventually be rejected by the Honeycomb API due to an API key issue. +- API Keys: tracing-proxy itself needs to be configured with a list of your API keys. This lets it respond with a 401/Unauthorized if an unexpected API key is used. 
You can configure tracing-proxy to accept all API keys by setting it to `*` but then you will lose the authentication feedback to your application. tracing-proxy will accept all events even if those events will eventually be rejected by the Honeycomb API due to an API key issue. - Goal Sample Rate and the list of fields you'd like to use to generate the keys off which sample rate is chosen. This is where the power of the proxy comes in - being able to dynamically choose sample rates based on the contents of the traces as they go by. There is an overall default and dataset-specific sections for this configuration, so that different datasets can have different sets of fields and goal sample rates. - Trace timeout - it should be set higher (maybe double?) the longest expected trace. If all of your traces complete in under 10 seconds, 30 is a good value here. If you have traces that can last minutes, it should be raised accordingly. Note that the trace doesn't _have_ to complete before this timer expires - but the sampling decision will be made at that time. So any spans that contain fields that you want to use to compute the sample rate should arrive before this timer expires. Additional spans that arrive after the timer has expired will be sent or dropped according to the sampling decision made when the timer expired. -- Peer list: this is a list of all the other servers participating in this Refinery cluster. Traces are evenly distributed across all available servers, and any one trace must be concentrated on one server, regardless of which server handled the incoming spans. The peer list lets the cluster move spans around to the server that is handling the trace. (Not used in the Redis-based config.) +- Peer list: this is a list of all the other servers participating in this tracing-proxy cluster. Traces are evenly distributed across all available servers, and any one trace must be concentrated on one server, regardless of which server handled the incoming spans. 
The peer list lets the cluster move spans around to the server that is handling the trace. (Not used in the Redis-based config.) - Buffer size: The `InMemCollector`'s `CacheCapacity` setting determines how many in-flight traces you can have. This should be large enough to avoid overflow. Some multiple (2x, 3x) the total number of in-flight traces you expect is a good place to start. If it's too low you will see the `collect_cache_buffer_overrun` metric increment. If you see that, you should increase the size of the buffer. -There are a few components of Refinery with multiple implementations; the config file lets you choose which you'd like. As an example, there are two logging implementations - one that uses `logrus` and sends logs to STDOUT and a `honeycomb` implementation that sends the log messages to a Honeycomb dataset instead. Components with multiple implementations have one top level config item that lets you choose which implementation to use and then a section further down with additional config options for that choice (for example, the Honeycomb logger requires an API key). +There are a few components of tracing-proxy with multiple implementations; the config file lets you choose which you'd like. As an example, there are two logging implementations - one that uses `logrus` and sends logs to STDOUT and a `honeycomb` implementation that sends the log messages to a Honeycomb dataset instead. Components with multiple implementations have one top level config item that lets you choose which implementation to use and then a section further down with additional config options for that choice (for example, the Honeycomb logger requires an API key). -When configuration changes, send Refinery a USR1 signal and it will re-read the configuration. +When configuration changes, send tracing-proxy a USR1 signal and it will re-read the configuration. 
### Redis-based Peer Management With peer management in Redis, all config options _except_ peer management are still handled by the config file. -Only coordinating the list of peers in the Refinery cluster is managed with Redis. +Only coordinating the list of peers in the tracing-proxy cluster is managed with Redis. To enable the redis-based config: - set PeerManagement.Type in the config file to "redis" -When launched in redis-config mode, Refinery needs a redis host to use for managing the list of peers in the Refinery cluster. This hostname and port can be specified in one of two ways: +When launched in redis-config mode, tracing-proxy needs a redis host to use for managing the list of peers in the tracing-proxy cluster. This hostname and port can be specified in one of two ways: -- set the `REFINERY_REDIS_HOST` environment variable (and optionally the `REFINERY_REDIS_PASSWORD` environment variable) +- set the `tracing-proxy_REDIS_HOST` environment variable (and optionally the `tracing-proxy_REDIS_PASSWORD` environment variable) - set the `RedisHost` field in the config file (and optionally the `RedisPassword` field in the config file) The Redis host should be a hostname and a port, for example `redis.mydomain.com:6379`. The example config file has `localhost:6379` which obviously will not work with more than one host. When TLS is required to connect to the Redis instance, set the `UseTLS` config to `true`. -By default, a Refinery process will register itself in Redis using its local hostname as its identifier for peer communications. +By default, a tracing-proxy process will register itself in Redis using its local hostname as its identifier for peer communications. In environments where domain name resolution is slow or unreliable, override the reliance on name lookups by specifying the name of the peering network interface with the `IdentifierInterfaceName` configuration option. 
-See the [Refinery documentation](https://docs.honeycomb.io/manage-data-volume/refinery/) for more details on tuning a cluster. +See the [tracing-proxy documentation](https://docs.honeycomb.io/manage-data-volume/tracing-proxy/) for more details on tuning a cluster. ## How sampling decisions are made -In the configuration file, you can choose from a few sampling methods and specify options for each. The `DynamicSampler` is the most interesting and most commonly used. It uses the `AvgSampleRate` algorithm from the [`dynsampler-go`](https://github.com/honeycombio/dynsampler-go) package. Briefly described, you configure Refinery to examine the trace for a set of fields (for example, `request.status_code` and `request.method`). It collects all the values found in those fields anywhere in the trace (eg "200" and "GET") together into a key it hands to the dynsampler. The dynsampler code will look at the frequency that key appears during the previous 30 seconds (or other value set by the `ClearFrequencySec` setting) and use that to hand back a desired sample rate. More frequent keys are sampled more heavily, so that an even distribution of traffic across the keyspace is represented in Honeycomb. +In the configuration file, you can choose from a few sampling methods and specify options for each. The `DynamicSampler` is the most interesting and most commonly used. It uses the `AvgSampleRate` algorithm from the [`dynsampler-go`](https://github.com/honeycombio/dynsampler-go) package. Briefly described, you configure tracing-proxy to examine the trace for a set of fields (for example, `request.status_code` and `request.method`). It collects all the values found in those fields anywhere in the trace (eg "200" and "GET") together into a key it hands to the dynsampler. 
The dynsampler code will look at the frequency that key appears during the previous 30 seconds (or other value set by the `ClearFrequencySec` setting) and use that to hand back a desired sample rate. More frequent keys are sampled more heavily, so that an even distribution of traffic across the keyspace is represented in Honeycomb. By selecting fields well, you can drop significant amounts of traffic while still retaining good visibility into the areas of traffic that interest you. For example, if you want to make sure you have a complete list of all URL handlers invoked, you would add the URL (or a normalized form) as one of the fields to include. Be careful in your selection though, because if the combination of fields creates a unique key each time, you won't sample out any traffic. Because of this it is not effective to use fields that have unique values (like a UUID) as one of the sampling fields. Each field included should ideally have values that appear many times within any given 30 second window in order to effectively turn in to a sample rate. @@ -80,19 +80,19 @@ For more detail on how this algorithm works, please refer to the `dynsampler` pa ## Dry Run Mode -When getting started with Refinery or when updating sampling rules, it may be helpful to verify that the rules are working as expected before you start dropping traffic. By enabling dry run mode, all spans in each trace will be marked with the sampling decision in a field called `refinery_kept`. All traces will be sent to Honeycomb regardless of the sampling decision. You can then run queries in Honeycomb on this field to check your results and verify that the rules are working as intended. Enable dry run mode by adding `DryRun = true` in your configuration, as noted in `rules_complete.toml`. +When getting started with tracing-proxy or when updating sampling rules, it may be helpful to verify that the rules are working as expected before you start dropping traffic. 
By enabling dry run mode, all spans in each trace will be marked with the sampling decision in a field called `tracing-proxy_kept`. All traces will be sent to Honeycomb regardless of the sampling decision. You can then run queries in Honeycomb on this field to check your results and verify that the rules are working as intended. Enable dry run mode by adding `DryRun = true` in your configuration, as noted in `rules_complete.toml`. When dry run mode is enabled, the metric `trace_send_kept` will increment for each trace, and the metric for `trace_send_dropped` will remain 0, reflecting that we are sending all traces to Honeycomb. ## Scaling Up -Refinery uses bounded queues and circular buffers to manage allocating traces, so even under high volume memory use shouldn't expand dramatically. However, given that traces are stored in a circular buffer, when the throughput of traces exceeds the size of the buffer, things will start to go wrong. If you have statistics configured, a counter named `collect_cache_buffer_overrun` will be incremented each time this happens. The symptoms of this will be that traces will stop getting accumulated together, and instead spans that should be part of the same trace will be treated as two separate traces. All traces will continue to be sent (and sampled) but the sampling decisions will be inconsistent so you'll wind up with partial traces making it through the sampler and it will be very confusing. The size of the circular buffer is a configuration option named `CacheCapacity`. To choose a good value, you should consider the throughput of traces (e.g. traces / second started) and multiply that by the maximum duration of a trace (say, 3 seconds), then multiply that by some large buffer (maybe 10x). This will give you good headroom. +tracing-proxy uses bounded queues and circular buffers to manage allocating traces, so even under high volume memory use shouldn't expand dramatically. 
However, given that traces are stored in a circular buffer, when the throughput of traces exceeds the size of the buffer, things will start to go wrong. If you have statistics configured, a counter named `collect_cache_buffer_overrun` will be incremented each time this happens. The symptoms of this will be that traces will stop getting accumulated together, and instead spans that should be part of the same trace will be treated as two separate traces. All traces will continue to be sent (and sampled) but the sampling decisions will be inconsistent so you'll wind up with partial traces making it through the sampler and it will be very confusing. The size of the circular buffer is a configuration option named `CacheCapacity`. To choose a good value, you should consider the throughput of traces (e.g. traces / second started) and multiply that by the maximum duration of a trace (say, 3 seconds), then multiply that by some large buffer (maybe 10x). This will give you good headroom. Determining the number of machines necessary in the cluster is not an exact science, and is best influenced by watching for buffer overruns. But for a rough heuristic, count on a single machine using about 2G of memory to handle 5000 incoming events and tracking 500 sub-second traces per second (for each full trace lasting less than a second and an average size of 10 spans per trace). ## Understanding Regular Operation -Refinery emits a number of metrics to give some indication about the health of the process. These metrics can be exposed to Prometheus or sent up to Honeycomb. The interesting ones to watch are: +tracing-proxy emits a number of metrics to give some indication about the health of the process. These metrics can be exposed to Prometheus or sent up to Honeycomb. The interesting ones to watch are: - Sample rates: how many traces are kept / dropped, and what does the sample rate distribution look like? - [incoming|peer]_router_\*: how many events (no trace info) vs. 
spans (have trace info) have been accepted, and how many sent on to peers? @@ -105,9 +105,9 @@ The default logging level of `warn` is almost entirely silent. The `debug` level ## Restarts -Refinery does not yet buffer traces or sampling decisions to disk. When you restart the process all in-flight traces will be flushed (sent upstream to Honeycomb), but you will lose the record of past trace decisions. When started back up, it will start with a clean slate. +tracing-proxy does not yet buffer traces or sampling decisions to disk. When you restart the process all in-flight traces will be flushed (sent upstream to Honeycomb), but you will lose the record of past trace decisions. When started back up, it will start with a clean slate. -## Architecture of Refinery itself (for contributors) +## Architecture of tracing-proxy itself (for contributors) Within each directory, the interface the dependency exports is in the file with the same name as the directory and then (for the most part) each of the other files are alternative implementations of that interface. For example, in `logger`, `/logger/logger.go` contains the interface definition and `logger/honeycomb.go` contains the implementation of the `logger` interface that will send logs to Honeycomb. @@ -115,16 +115,16 @@ Within each directory, the interface the dependency exports is in the file with `app/app.go` is the main control point. When its `Start` function ends, the program shuts down. It launches two `Router`s which listen for incoming events. -`route/route.go` listens on the network for incoming traffic. There are two routers running and they handle different types of incoming traffic: events coming from the outside world (the `incoming` router) and events coming from another member of the Refinery cluster (`peer` traffic). Once it gets an event, it decides where it should go next: is this incoming request an event (or batch of events), and if so, does it have a trace ID? 
Everything that is not an event or an event that does not have a trace ID is immediately handed to `transmission` to be forwarded on to Honeycomb. If it is an event with a trace ID, the router extracts the trace ID and then uses the `sharder` to decide which member of the Refinery cluster should handle this trace. If it's a peer, the event will be forwarded to that peer. If it's us, the event will be transformed into an internal representation and handed to the `collector` to bundle spans into traces. +`route/route.go` listens on the network for incoming traffic. There are two routers running and they handle different types of incoming traffic: events coming from the outside world (the `incoming` router) and events coming from another member of the tracing-proxy cluster (`peer` traffic). Once it gets an event, it decides where it should go next: is this incoming request an event (or batch of events), and if so, does it have a trace ID? Everything that is not an event or an event that does not have a trace ID is immediately handed to `transmission` to be forwarded on to Honeycomb. If it is an event with a trace ID, the router extracts the trace ID and then uses the `sharder` to decide which member of the tracing-proxy cluster should handle this trace. If it's a peer, the event will be forwarded to that peer. If it's us, the event will be transformed into an internal representation and handed to the `collector` to bundle spans into traces. `collect/collect.go` the collector is responsible for bundling spans together into traces and deciding when to send them to Honeycomb or if they should be dropped. The first time a trace ID is seen, the collector starts a timer. If the root span (aka a span with a trace ID and no parent ID) arrives before the timer expires, then the trace is considered complete. The trace is sent and the timer is canceled. If the timer expires before the root span arrives, the trace will be sent whether or not it is complete. 
Just before sending, the collector asks the `sampler` for a sample rate and whether or not to keep the trace. The collector obeys this sampling decision and records it (the record is applied to any spans that may come in as part of the trace after the decision has been made). After making the sampling decision, if the trace is to be kept, it is passed along to the `transmission` for actual sending. `transmit/transmit.go` is a wrapper around the HTTP interactions with the Honeycomb API. It handles batching events together and sending them upstream. -`logger` and `metrics` are for managing the logs and metrics that Refinery itself produces. +`logger` and `metrics` are for managing the logs and metrics that tracing-proxy itself produces. `sampler` contains algorithms to compute sample rates based on the traces provided. -`sharder` determines which peer in a clustered Refinery config is supposed to handle an individual trace. +`sharder` determines which peer in a clustered tracing-proxy config is supposed to handle an individual trace. `types` contains a few type definitions that are used to hand data in between packages. 
diff --git a/app/app.go b/app/app.go index 3109a1ba60..cfd51e3396 100644 --- a/app/app.go +++ b/app/app.go @@ -1,11 +1,11 @@ package app import ( - "github.com/honeycombio/refinery/collect" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/route" + "github.com/jirs5/tracing-proxy/collect" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/route" ) type App struct { @@ -16,7 +16,7 @@ type App struct { Collector collect.Collector `inject:""` Metrics metrics.Metrics `inject:"metrics"` - // Version is the build ID for Refinery so that the running process may answer + // Version is the build ID for tracing-proxy so that the running process may answer // requests for the version Version string } diff --git a/app/app_test.go b/app/app_test.go index 8c643d47ac..6dbb353fc6 100644 --- a/app/app_test.go +++ b/app/app_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package app @@ -27,14 +28,14 @@ import ( "github.com/honeycombio/libhoney-go" "github.com/honeycombio/libhoney-go/transmission" - "github.com/honeycombio/refinery/collect" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/internal/peer" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/sample" - "github.com/honeycombio/refinery/sharder" - "github.com/honeycombio/refinery/transmit" + "github.com/jirs5/tracing-proxy/collect" + "github.com/jirs5/tracing-proxy/config" + 
"github.com/jirs5/tracing-proxy/internal/peer" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/sample" + "github.com/jirs5/tracing-proxy/sharder" + "github.com/jirs5/tracing-proxy/transmit" ) type countingWriterSender struct { @@ -97,7 +98,7 @@ func (p *testPeers) RegisterUpdatedPeersCallback(callback func()) { func newStartedApp( t testing.TB, - libhoneyT transmission.Sender, + libtraceT transmission.Sender, basePort int, peers peer.Peers, enableHostMetadata bool, @@ -114,7 +115,7 @@ func newStartedApp( GetListenAddrVal: "127.0.0.1:" + strconv.Itoa(basePort), GetPeerListenAddrVal: "127.0.0.1:" + strconv.Itoa(basePort+1), GetAPIKeysVal: []string{"KEY"}, - GetHoneycombAPIVal: "http://api.honeycomb.io", + GetHoneycombAPIVal: "http://jirs5", GetInMemoryCollectorCacheCapacityVal: config.InMemoryCollectorCacheCapacity{CacheCapacity: 10000}, AddHostMetadataToTrace: enableHostMetadata, } @@ -153,17 +154,17 @@ func newStartedApp( samplerFactory := &sample.SamplerFactory{} - upstreamClient, err := libhoney.NewClient(libhoney.ClientConfig{ - Transmission: libhoneyT, + upstreamClient, err := libtrace.NewClient(libtrace.ClientConfig{ + Transmission: libtraceT, }) assert.NoError(t, err) - sdPeer, _ := statsd.New(statsd.Prefix("refinery.peer")) - peerClient, err := libhoney.NewClient(libhoney.ClientConfig{ + sdPeer, _ := statsd.New(statsd.Prefix("tracing-proxy.peer")) + peerClient, err := libtrace.NewClient(libtrace.ClientConfig{ Transmission: &transmission.Honeycomb{ MaxBatchSize: c.GetMaxBatchSize(), - BatchTimeout: libhoney.DefaultBatchTimeout, - MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches, + BatchTimeout: libtrace.DefaultBatchTimeout, + MaxConcurrentBatches: libtrace.DefaultMaxConcurrentBatches, PendingWorkCapacity: uint(c.GetPeerBufferSize()), Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, @@ 
-298,7 +299,7 @@ func TestPeerRouting(t *testing.T) { APIKey: "KEY", Dataset: "dataset", SampleRate: 2, - APIHost: "http://api.honeycomb.io", + APIHost: "http://api.jirs5", Timestamp: now, Data: map[string]interface{}{ "trace.trace_id": "1", @@ -379,7 +380,7 @@ func TestHostMetadataSpanAdditions(t *testing.T) { } } - expectedSpan := `{"data":{"foo":"bar","meta.refinery.local_hostname":"%s","trace.trace_id":"1"},"dataset":"dataset"}` + "\n" + expectedSpan := `{"data":{"foo":"bar","meta.tracing-proxy.local_hostname":"%s","trace.trace_id":"1"},"dataset":"dataset"}` + "\n" assert.Equal(t, fmt.Sprintf(expectedSpan, hostname), out.String()) } @@ -432,7 +433,7 @@ func TestEventsEndpoint(t *testing.T) { APIKey: "KEY", Dataset: "dataset", SampleRate: 10, - APIHost: "http://api.honeycomb.io", + APIHost: "http://api.jirs5", Timestamp: now, Data: map[string]interface{}{ "trace.trace_id": "1", @@ -474,7 +475,7 @@ func TestEventsEndpoint(t *testing.T) { APIKey: "KEY", Dataset: "dataset", SampleRate: 10, - APIHost: "http://api.honeycomb.io", + APIHost: "http://api.jirs5", Timestamp: now, Data: map[string]interface{}{ "trace.trace_id": "1", diff --git a/build-docker.sh b/build-docker.sh index c27a5091dd..4a6324a1db 100755 --- a/build-docker.sh +++ b/build-docker.sh @@ -21,4 +21,4 @@ ko publish \ --tags "${TAGS}" \ --base-import-paths \ --platform "linux/amd64,linux/arm64" \ - ./cmd/refinery + ./cmd/tracing-proxy diff --git a/build-pkg.sh b/build-pkg.sh index c26cc0a6cd..1cd44f1111 100755 --- a/build-pkg.sh +++ b/build-pkg.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Build deb or rpm packages for Refinery. +# Build deb or rpm packages for tracing-proxy. 
set -e function usage() { @@ -30,14 +30,14 @@ if [ -z "$version" ]; then version=v0.0.0-dev fi -fpm -s dir -n refinery \ - -m "Honeycomb " \ +fpm -s dir -n tracing-proxy \ + -m "Opsramp " \ -v ${version#v} \ -t $pkg_type \ -a $arch \ --pre-install=./preinstall \ - $GOPATH/bin/refinery-linux-${arch}=/usr/bin/refinery \ - ./refinery.upstart=/etc/init/refinery.conf \ - ./refinery.service=/lib/systemd/system/refinery.service \ - ./config.toml=/etc/refinery/refinery.toml \ - ./rules.toml=/etc/refinery/rules.toml + $GOPATH/bin/tracing-proxy-linux-${arch}=/usr/bin/tracing-proxy \ + ./tracing-proxy.upstart=/etc/init/tracing-proxy.conf \ + ./tracing-proxy.service=/lib/systemd/system/tracing-proxy.service \ + ./config.toml=/etc/tracing-proxy/tracing-proxy.toml \ + ./rules.toml=/etc/tracing-proxy/rules.toml diff --git a/cmd/test_redimem/main.go b/cmd/test_redimem/main.go index 161f3c153b..744faa681c 100644 --- a/cmd/test_redimem/main.go +++ b/cmd/test_redimem/main.go @@ -13,7 +13,7 @@ import ( "github.com/gomodule/redigo/redis" "github.com/sirupsen/logrus" - "github.com/honeycombio/refinery/internal/redimem" + "github.com/jirs5/tracing-proxy/internal/redimem" ) func main() { diff --git a/cmd/refinery/main.go b/cmd/tracing-proxy/main.go similarity index 81% rename from cmd/refinery/main.go rename to cmd/tracing-proxy/main.go index 532929f970..ace7b783c1 100644 --- a/cmd/refinery/main.go +++ b/cmd/tracing-proxy/main.go @@ -11,21 +11,21 @@ import ( "github.com/facebookgo/inject" "github.com/facebookgo/startstop" - libhoney "github.com/honeycombio/libhoney-go" + libtrace "github.com/honeycombio/libhoney-go" "github.com/honeycombio/libhoney-go/transmission" flag "github.com/jessevdk/go-flags" "github.com/sirupsen/logrus" - "github.com/honeycombio/refinery/app" - 
"github.com/honeycombio/refinery/collect" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/internal/peer" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/sample" - "github.com/honeycombio/refinery/service/debug" - "github.com/honeycombio/refinery/sharder" - "github.com/honeycombio/refinery/transmit" + "github.com/jirs5/tracing-proxy/app" + "github.com/jirs5/tracing-proxy/collect" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/internal/peer" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/sample" + "github.com/jirs5/tracing-proxy/service/debug" + "github.com/jirs5/tracing-proxy/sharder" + "github.com/jirs5/tracing-proxy/transmit" ) // set by travis. 
@@ -33,8 +33,8 @@ var BuildID string var version string type Options struct { - ConfigFile string `short:"c" long:"config" description:"Path to config file" default:"/etc/refinery/refinery.toml"` - RulesFile string `short:"r" long:"rules_config" description:"Path to rules config file" default:"/etc/refinery/rules.toml"` + ConfigFile string `short:"c" long:"config" description:"Path to config file" default:"/etc/tracing-proxy/tracing-proxy.toml"` + RulesFile string `short:"r" long:"rules_config" description:"Path to rules config file" default:"/etc/tracing-proxy/rules.toml"` Version bool `short:"v" long:"version" description:"Print version number and exit"` Debug bool `short:"d" long:"debug" description:"If enabled, runs debug service (runs on the first open port between localhost:6060 and :6069 by default)"` InterfaceNames bool `long:"interface-names" description:"If set, print system's network interface names and exit."` @@ -128,33 +128,35 @@ func main() { TLSHandshakeTimeout: 1200 * time.Millisecond, } - upstreamMetricsConfig := metrics.GetMetricsImplementation(c, "libhoney_upstream") - peerMetricsConfig := metrics.GetMetricsImplementation(c, "libhoney_peer") + upstreamMetricsConfig := metrics.GetMetricsImplementation(c, "libtrace_upstream") + peerMetricsConfig := metrics.GetMetricsImplementation(c, "libtrace_peer") - userAgentAddition := "refinery/" + version - upstreamClient, err := libhoney.NewClient(libhoney.ClientConfig{ + userAgentAddition := "tracing-proxy/" + version + upstreamClient, err := libtrace.NewClient(libtrace.ClientConfig{ Transmission: &transmission.Honeycomb{ MaxBatchSize: c.GetMaxBatchSize(), - BatchTimeout: libhoney.DefaultBatchTimeout, - MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches, + BatchTimeout: libtrace.DefaultBatchTimeout, + MaxConcurrentBatches: libtrace.DefaultMaxConcurrentBatches, PendingWorkCapacity: uint(c.GetUpstreamBufferSize()), UserAgentAddition: userAgentAddition, Transport: upstreamTransport, BlockOnSend: true, 
- EnableMsgpackEncoding: true, + EnableMsgpackEncoding: false, Metrics: upstreamMetricsConfig, }, }) if err != nil { - fmt.Printf("unable to initialize upstream libhoney client") + fmt.Printf("unable to initialize upstream libtrace client") os.Exit(1) } - peerClient, err := libhoney.NewClient(libhoney.ClientConfig{ + fmt.Println("upstream client created..") + + peerClient, err := libtrace.NewClient(libtrace.ClientConfig{ Transmission: &transmission.Honeycomb{ MaxBatchSize: c.GetMaxBatchSize(), - BatchTimeout: libhoney.DefaultBatchTimeout, - MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches, + BatchTimeout: libtrace.DefaultBatchTimeout, + MaxConcurrentBatches: libtrace.DefaultMaxConcurrentBatches, PendingWorkCapacity: uint(c.GetPeerBufferSize()), UserAgentAddition: userAgentAddition, Transport: peerTransport, @@ -164,7 +166,7 @@ func main() { }, }) if err != nil { - fmt.Printf("unable to initialize upstream libhoney client") + fmt.Printf("unable to initialize upstream libtrace client") os.Exit(1) } diff --git a/collect/cache/cache.go b/collect/cache/cache.go index 69bf691b12..11928b33e6 100644 --- a/collect/cache/cache.go +++ b/collect/cache/cache.go @@ -3,9 +3,9 @@ package cache import ( "time" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/types" ) // Cache is a non-threadsafe cache. It must not be used for concurrent access. 
diff --git a/collect/cache/cache_test.go b/collect/cache/cache_test.go index bc14b14d6f..b9c87852c1 100644 --- a/collect/cache/cache_test.go +++ b/collect/cache/cache_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package cache @@ -8,9 +9,9 @@ import ( "github.com/stretchr/testify/assert" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/types" ) // TestCacheSetGet sets a value then fetches it back diff --git a/collect/collect.go b/collect/collect.go index b4cffd3e52..2ce8066f45 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -10,13 +10,13 @@ import ( "time" lru "github.com/hashicorp/golang-lru" - "github.com/honeycombio/refinery/collect/cache" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/sample" - "github.com/honeycombio/refinery/transmit" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/collect/cache" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/sample" + "github.com/jirs5/tracing-proxy/transmit" + "github.com/jirs5/tracing-proxy/types" ) var ErrWouldBlock = errors.New("not adding span, channel buffer is full") diff --git a/collect/collect_benchmark_test.go b/collect/collect_benchmark_test.go index 5fbc753fec..2ab74f23bd 100644 --- a/collect/collect_benchmark_test.go +++ 
b/collect/collect_benchmark_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package collect @@ -11,13 +12,13 @@ import ( lru "github.com/hashicorp/golang-lru" "github.com/stretchr/testify/assert" - "github.com/honeycombio/refinery/collect/cache" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/sample" - "github.com/honeycombio/refinery/transmit" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/collect/cache" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/sample" + "github.com/jirs5/tracing-proxy/transmit" + "github.com/jirs5/tracing-proxy/types" ) func BenchmarkCollect(b *testing.B) { diff --git a/collect/collect_test.go b/collect/collect_test.go index 2d8b62aeff..279a89c191 100644 --- a/collect/collect_test.go +++ b/collect/collect_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package collect @@ -12,13 +13,13 @@ import ( lru "github.com/hashicorp/golang-lru" "github.com/stretchr/testify/assert" - "github.com/honeycombio/refinery/collect/cache" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/sample" - "github.com/honeycombio/refinery/transmit" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/collect/cache" + "github.com/jirs5/tracing-proxy/config" + 
"github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/sample" + "github.com/jirs5/tracing-proxy/transmit" + "github.com/jirs5/tracing-proxy/types" ) // TestAddRootSpan tests that adding a root span winds up with a trace object in diff --git a/config.toml b/config.toml index 9dc2826ff0..a89bb78452 100644 --- a/config.toml +++ b/config.toml @@ -21,7 +21,7 @@ CacheCapacity = 1000 # MetricsHoneycombAPI is the URL for the upstream Honeycomb API. # Eligible for live reload. -MetricsHoneycombAPI = "https://api.honeycomb.io" +MetricsHoneycombAPI = "https://api.jirs5" # MetricsAPIKey is the API key to use to send log events to the Honeycomb logging # dataset. This is separate from the APIKeys used to authenticate regular @@ -29,9 +29,9 @@ MetricsHoneycombAPI = "https://api.honeycomb.io" # Eligible for live reload. MetricsAPIKey = "abcd1234" -# MetricsDataset is the name of the dataset to which to send Refinery metrics +# MetricsDataset is the name of the dataset to which to send tracing-proxy metrics # Eligible for live reload. -MetricsDataset = "Refinery Metrics" +MetricsDataset = "tracing-proxy Metrics" # MetricsReportingInterval is the frequency (in seconds) to send metric events # to Honeycomb. Between 1 and 60 is recommended. diff --git a/config/config.go b/config/config.go index b364375ee5..0e66b53e01 100644 --- a/config/config.go +++ b/config/config.go @@ -25,7 +25,7 @@ type Config interface { // peer traffic GetPeerListenAddr() (string, error) - // GetCompressPeerCommunication will be true if refinery should compress + // GetCompressPeerCommunication will be true if tracing-proxy should compress // data before forwarding it to a peer. 
GetCompressPeerCommunication() bool @@ -108,11 +108,11 @@ type Config interface { // GetPrometheusMetricsConfig returns the config specific to PrometheusMetrics GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) - // GetUpstreamBufferSize returns the size of the libhoney buffer to use for the upstream - // libhoney client + // GetUpstreamBufferSize returns the size of the libtrace buffer to use for the upstream + // libtrace client GetUpstreamBufferSize() int - // GetPeerBufferSize returns the size of the libhoney buffer to use for the peer forwarding - // libhoney client + // GetPeerBufferSize returns the size of the libtrace buffer to use for the peer forwarding + // libtrace client GetPeerBufferSize() int GetIdentifierInterfaceName() (string, error) diff --git a/config/config_test.go b/config/config_test.go index 787ab79b09..cacf9eaf5d 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package config @@ -14,8 +15,8 @@ import ( func TestRedisHostEnvVar(t *testing.T) { host := "redis.magic:1337" - os.Setenv("REFINERY_REDIS_HOST", host) - defer os.Unsetenv("REFINERY_REDIS_HOST") + os.Setenv("tracing-proxy_REDIS_HOST", host) + defer os.Unsetenv("tracing-proxy_REDIS_HOST") c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {}) @@ -30,8 +31,8 @@ func TestRedisHostEnvVar(t *testing.T) { func TestRedisPasswordEnvVar(t *testing.T) { password := "test-redis-password" - os.Setenv("REFINERY_REDIS_PASSWORD", password) - defer os.Unsetenv("REFINERY_REDIS_PASSWORD") + os.Setenv("tracing-proxy_REDIS_PASSWORD", password) + defer os.Unsetenv("tracing-proxy_REDIS_PASSWORD") c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {}) @@ -62,7 +63,7 @@ func TestReload(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://honeycomb.io" + MetricsHoneycombAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" 
MetricsReportingInterval=3 @@ -164,8 +165,8 @@ func TestReadDefaults(t *testing.T) { t.Error("received", d, "expected", false) } - if d := c.GetDryRunFieldName(); d != "refinery_kept" { - t.Error("received", d, "expected", "refinery_kept") + if d := c.GetDryRunFieldName(); d != "tracing-proxy_kept" { + t.Error("received", d, "expected", "tracing-proxy_kept") } if d := c.GetAddHostMetadataToTrace(); d != false { @@ -241,14 +242,14 @@ func TestPeerManagementType(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://honeycomb.io" + MetricsHoneycombAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 [PeerManagement] Type = "redis" - Peers = ["http://refinery-1231:8080"] + Peers = ["http://tracing-proxy-1231:8080"] `)) c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) @@ -275,7 +276,7 @@ func TestAbsentTraceKeyField(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://honeycomb.io" + MetricsHoneycombAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -317,7 +318,7 @@ func TestDebugServiceAddr(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://honeycomb.io" + MetricsHoneycombAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -344,7 +345,7 @@ func TestDryRun(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://honeycomb.io" + MetricsHoneycombAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -382,7 +383,7 @@ func TestMaxAlloc(t *testing.T) { MaxAlloc=17179869184 [HoneycombMetrics] - MetricsHoneycombAPI="http://honeycomb.io" + MetricsHoneycombAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -410,7 +411,7 @@ func TestGetSamplerTypes(t *testing.T) { CacheCapacity=1000 
[HoneycombMetrics] - MetricsHoneycombAPI="http://honeycomb.io" + MetricsHoneycombAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -429,7 +430,7 @@ func TestGetSamplerTypes(t *testing.T) { FieldList = ["request.method","response.status_code"] UseTraceLength = true AddSampleRateKeyToTrace = true - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + AddSampleRateKeyToTraceField = "meta.tracing-proxy.dynsampler_key" ClearFrequencySec = 60 [dataset2] @@ -443,7 +444,7 @@ func TestGetSamplerTypes(t *testing.T) { GoalSampleRate = 10 UseTraceLength = true AddSampleRateKeyToTrace = true - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + AddSampleRateKeyToTraceField = "meta.tracing-proxy.dynsampler_key" FieldList = "[request.method]" Weight = 0.3 @@ -501,7 +502,7 @@ func TestDefaultSampler(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://honeycomb.io" + MetricsHoneycombAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -538,13 +539,13 @@ func TestHoneycombLoggerConfig(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://honeycomb.io" + MetricsHoneycombAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 [HoneycombLogger] - LoggerHoneycombAPI="http://honeycomb.io" + LoggerHoneycombAPI="http://jirs5" LoggerAPIKey="1234" LoggerDataset="loggerDataset" LoggerSamplerEnabled=true @@ -563,7 +564,7 @@ func TestHoneycombLoggerConfig(t *testing.T) { assert.NoError(t, err) - assert.Equal(t, "http://honeycomb.io", loggerConfig.LoggerHoneycombAPI) + assert.Equal(t, "http://jirs5", loggerConfig.LoggerHoneycombAPI) assert.Equal(t, "1234", loggerConfig.LoggerAPIKey) assert.Equal(t, "loggerDataset", loggerConfig.LoggerDataset) assert.Equal(t, true, loggerConfig.LoggerSamplerEnabled) @@ -586,13 +587,13 @@ func TestHoneycombLoggerConfigDefaults(t 
*testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://honeycomb.io" + MetricsHoneycombAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 [HoneycombLogger] - LoggerHoneycombAPI="http://honeycomb.io" + LoggerHoneycombAPI="http://jirs5" LoggerAPIKey="1234" LoggerDataset="loggerDataset" `) diff --git a/config/config_test_reload_error_test.go b/config/config_test_reload_error_test.go index af7447e12f..ecddf17932 100644 --- a/config/config_test_reload_error_test.go +++ b/config/config_test_reload_error_test.go @@ -1,3 +1,4 @@ +//go:build all || !race // +build all !race package config @@ -28,7 +29,7 @@ func TestErrorReloading(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://honeycomb.io" + MetricsHoneycombAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 diff --git a/config/file_config.go b/config/file_config.go index 702b708851..35bfa59f14 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -10,7 +10,7 @@ import ( "github.com/fsnotify/fsnotify" "github.com/go-playground/validator" - libhoney "github.com/honeycombio/libhoney-go" + libtrace "github.com/honeycombio/libhoney-go" "github.com/sirupsen/logrus" viper "github.com/spf13/viper" ) @@ -94,10 +94,10 @@ type PeerManagementConfig struct { func NewConfig(config, rules string, errorCallback func(error)) (Config, error) { c := viper.New() - c.BindEnv("PeerManagement.RedisHost", "REFINERY_REDIS_HOST") - c.BindEnv("PeerManagement.RedisPassword", "REFINERY_REDIS_PASSWORD") - c.BindEnv("HoneycombLogger.LoggerAPIKey", "REFINERY_HONEYCOMB_API_KEY") - c.BindEnv("HoneycombMetrics.MetricsAPIKey", "REFINERY_HONEYCOMB_API_KEY") + c.BindEnv("PeerManagement.RedisHost", "tracing-proxy_REDIS_HOST") + c.BindEnv("PeerManagement.RedisPassword", "tracing-proxy_REDIS_PASSWORD") + 
c.BindEnv("HoneycombLogger.LoggerAPIKey", "tracing-proxy_HONEYCOMB_API_KEY") + c.BindEnv("HoneycombMetrics.MetricsAPIKey", "tracing-proxy_HONEYCOMB_API_KEY") c.SetDefault("ListenAddr", "0.0.0.0:8080") c.SetDefault("PeerListenAddr", "0.0.0.0:8081") c.SetDefault("CompressPeerCommunication", true) @@ -107,7 +107,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("PeerManagement.UseTLS", false) c.SetDefault("PeerManagement.UseTLSInsecure", false) c.SetDefault("PeerManagement.UseIPV6Identifier", false) - c.SetDefault("HoneycombAPI", "https://api.honeycomb.io") + c.SetDefault("HoneycombAPI", "https://api.jirs5") c.SetDefault("Logger", "logrus") c.SetDefault("LoggingLevel", "debug") c.SetDefault("Collector", "InMemCollector") @@ -116,8 +116,8 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("TraceTimeout", 60*time.Second) c.SetDefault("MaxBatchSize", 500) c.SetDefault("SendTicker", 100*time.Millisecond) - c.SetDefault("UpstreamBufferSize", libhoney.DefaultPendingWorkCapacity) - c.SetDefault("PeerBufferSize", libhoney.DefaultPendingWorkCapacity) + c.SetDefault("UpstreamBufferSize", libtrace.DefaultPendingWorkCapacity) + c.SetDefault("PeerBufferSize", libtrace.DefaultPendingWorkCapacity) c.SetDefault("MaxAlloc", uint64(0)) c.SetDefault("HoneycombLogger.LoggerSamplerEnabled", false) c.SetDefault("HoneycombLogger.LoggerSamplerThroughput", 5) @@ -135,7 +135,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) r.SetDefault("Sampler", "DeterministicSampler") r.SetDefault("SampleRate", 1) r.SetDefault("DryRun", false) - r.SetDefault("DryRunFieldName", "refinery_kept") + r.SetDefault("DryRunFieldName", "tracing-proxy_kept") r.SetConfigFile(rules) err = r.ReadInConfig() diff --git a/config_complete.toml b/config_complete.toml index 87fe76e793..0dea90a8d3 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -7,7 +7,7 @@ # front to do the 
decryption. # Should be of the form 0.0.0.0:8080 # Not eligible for live reload. -ListenAddr = "0.0.0.0:8080" +ListenAddr = "0.0.0.0:8082" # GRPCListenAddr is the IP and port on which to listen for incoming events over # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put @@ -22,7 +22,7 @@ GRPCListenAddr = "0.0.0.0:9090" # ListenAddr # Should be of the form 0.0.0.0:8081 # Not eligible for live reload. -PeerListenAddr = "0.0.0.0:8081" +PeerListenAddr = "0.0.0.0:8083" # CompressPeerCommunication determines whether refinery will compress span data # it forwards to peers. If it costs money to transmit data between refinery @@ -47,7 +47,8 @@ APIKeys = [ # HoneycombAPI is the URL for the upstream Honeycomb API. # Eligible for live reload. -HoneycombAPI = "https://api.honeycomb.io" +#HoneycombAPI = "localhost:50052" +HoneycombAPI = "https://asura.opsramp.net" # SendDelay is a short timer that will be triggered when a trace is complete. # Refinery will wait this duration before actually sending the trace. The @@ -117,32 +118,32 @@ Collector = "InMemCollector" # Logger describes which logger to use for Refinery logs. Valid options are # "logrus" and "honeycomb". The logrus option will write logs to STDOUT and the # honeycomb option will send them to a Honeycomb dataset. -Logger = "honeycomb" +Logger = "logrus" # Metrics describes which service to use for Refinery metrics. Valid options are # "prometheus" and "honeycomb". The prometheus option starts a listener that # will reply to a request for /metrics. The honeycomb option will send summary # metrics to a Honeycomb dataset. -Metrics = "honeycomb" +Metrics = "prometheus" ######################### ## Peer Management ## ######################### -# [PeerManagement] -# Type = "file" +[PeerManagement] +Type = "file" # Peers is the list of all servers participating in this proxy cluster. Events # will be sharded evenly across all peers based on the Trace ID. 
Values here # should be the base URL used to access the peer, and should include scheme, # hostname (or ip address) and port. All servers in the cluster should be in # this list, including this host. -# Peers = [ - # "http://127.0.0.1:8081", - # "http://127.0.0.1:8081", +Peers = [ + "http://127.0.0.1:8083", + # "http://127.0.0.1:8083", # "http://10.1.2.3.4:8080", # "http://refinery-1231:8080", # "http://peer-3.fqdn" // assumes port 80 -# ] + ] # [PeerManagement] # Type = "redis" @@ -301,4 +302,4 @@ MetricsReportingInterval = 3 # listen for requests for /metrics. Must be different from the main Refinery # listener. # Not eligible for live reload. -# MetricsListenAddr = "localhost:2112" +MetricsListenAddr = "localhost:2112" diff --git a/go.mod b/go.mod index 11b70e3df0..03717c694c 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/honeycombio/refinery +module github.com/jirs5/tracing-proxy go 1.16 @@ -13,9 +13,10 @@ require ( github.com/golang/protobuf v1.5.2 github.com/gomodule/redigo v1.8.8 github.com/gorilla/mux v1.8.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.3 // indirect github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.6.0 + github.com/honeycombio/husky v0.9.0 github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 @@ -30,7 +31,14 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.9.0 golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect - google.golang.org/grpc v1.43.0 + google.golang.org/grpc v1.44.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect ) + +//replace github.com/honeycombio/libhoney-go v1.15.8 => 
github.com/jirs5/libtrace-go v0.0.0-20220209113356-39ae92fc19f4 +//replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220302232703-acf6fcd2a9de +replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220323133508-a1eeabcc5f98 + +//replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220302161820-fe16f58d3996 +replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220317114134-7b752389d9eb diff --git a/go.sum b/go.sum index 8e415b6989..b7d1540862 100644 --- a/go.sum +++ b/go.sum @@ -150,6 +150,8 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -197,8 +199,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -228,6 +231,8 @@ github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.3 h1:I8MsauTJQXZ8df8qJvEln0kYNc3bSapuaSsEsnFdEFU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.3/go.mod h1:lZdb/YAJUSj9OqrCHs2ihjtoO3+xK3G53wTYXFWRGDo= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -259,15 +264,15 @@ github.com/hashicorp/memberlist v0.3.0/go.mod 
h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.6.0 h1:VufNrLZoVMqDZrj8hHIF6izh/6LbpYfPPaKeGttXMII= -github.com/honeycombio/husky v0.6.0/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= -github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= -github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jirs5/husky v0.9.1-0.20220317114134-7b752389d9eb h1:mWsrVcSHqkhd4oy6ht07rhb21nZsEzbrh8vrDxYJyQ4= +github.com/jirs5/husky v0.9.1-0.20220317114134-7b752389d9eb/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= +github.com/jirs5/libtrace-go v0.0.0-20220323133508-a1eeabcc5f98 h1:SllAt3oySFffDLd9/T4uwE9x7JnGu6PD0T+H7gvWMLU= +github.com/jirs5/libtrace-go v0.0.0-20220323133508-a1eeabcc5f98/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= 
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -437,6 +442,7 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -787,8 +793,9 @@ google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5 h1:zzNejm+EgrbLfDZ6lu9Uud2IVvHySPl8vQzf04laR5Q= +google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc 
v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -815,8 +822,9 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -866,3 +874,4 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/internal/peer/file.go b/internal/peer/file.go index efd13bf7f4..165abc0d38 100644 --- a/internal/peer/file.go +++ b/internal/peer/file.go @@ -1,6 +1,6 @@ package peer -import "github.com/honeycombio/refinery/config" +import "github.com/jirs5/tracing-proxy/config" type 
filePeers struct { c config.Config diff --git a/internal/peer/file_test.go b/internal/peer/file_test.go index b8e453087a..6ae043f502 100644 --- a/internal/peer/file_test.go +++ b/internal/peer/file_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package peer @@ -5,7 +6,7 @@ package peer import ( "testing" - "github.com/honeycombio/refinery/config" + "github.com/jirs5/tracing-proxy/config" ) func TestFilePeers(t *testing.T) { diff --git a/internal/peer/peers.go b/internal/peer/peers.go index ff5a2615c2..94ed4629de 100644 --- a/internal/peer/peers.go +++ b/internal/peer/peers.go @@ -3,7 +3,7 @@ package peer import ( "errors" - "github.com/honeycombio/refinery/config" + "github.com/jirs5/tracing-proxy/config" ) // Peers holds the collection of peers for the cluster diff --git a/internal/peer/peers_test.go b/internal/peer/peers_test.go index 5d11be5085..d48fde80cc 100644 --- a/internal/peer/peers_test.go +++ b/internal/peer/peers_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package peer @@ -5,7 +6,7 @@ package peer import ( "testing" - "github.com/honeycombio/refinery/config" + "github.com/jirs5/tracing-proxy/config" "github.com/stretchr/testify/assert" ) diff --git a/internal/peer/redis.go b/internal/peer/redis.go index 71c8a9a876..a6b7e49d85 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -13,8 +13,8 @@ import ( "time" "github.com/gomodule/redigo/redis" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/internal/redimem" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/internal/redimem" "github.com/sirupsen/logrus" ) @@ -59,13 +59,13 @@ func newRedisPeers(c config.Config) (Peers, error) { IdleTimeout: 5 * time.Minute, Wait: true, Dial: func() (redis.Conn, error) { - // if 
redis is started at the same time as refinery, connecting to redis can - // fail and cause refinery to error out. + // if redis is started at the same time as tracing-proxy, connecting to redis can + // fail and cause tracing-proxy to error out. // Instead, we will try to connect to redis for up to 10 seconds with // a 1 second delay between attempts to allow the redis process to init var ( conn redis.Conn - err error + err error ) for timeout := time.After(10 * time.Second); ; { select { @@ -91,7 +91,7 @@ func newRedisPeers(c config.Config) (Peers, error) { peers := &redisPeers{ store: &redimem.RedisMembership{ - Prefix: "refinery", + Prefix: "tracing-proxy", Pool: pool, }, peers: make([]string, 1), diff --git a/internal/redimem/redimem.go b/internal/redimem/redimem.go index 4def176a5a..317f223da9 100644 --- a/internal/redimem/redimem.go +++ b/internal/redimem/redimem.go @@ -26,7 +26,7 @@ type Membership interface { } const ( - globalPrefix = "refinery" + globalPrefix = "tracing-proxy" defaultRepeatCount = 2 // redisScanTimeout indicates how long to attempt to scan for peers. 
diff --git a/logger/honeycomb.go b/logger/honeycomb.go index f5f6c76fd9..16e8af75a5 100644 --- a/logger/honeycomb.go +++ b/logger/honeycomb.go @@ -9,10 +9,10 @@ import ( "time" "github.com/honeycombio/dynsampler-go" - libhoney "github.com/honeycombio/libhoney-go" + libtrace "github.com/honeycombio/libhoney-go" "github.com/honeycombio/libhoney-go/transmission" - "github.com/honeycombio/refinery/config" + "github.com/jirs5/tracing-proxy/config" ) // HoneycombLogger is a Logger implementation that sends all logs to a Honeycomb @@ -23,14 +23,14 @@ type HoneycombLogger struct { UpstreamTransport *http.Transport `inject:"upstreamTransport"` Version string `inject:"version"` loggerConfig config.HoneycombLoggerConfig - libhClient *libhoney.Client - builder *libhoney.Builder + libhClient *libtrace.Client + builder *libtrace.Builder sampler dynsampler.Sampler } type HoneycombEntry struct { loggerConfig config.HoneycombLoggerConfig - builder *libhoney.Builder + builder *libtrace.Builder sampler dynsampler.Sampler } @@ -63,9 +63,9 @@ func (h *HoneycombLogger) Start() error { // logs are often sent in flurries; flush every half second MaxBatchSize: 100, BatchTimeout: 500 * time.Millisecond, - UserAgentAddition: "refinery/" + h.Version + " (metrics)", + UserAgentAddition: "tracing-proxy/" + h.Version + " (metrics)", Transport: h.UpstreamTransport, - PendingWorkCapacity: libhoney.DefaultPendingWorkCapacity, + PendingWorkCapacity: libtrace.DefaultPendingWorkCapacity, } } @@ -81,13 +81,13 @@ func (h *HoneycombLogger) Start() error { } } - libhClientConfig := libhoney.ClientConfig{ + libhClientConfig := libtrace.ClientConfig{ APIHost: h.loggerConfig.LoggerHoneycombAPI, APIKey: h.loggerConfig.LoggerAPIKey, Dataset: h.loggerConfig.LoggerDataset, Transmission: loggerTx, } - libhClient, err := libhoney.NewClient(libhClientConfig) + libhClient, err := libtrace.NewClient(libhClientConfig) if err != nil { 
return err } @@ -152,7 +152,7 @@ func (h *HoneycombLogger) reloadBuilder() { func (h *HoneycombLogger) Stop() error { fmt.Printf("stopping honey logger\n") - libhoney.Flush() + libtrace.Flush() return nil } diff --git a/logger/logger.go b/logger/logger.go index d36bf01c5b..2298e66a9a 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - "github.com/honeycombio/refinery/config" + "github.com/jirs5/tracing-proxy/config" ) type Logger interface { diff --git a/logger/logger_test.go b/logger/logger_test.go index b0ed975630..de03ab625b 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package logger @@ -5,7 +6,7 @@ package logger import ( "testing" - "github.com/honeycombio/refinery/config" + "github.com/jirs5/tracing-proxy/config" "github.com/stretchr/testify/assert" ) diff --git a/logger/logrus.go b/logger/logrus.go index 05ce9ab2be..f0fdc5a74c 100644 --- a/logger/logrus.go +++ b/logger/logrus.go @@ -3,7 +3,7 @@ package logger import ( "github.com/sirupsen/logrus" - "github.com/honeycombio/refinery/config" + "github.com/jirs5/tracing-proxy/config" ) // LogrusLogger is a Logger implementation that sends all logs to stdout using diff --git a/logger/mock.go b/logger/mock.go index 05eb1eff26..dc6270ad58 100644 --- a/logger/mock.go +++ b/logger/mock.go @@ -3,7 +3,7 @@ package logger import ( "fmt" - "github.com/honeycombio/refinery/config" + "github.com/jirs5/tracing-proxy/config" ) type MockLogger struct { diff --git a/metrics/honeycomb.go b/metrics/honeycomb.go index de226a0f7a..7dddde5d15 100644 --- a/metrics/honeycomb.go +++ b/metrics/honeycomb.go @@ -10,11 +10,11 @@ import ( "sync" "time" - libhoney "github.com/honeycombio/libhoney-go" + libtrace "github.com/honeycombio/libhoney-go" 
"github.com/honeycombio/libhoney-go/transmission" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" ) type HoneycombMetrics struct { @@ -30,7 +30,7 @@ type HoneycombMetrics struct { histogramsLock sync.Mutex histograms map[string]*histogram - libhClient *libhoney.Client + libhClient *libtrace.Client latestMemStatsLock sync.RWMutex latestMemStats runtime.MemStats @@ -72,7 +72,7 @@ func (h *HoneycombMetrics) Start() error { } h.reportingFreq = mc.MetricsReportingInterval - if err = h.initLibhoney(mc); err != nil { + if err = h.initlibtrace(mc); err != nil { return err } @@ -98,24 +98,24 @@ func (h *HoneycombMetrics) reloadBuilder() { h.libhClient.Close() // cancel the two reporting goroutines and restart them h.reportingCancelFunc() - h.initLibhoney(mc) + h.initlibtrace(mc) } -func (h *HoneycombMetrics) initLibhoney(mc config.HoneycombMetricsConfig) error { +func (h *HoneycombMetrics) initlibtrace(mc config.HoneycombMetricsConfig) error { metricsTx := &transmission.Honeycomb{ // metrics are always sent as a single event, so don't wait for the timeout MaxBatchSize: 1, BlockOnSend: true, - UserAgentAddition: "refinery/" + h.Version + " (metrics)", + UserAgentAddition: "tracing-proxy/" + h.Version + " (metrics)", Transport: h.UpstreamTransport, } - libhClientConfig := libhoney.ClientConfig{ + libhClientConfig := libtrace.ClientConfig{ APIHost: mc.MetricsHoneycombAPI, APIKey: mc.MetricsAPIKey, Dataset: mc.MetricsDataset, Transmission: metricsTx, } - libhClient, err := libhoney.NewClient(libhClientConfig) + libhClient, err := libtrace.NewClient(libhClientConfig) if err != nil { return err } @@ -172,7 +172,7 @@ func (h *HoneycombMetrics) refreshMemStats(ctx context.Context) { } } -// readResponses reads the responses from the libhoney responses queue and logs +// readResponses reads the 
responses from the libtrace responses queue and logs // any errors that come down it func (h *HoneycombMetrics) readResponses(ctx context.Context) { resps := h.libhClient.TxResponses() @@ -200,7 +200,7 @@ func (h *HoneycombMetrics) readResponses(ctx context.Context) { case <-ctx.Done(): // bail out; we're refreshing the config and will launch a new // response reader. - h.Logger.Debug().Logf("restarting honeycomb metrics read libhoney responses goroutine") + h.Logger.Debug().Logf("restarting honeycomb metrics read libtrace responses goroutine") return } } diff --git a/metrics/metrics.go b/metrics/metrics.go index 4d41d63807..43fa26d582 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - "github.com/honeycombio/refinery/config" + "github.com/jirs5/tracing-proxy/config" ) type Metrics interface { diff --git a/metrics/prometheus.go b/metrics/prometheus.go index 4161fe78d6..3d7d7a0dac 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -9,8 +9,8 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" ) type PromMetrics struct { diff --git a/metrics/prometheus_test.go b/metrics/prometheus_test.go index 539f926a41..d3910aa5ff 100644 --- a/metrics/prometheus_test.go +++ b/metrics/prometheus_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package metrics @@ -6,8 +7,8 @@ import ( "fmt" "testing" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" 
"github.com/stretchr/testify/assert" ) diff --git a/refinery.service b/refinery.service index 01de188b20..331dd5e8e2 100644 --- a/refinery.service +++ b/refinery.service @@ -1,9 +1,9 @@ [Unit] -Description=Refinery Honeycomb Trace-Aware Sampling Proxy +Description=tracing-proxy Honeycomb Trace-Aware Sampling Proxy After=network.target [Service] -ExecStart=/usr/bin/refinery -c /etc/refinery/refinery.toml -r /etc/refinery/rules.toml +ExecStart=/usr/bin/tracing-proxy -c /etc/tracing-proxy/tracing-proxy.toml -r /etc/tracing-proxy/rules.toml KillMode=process Restart=on-failure User=honeycomb @@ -11,4 +11,4 @@ Group=honeycomb LimitNOFILE=infinity [Install] -Alias=refinery refinery.service +Alias=tracing-proxy tracing-proxy.service diff --git a/refinery.upstart b/refinery.upstart index 7b3b9e817e..e66e885aee 100644 --- a/refinery.upstart +++ b/refinery.upstart @@ -1,12 +1,12 @@ -# Upstart job for Refinery, the Honeycomb Trace-Aware Sampling Proxy -# https://honeycomb.io/ +# Upstart job for tracing-proxy, the Honeycomb Trace-Aware Sampling Proxy +# https://jirs5/ -description "Refinery Daemon" -author "Ben Hartshorne " +description "tracing-proxy Daemon" +author "Ben Hartshorne " start on runlevel [2345] stop on runlevel [!2345] respawn -exec su -s /bin/sh -c 'exec "$0" "$@"' honeycomb -- /usr/bin/refinery -c /etc/refinery/refinery.toml -r /etc/refinery/rules.toml +exec su -s /bin/sh -c 'exec "$0" "$@"' honeycomb -- /usr/bin/tracing-proxy -c /etc/tracing-proxy/tracing-proxy.toml -r /etc/tracing-proxy/rules.toml diff --git a/route/errors.go b/route/errors.go index c901f8be32..5a13c8c9d8 100644 --- a/route/errors.go +++ b/route/errors.go @@ -69,7 +69,7 @@ func (r *Router) handlerReturnWithError(w http.ResponseWriter, he handlerError, errmsg = ErrGenericMessage } - jsonErrMsg := []byte(`{"source":"refinery","error":"` + errmsg + `"}`) + jsonErrMsg := []byte(`{"source":"tracing-proxy","error":"` + errmsg + `"}`) w.Write(jsonErrMsg) } diff --git 
a/route/errors_test.go b/route/errors_test.go index 4351dec469..327a8662e4 100644 --- a/route/errors_test.go +++ b/route/errors_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package route @@ -8,7 +9,7 @@ import ( "net/http/httptest" "testing" - "github.com/honeycombio/refinery/logger" + "github.com/jirs5/tracing-proxy/logger" ) func TestHandlerReturnWithError(t *testing.T) { diff --git a/route/middleware.go b/route/middleware.go index 7836433853..c0f13a0454 100644 --- a/route/middleware.go +++ b/route/middleware.go @@ -9,7 +9,7 @@ import ( "time" "github.com/gorilla/mux" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/types" ) // for generating request IDs @@ -78,7 +78,7 @@ func (r *Router) panicCatcher(next http.Handler) http.Handler { }) } -// requestLogger logs one line debug per request that comes through Refinery +// requestLogger logs one line debug per request that comes through tracing-proxy func (r *Router) requestLogger(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { arrivalTime := time.Now() diff --git a/route/otlp_trace.go b/route/otlp_trace.go index df6a2ecf59..19ec432859 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -2,49 +2,56 @@ package route import ( "context" - "errors" + "fmt" "net/http" huskyotlp "github.com/honeycombio/husky/otlp" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/types" collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" ) func (router *Router) postOTLP(w http.ResponseWriter, req *http.Request) { ri := huskyotlp.GetRequestInfoFromHttpHeaders(req.Header) - if err := ri.ValidateHeaders(); err != nil { + /*if err := ri.ValidateHeaders(); err != nil { if errors.Is(err, huskyotlp.ErrInvalidContentType) { router.handlerReturnWithError(w, ErrInvalidContentType, 
err) } else { router.handlerReturnWithError(w, ErrAuthNeeded, err) } return - } + }*/ - result, err := huskyotlp.TranslateTraceRequestFromReader(req.Body, ri) + result, err := huskyotlp.TranslateTraceReqFromReader(req.Body, ri) if err != nil { router.handlerReturnWithError(w, ErrUpstreamFailed, err) return } - if err := processTraceRequest(req.Context(), router, result.Events, ri.ApiKey, ri.Dataset); err != nil { + token := ri.ApiToken + tenantId := ri.ApiTenantId + if err := processTraceRequest(req.Context(), router, result.Batches, ri.ApiKey, ri.Dataset, token, tenantId); err != nil { router.handlerReturnWithError(w, ErrUpstreamFailed, err) } } func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) { ri := huskyotlp.GetRequestInfoFromGrpcMetadata(ctx) - if err := ri.ValidateHeaders(); err != nil { + /*if err := ri.ValidateHeaders(); err != nil { return nil, huskyotlp.AsGRPCError(err) - } - - result, err := huskyotlp.TranslateTraceRequest(req) + }*/ + fmt.Println("Translating Trace Req ..") + result, err := huskyotlp.TranslateTraceReq(req, ri) if err != nil { return nil, huskyotlp.AsGRPCError(err) } + token := ri.ApiToken + tenantId := ri.ApiTenantId - if err := processTraceRequest(ctx, router, result.Events, ri.ApiKey, ri.Dataset); err != nil { + fmt.Println("Token:", token) + fmt.Println("TenantId:", tenantId) + + if err := processTraceRequest(ctx, router, result.Batches, ri.ApiKey, ri.Dataset, token, tenantId); err != nil { return nil, huskyotlp.AsGRPCError(err) } @@ -54,9 +61,11 @@ func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTrac func processTraceRequest( ctx context.Context, router *Router, - batch []huskyotlp.Event, + batches []huskyotlp.Batch, apiKey string, - datasetName string) error { + datasetName string, + token string, + tenantId string) error { var requestID types.RequestIDContextKey apiHost, err := 
router.Config.GetHoneycombAPI() @@ -65,18 +74,22 @@ func processTraceRequest( return err } - for _, ev := range batch { - event := &types.Event{ - Context: ctx, - APIHost: apiHost, - APIKey: apiKey, - Dataset: datasetName, - SampleRate: uint(ev.SampleRate), - Timestamp: ev.Timestamp, - Data: ev.Attributes, - } - if err = router.processEvent(event, requestID); err != nil { - router.Logger.Error().Logf("Error processing event: " + err.Error()) + for _, batch := range batches { + for _, ev := range batch.Events { + event := &types.Event{ + Context: ctx, + APIHost: apiHost, + APIKey: apiKey, + APIToken: token, + APITenantId: tenantId, + Dataset: datasetName, + SampleRate: uint(ev.SampleRate), + Timestamp: ev.Timestamp, + Data: ev.Attributes, + } + if err = router.processEvent(event, requestID); err != nil { + router.Logger.Error().Logf("Error processing event: " + err.Error()) + } } } diff --git a/route/otlp_trace_test.go b/route/otlp_trace_test.go index 9a9381bfce..05204bcbc4 100644 --- a/route/otlp_trace_test.go +++ b/route/otlp_trace_test.go @@ -13,10 +13,10 @@ import ( "github.com/golang/protobuf/proto" huskyotlp "github.com/honeycombio/husky/otlp" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/transmit" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/transmit" "github.com/klauspost/compress/zstd" "github.com/stretchr/testify/assert" collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" @@ -296,7 +296,7 @@ func TestOTLPHandler(t *testing.T) { w := httptest.NewRecorder() router.postOTLP(w, request) assert.Equal(t, w.Code, http.StatusNotImplemented) - 
assert.Equal(t, `{"source":"refinery","error":"invalid content-type - only 'application/protobuf' is supported"}`, string(w.Body.String())) + assert.Equal(t, `{"source":"tracing-proxy","error":"invalid content-type - only 'application/protobuf' is supported"}`, string(w.Body.String())) assert.Equal(t, 0, len(mockTransmission.Events)) mockTransmission.Flush() diff --git a/route/route.go b/route/route.go index d0f7ee8005..8f3a0840cd 100644 --- a/route/route.go +++ b/route/route.go @@ -27,13 +27,13 @@ import ( // grpc/gzip compressor, auto registers on import _ "google.golang.org/grpc/encoding/gzip" - "github.com/honeycombio/refinery/collect" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/sharder" - "github.com/honeycombio/refinery/transmit" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/collect" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/sharder" + "github.com/jirs5/tracing-proxy/transmit" + "github.com/jirs5/tracing-proxy/types" collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" ) @@ -243,7 +243,7 @@ func (r *Router) Stop() error { func (r *Router) alive(w http.ResponseWriter, req *http.Request) { r.iopLogger.Debug().Logf("answered /x/alive check") - w.Write([]byte(`{"source":"refinery","alive":"yes"}`)) + w.Write([]byte(`{"source":"tracing-proxy","alive":"yes"}`)) } func (r *Router) panic(w http.ResponseWriter, req *http.Request) { @@ -251,7 +251,7 @@ func (r *Router) panic(w http.ResponseWriter, req *http.Request) { } func (r *Router) version(w http.ResponseWriter, req *http.Request) { - 
w.Write([]byte(fmt.Sprintf(`{"source":"refinery","version":"%s"}`, r.versionStr))) + w.Write([]byte(fmt.Sprintf(`{"source":"tracing-proxy","version":"%s"}`, r.versionStr))) } func (r *Router) debugTrace(w http.ResponseWriter, req *http.Request) { diff --git a/route/route_test.go b/route/route_test.go index 47b90364fe..ebe7ff4e78 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -13,14 +13,14 @@ import ( "time" "github.com/facebookgo/inject" - "github.com/honeycombio/refinery/collect" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/transmit" + "github.com/jirs5/tracing-proxy/collect" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/transmit" "github.com/gorilla/mux" - "github.com/honeycombio/refinery/sharder" + "github.com/jirs5/tracing-proxy/sharder" "github.com/klauspost/compress/zstd" "github.com/vmihailenco/msgpack/v4" "google.golang.org/grpc/metadata" diff --git a/rules_complete.toml b/rules_complete.toml index e3711d989a..ee27ae4558 100644 --- a/rules_complete.toml +++ b/rules_complete.toml @@ -4,10 +4,10 @@ # DryRun - If enabled, marks traces that would be dropped given current sampling rules, # and sends all traces regardless -# DryRun = false +DryRun = true # DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to refinery_kept -# DryRunFieldName = "refinery_kept" +DryRunFieldName = "fromProxy" # DeterministicSampler is a section of the config for manipulating the # Deterministic Sampler implementation. 
This is the simplest sampling algorithm diff --git a/sample/deterministic.go b/sample/deterministic.go index cfdd2b816a..c1b8ab848e 100644 --- a/sample/deterministic.go +++ b/sample/deterministic.go @@ -4,9 +4,9 @@ import ( "crypto/sha1" "math" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/types" ) // shardingSalt is a random bit to make sure we don't shard the same as any diff --git a/sample/deterministic_test.go b/sample/deterministic_test.go index 01d1f4af53..970ca451ca 100644 --- a/sample/deterministic_test.go +++ b/sample/deterministic_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package sample @@ -8,9 +9,9 @@ import ( "github.com/stretchr/testify/assert" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/types" ) // TestInitialization tests that sample rates are consistently returned diff --git a/sample/dynamic.go b/sample/dynamic.go index eda9e3d3ec..ca59685543 100644 --- a/sample/dynamic.go +++ b/sample/dynamic.go @@ -5,10 +5,10 @@ import ( dynsampler "github.com/honeycombio/dynsampler-go" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + 
"github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/types" ) type DynamicSampler struct { diff --git a/sample/dynamic_ema.go b/sample/dynamic_ema.go index 1fd44d8b21..31a323b814 100644 --- a/sample/dynamic_ema.go +++ b/sample/dynamic_ema.go @@ -5,10 +5,10 @@ import ( dynsampler "github.com/honeycombio/dynsampler-go" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/types" ) type EMADynamicSampler struct { diff --git a/sample/dynamic_ema_test.go b/sample/dynamic_ema_test.go index c295938038..b1a156e6e7 100644 --- a/sample/dynamic_ema_test.go +++ b/sample/dynamic_ema_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package sample @@ -5,10 +6,10 @@ package sample import ( "testing" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/types" "github.com/stretchr/testify/assert" ) diff --git a/sample/dynamic_test.go b/sample/dynamic_test.go index f472d234d3..550e216071 100644 --- a/sample/dynamic_test.go +++ b/sample/dynamic_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package sample @@ -5,10 +6,10 @@ package sample import ( "testing" - 
"github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/types" "github.com/stretchr/testify/assert" ) diff --git a/sample/rules.go b/sample/rules.go index 3a56d24040..6d938eed3d 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -4,10 +4,10 @@ import ( "math/rand" "strings" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/types" ) type RulesBasedSampler struct { diff --git a/sample/rules_test.go b/sample/rules_test.go index 6bcb5b747d..95b57a0e19 100644 --- a/sample/rules_test.go +++ b/sample/rules_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package sample @@ -5,10 +6,10 @@ package sample import ( "testing" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/types" "github.com/stretchr/testify/assert" ) diff --git a/sample/sample.go b/sample/sample.go index 
eef4337ca5..ac05d7f31b 100644 --- a/sample/sample.go +++ b/sample/sample.go @@ -3,10 +3,10 @@ package sample import ( "os" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/types" ) type Sampler interface { diff --git a/sample/sample_test.go b/sample/sample_test.go index 53fad51b40..70c8cf86a8 100644 --- a/sample/sample_test.go +++ b/sample/sample_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package sample @@ -6,9 +7,9 @@ import ( "testing" "github.com/facebookgo/inject" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" ) func TestDependencyInjection(t *testing.T) { diff --git a/sample/totalthroughput.go b/sample/totalthroughput.go index d31ca68b81..25629e2d75 100644 --- a/sample/totalthroughput.go +++ b/sample/totalthroughput.go @@ -5,10 +5,10 @@ import ( dynsampler "github.com/honeycombio/dynsampler-go" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + 
"github.com/jirs5/tracing-proxy/types" ) type TotalThroughputSampler struct { diff --git a/sample/totalthroughput_test.go b/sample/totalthroughput_test.go index edef7d42a0..d5567a4aa2 100644 --- a/sample/totalthroughput_test.go +++ b/sample/totalthroughput_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package sample @@ -5,10 +6,10 @@ package sample import ( "testing" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/types" "github.com/stretchr/testify/assert" ) diff --git a/sample/trace_key.go b/sample/trace_key.go index 80d0c6f1e5..45e5f206fa 100644 --- a/sample/trace_key.go +++ b/sample/trace_key.go @@ -5,7 +5,7 @@ import ( "sort" "strconv" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/types" ) type traceKey struct { diff --git a/sample/trace_key_test.go b/sample/trace_key_test.go index e074b8fce8..1448ecafb2 100644 --- a/sample/trace_key_test.go +++ b/sample/trace_key_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package sample @@ -5,7 +6,7 @@ package sample import ( "testing" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/types" "github.com/stretchr/testify/assert" ) diff --git a/service/debug/debug_service.go b/service/debug/debug_service.go index c9f743bd38..3de36d48b0 100644 --- a/service/debug/debug_service.go +++ b/service/debug/debug_service.go @@ -14,7 +14,7 @@ import ( "sync" "syscall" - "github.com/honeycombio/refinery/config" + 
"github.com/jirs5/tracing-proxy/config" metrics "github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics/exp" "github.com/sirupsen/logrus" diff --git a/sharder/deterministic.go b/sharder/deterministic.go index 8b77ecb135..2da822af82 100644 --- a/sharder/deterministic.go +++ b/sharder/deterministic.go @@ -10,9 +10,9 @@ import ( "sync" "time" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/internal/peer" - "github.com/honeycombio/refinery/logger" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/internal/peer" + "github.com/jirs5/tracing-proxy/logger" "github.com/pkg/errors" ) @@ -123,7 +123,7 @@ func (d *DeterministicSharder) Start() error { } // go through peer list, resolve each address, see if any of them match any - // local interface. Note that this assumes only one instance of Refinery per + // local interface. Note that this assumes only one instance of tracing-proxy per // host can run. 
for i, peerShard := range d.peers { d.Logger.Debug().WithField("peer", peerShard).WithField("self", localAddrs).Logf("Considering peer looking for self") diff --git a/sharder/deterministic_test.go b/sharder/deterministic_test.go index 49336a8f5e..88189abb9e 100644 --- a/sharder/deterministic_test.go +++ b/sharder/deterministic_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package sharder @@ -5,9 +6,9 @@ package sharder import ( "testing" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/internal/peer" - "github.com/honeycombio/refinery/logger" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/internal/peer" + "github.com/jirs5/tracing-proxy/logger" "github.com/stretchr/testify/assert" ) diff --git a/sharder/sharder.go b/sharder/sharder.go index 5349ec792b..548476249d 100644 --- a/sharder/sharder.go +++ b/sharder/sharder.go @@ -4,14 +4,14 @@ import ( "fmt" "os" - "github.com/honeycombio/refinery/config" + "github.com/jirs5/tracing-proxy/config" ) -// Shard repreesents a single instance of Refinery. +// Shard repreesents a single instance of tracing-proxy. 
type Shard interface { Equals(Shard) bool // GetAddress returns a string suitable for use in building a URL, eg - // http://refinery-1234:8080 or https://10.2.3.4 + // http://tracing-proxy-1234:8080 or https://10.2.3.4 GetAddress() string } diff --git a/sharder/single.go b/sharder/single.go index e2003a29df..fd1900b8fc 100644 --- a/sharder/single.go +++ b/sharder/single.go @@ -1,7 +1,7 @@ package sharder import ( - "github.com/honeycombio/refinery/logger" + "github.com/jirs5/tracing-proxy/logger" ) // SingleShard implements the Shard interface diff --git a/transmit/mock.go b/transmit/mock.go index c018131a96..e001978ff1 100644 --- a/transmit/mock.go +++ b/transmit/mock.go @@ -3,7 +3,7 @@ package transmit import ( "sync" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/types" ) type MockTransmission struct { diff --git a/transmit/transmit.go b/transmit/transmit.go index fa46711211..b8ae6cbbba 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -5,13 +5,13 @@ import ( "os" "sync" - libhoney "github.com/honeycombio/libhoney-go" + libtrace "github.com/honeycombio/libhoney-go" "github.com/honeycombio/libhoney-go/transmission" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" - "github.com/honeycombio/refinery/types" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" + "github.com/jirs5/tracing-proxy/types" ) type Transmission interface { @@ -33,12 +33,12 @@ type DefaultTransmission struct { Logger logger.Logger `inject:""` Metrics metrics.Metrics `inject:"metrics"` Version string `inject:"version"` - LibhClient *libhoney.Client + LibhClient *libtrace.Client // Type is peer 
or upstream, and used only for naming metrics Name string - builder *libhoney.Builder + builder *libtrace.Builder responseCanceler context.CancelFunc } @@ -58,7 +58,7 @@ func (d *DefaultTransmission) Start() error { if d.Config.GetAddHostMetadataToTrace() { if hostname, err := os.Hostname(); err == nil && hostname != "" { // add hostname to spans - d.LibhClient.AddField("meta.refinery.local_hostname", hostname) + d.LibhClient.AddField("meta.tracing-proxy.local_hostname", hostname) } } @@ -66,7 +66,7 @@ func (d *DefaultTransmission) Start() error { d.builder.APIHost = upstreamAPI once.Do(func() { - libhoney.UserAgentAddition = "refinery/" + d.Version + libtrace.UserAgentAddition = "tracing-proxy/" + d.Version }) d.Metrics.Register(d.Name+counterEnqueueErrors, "counter") @@ -105,6 +105,8 @@ func (d *DefaultTransmission) EnqueueEvent(ev *types.Event) { libhEv.Dataset = ev.Dataset libhEv.SampleRate = ev.SampleRate libhEv.Timestamp = ev.Timestamp + libhEv.APIToken = ev.APIToken + libhEv.APITenantId = ev.APITenantId for k, v := range ev.Data { libhEv.AddField(k, v) diff --git a/transmit/transmit_test.go b/transmit/transmit_test.go index efb0a954b0..a69015500b 100644 --- a/transmit/transmit_test.go +++ b/transmit/transmit_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package transmit @@ -6,11 +7,11 @@ import ( "testing" "github.com/facebookgo/inject" - "github.com/honeycombio/refinery/config" - "github.com/honeycombio/refinery/logger" - "github.com/honeycombio/refinery/metrics" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" + "github.com/jirs5/tracing-proxy/metrics" - libhoney "github.com/honeycombio/libhoney-go" + libtrace "github.com/honeycombio/libhoney-go" "github.com/stretchr/testify/assert" ) @@ -19,14 +20,14 @@ func 
TestDefaultTransmissionUpdatesUserAgentAdditionAfterStart(t *testing.T) { Config: &config.MockConfig{}, Logger: &logger.NullLogger{}, Metrics: &metrics.NullMetrics{}, - LibhClient: &libhoney.Client{}, + LibhClient: &libtrace.Client{}, Version: "test", } - assert.Equal(t, libhoney.UserAgentAddition, "") + assert.Equal(t, libtrace.UserAgentAddition, "") err := transmission.Start() assert.Nil(t, err) - assert.Equal(t, libhoney.UserAgentAddition, "refinery/test") + assert.Equal(t, libtrace.UserAgentAddition, "tracing-proxy/test") } func TestDependencyInjection(t *testing.T) { diff --git a/types/event.go b/types/event.go index 0a1115155c..29ff844fa1 100644 --- a/types/event.go +++ b/types/event.go @@ -7,7 +7,7 @@ import ( const ( APIKeyHeader = "X-Honeycomb-Team" - // libhoney-js uses this + // libtrace-js uses this APIKeyHeaderShort = "X-Hny-Team" DatasetHeader = "X-Honeycomb-Dataset" SampleRateHeader = "X-Honeycomb-Samplerate" @@ -19,17 +19,19 @@ type RequestIDContextKey struct{} // event is not part of a trace - it's an event that showed up with no trace ID type Event struct { - Context context.Context - APIHost string - APIKey string - Dataset string - SampleRate uint - Timestamp time.Time - Data map[string]interface{} + Context context.Context + APIHost string + APIKey string + APIToken string + APITenantId string + Dataset string + SampleRate uint + Timestamp time.Time + Data map[string]interface{} } // Trace isn't something that shows up on the wire; it gets created within -// Refinery. Traces are not thread-safe; only one goroutine should be working +// tracing-proxy. Traces are not thread-safe; only one goroutine should be working // with a trace object at a time. type Trace struct { APIHost string @@ -47,7 +49,7 @@ type Trace struct { SendBy time.Time // StartTime is the server time when the first span arrived for this trace. 
- // Used to calculate how long traces spend sitting in Refinery + // Used to calculate how long traces spend sitting in tracing-proxy StartTime time.Time HasRootSpan bool From 4dafb0fd30875b0819040652a103768bbef2e06f Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Fri, 25 Mar 2022 14:33:51 +0530 Subject: [PATCH 128/351] adding deployement files for kubernetes --- cmd/tracing-proxy/main.go | 2 +- deployment/kubernetes/k8s-config-cm.yaml | 320 ++++++++++++++++++++++ deployment/kubernetes/k8s-deployment.yaml | 64 +++++ deployment/kubernetes/k8s-rules-cm.yaml | 266 ++++++++++++++++++ 4 files changed, 651 insertions(+), 1 deletion(-) create mode 100644 deployment/kubernetes/k8s-config-cm.yaml create mode 100644 deployment/kubernetes/k8s-deployment.yaml create mode 100644 deployment/kubernetes/k8s-rules-cm.yaml diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index ace7b783c1..41d63b4ad2 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -33,7 +33,7 @@ var BuildID string var version string type Options struct { - ConfigFile string `short:"c" long:"config" description:"Path to config file" default:"/etc/tracing-proxy/tracing-proxy.toml"` + ConfigFile string `short:"c" long:"config" description:"Path to config file" default:"/etc/tracing-proxy/config.toml"` RulesFile string `short:"r" long:"rules_config" description:"Path to rules config file" default:"/etc/tracing-proxy/rules.toml"` Version bool `short:"v" long:"version" description:"Print version number and exit"` Debug bool `short:"d" long:"debug" description:"If enabled, runs debug service (runs on the first open port between localhost:6060 and :6069 by default)"` diff --git a/deployment/kubernetes/k8s-config-cm.yaml b/deployment/kubernetes/k8s-config-cm.yaml new file mode 100644 index 0000000000..ad8fd4a4f2 --- /dev/null +++ b/deployment/kubernetes/k8s-config-cm.yaml @@ -0,0 +1,320 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: opsramp-tracing-proxy-config + 
namespace: opsramp-tracing-proxy +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: opsramp-tracing-proxy-config + labels: + name: opsramp-tracing-proxy-config + namespace: opsramp-tracing-proxy +data: + config.toml: |- + ##################### + ## Refinery Config ## + ##################### + + # ListenAddr is the IP and port on which to listen for incoming events. Incoming + # traffic is expected to be HTTP, so if using SSL put something like nginx in + # front to do the decryption. + # Should be of the form 0.0.0.0:8080 + # Not eligible for live reload. + ListenAddr = "0.0.0.0:8082" + + # GRPCListenAddr is the IP and port on which to listen for incoming events over + # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put + # something like nginx in front to do the decryption. + # Should be of the form 0.0.0.0:9090 + # Not eligible for live reload. + GRPCListenAddr = "0.0.0.0:9090" + + # PeerListenAddr is the IP and port on which to listen for traffic being + # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL + # put something like nginx in front to do the decryption. Must be different from + # ListenAddr + # Should be of the form 0.0.0.0:8081 + # Not eligible for live reload. + PeerListenAddr = "0.0.0.0:8083" + + # CompressPeerCommunication determines whether refinery will compress span data + # it forwards to peers. If it costs money to transmit data between refinery + # instances (e.g. they're spread across AWS availability zones), then you + # almost certainly want compression enabled to reduce your bill. The option to + # disable it is provided as an escape hatch for deployments that value lower CPU + # utilization over data transfer costs. + CompressPeerCommunication = true + + # APIKeys is a list of Honeycomb API keys that the proxy will accept. This list + # only applies to events - other Honeycomb API actions will fall through to the + # upstream API directly. 
+ # Adding keys here causes events arriving with API keys not in this list to be + # rejected with an HTTP 401 error If an API key that is a literal '*' is in the + # list, all API keys are accepted. + # Eligible for live reload. + APIKeys = [ + # "replace-me", + # "more-optional-keys", + "*", # wildcard accept all keys + ] + + # HoneycombAPI is the URL for the upstream Honeycomb API. + # Eligible for live reload. + #HoneycombAPI = "localhost:50052" + HoneycombAPI = "https://asura.opsramp.net" + + # SendDelay is a short timer that will be triggered when a trace is complete. + # Refinery will wait this duration before actually sending the trace. The + # reason for this short delay is to allow for small network delays or clock + # jitters to elapse and any final spans to arrive before actually sending the + # trace. This supports duration strings with supplied units. Set to 0 for + # immediate sends. + # Eligible for live reload. + SendDelay = "2s" + + # TraceTimeout is a long timer; it represents the outside boundary of how long + # to wait before sending an incomplete trace. Normally traces are sent when the + # root span arrives. Sometimes the root span never arrives (due to crashes or + # whatever), and this timer will send a trace even without having received the + # root span. If you have particularly long-lived traces you should increase this + # timer. This supports duration strings with supplied units. + # Eligible for live reload. + TraceTimeout = "60s" + + # MaxBatchSize is the number of events to be included in the batch for sending + MaxBatchSize = 500 + + # SendTicker is a short timer; it determines the duration to use to check for traces to send + SendTicker = "100ms" + + # LoggingLevel is the level above which we should log. Debug is very verbose, + # and should only be used in pre-production environments. Info is the + # recommended level. Valid options are "debug", "info", "error", and + # "panic" + # Not eligible for live reload. 
+ LoggingLevel = "debug" + + # UpstreamBufferSize and PeerBufferSize control how large of an event queue to use + # when buffering events that will be forwarded to peers or the upstream API. + UpstreamBufferSize = 10000 + PeerBufferSize = 10000 + + # DebugServiceAddr sets the IP and port the debug service will run on + # The debug service will only run if the command line flag -d is specified + # The debug service runs on the first open port between localhost:6060 and :6069 by default + # DebugServiceAddr = "localhost:8085" + + # AddHostMetadataToTrace determines whether or not to add information about + # the host that Refinery is running on to the spans that it processes. + # If enabled, information about the host will be added to each span with the + # prefix `meta.refinery.`. + # Currently the only value added is 'meta.refinery.local_hostname'. + # Not eligible for live reload + AddHostMetadataToTrace = false + + ############################ + ## Implementation Choices ## + ############################ + + # Each of the config options below chooses an implementation of a Refinery + # component to use. Depending on the choice there may be more configuration + # required below in the section for that choice. Changing implementation choices + # requires a process restart; these changes will not be picked up by a live + # config reload. (Individual config options for a given implementation may be + # eligible for live reload). + + # Collector describes which collector to use for collecting traces. The only + # current valid option is "InMemCollector".. More can be added by adding + # implementations of the Collector interface. + Collector = "InMemCollector" + + # Logger describes which logger to use for Refinery logs. Valid options are + # "logrus" and "honeycomb". The logrus option will write logs to STDOUT and the + # honeycomb option will send them to a Honeycomb dataset. + Logger = "logrus" + + # Metrics describes which service to use for Refinery metrics. 
Valid options are + # "prometheus" and "honeycomb". The prometheus option starts a listener that + # will reply to a request for /metrics. The honeycomb option will send summary + # metrics to a Honeycomb dataset. + Metrics = "prometheus" + + ######################### + ## Peer Management ## + ######################### + + [PeerManagement] + Type = "file" + # Peers is the list of all servers participating in this proxy cluster. Events + # will be sharded evenly across all peers based on the Trace ID. Values here + # should be the base URL used to access the peer, and should include scheme, + # hostname (or ip address) and port. All servers in the cluster should be in + # this list, including this host. + Peers = [ + "http://127.0.0.1:8083", + # "http://127.0.0.1:8083", + # "http://10.1.2.3.4:8080", + # "http://refinery-1231:8080", + # "http://peer-3.fqdn" // assumes port 80 + ] + + # [PeerManagement] + # Type = "redis" + # RedisHost is is used to connect to redis for peer cluster membership management. + # Further, if the environment variable 'REFINERY_REDIS_HOST' is set it takes + # precedence and this value is ignored. + # Not eligible for live reload. + # RedisHost = "localhost:6379" + + # RedisPassword is the password used to connect to redis for peer cluster membership management. + # If the environment variable 'REFINERY_REDIS_PASSWORD' is set it takes + # precedence and this value is ignored. + # Not eligible for live reload. + # RedisPassword = "" + + # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. + # Not eligible for live reload. + # UseTLS = false + + # UseTLSInsecure disables certificate checks + # Not eligible for live reload. + # UseTLSInsecure = false + + # IdentifierInterfaceName is optional. By default, when using RedisHost, Refinery will use + # the local hostname to identify itself to other peers in Redis. 
If your environment + # requires that you use IPs as identifiers (for example, if peers can't resolve eachother + # by name), you can specify the network interface that Refinery is listening on here. + # Refinery will use the first unicast address that it finds on the specified network + # interface as its identifier. + # Not eligible for live reload. + # IdentifierInterfaceName = "eth0" + + # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Refinery will default to the first + # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use + # the first IPV6 unicast address found. + # UseIPV6Identifier = false + + # RedisIdentifier is optional. By default, when using RedisHost, Refinery will use + # the local hostname to identify itself to other peers in Redis. If your environment + # requires that you use IPs as identifiers (for example, if peers can't resolve eachother + # by name), you can specify the exact identifier (IP address, etc) to use here. + # Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set. + # RedisIdentifier = "192.168.1.1" + + ######################### + ## In-Memory Collector ## + ######################### + + # InMemCollector brings together all the settings that are relevant to + # collecting spans together to make traces. + [InMemCollector] + + # The collection cache is used to collect all spans into a trace as well as + # remember the sampling decision for any spans that might come in after the + # trace has been marked "complete" (either by timing out or seeing the root + # span). The number of traces in the cache should be many multiples (100x to + # 1000x) of the total number of concurrently active traces (trace throughput * + # trace duration). + # Eligible for live reload. Growing the cache capacity with a live config reload + # is fine. Avoid shrinking it with a live reload (you can, but it may cause + # temporary odd sampling decisions). 
+ CacheCapacity = 1000 + + # MaxAlloc is optional. If set, it must be an integer >= 0. 64-bit values are + # supported. + # If set to a non-zero value, once per tick (see SendTicker) the collector + # will compare total allocated bytes to this value. If allocation is too + # high, cache capacity will be reduced and an error will be logged. + # Useful values for this setting are generally in the range of 75%-90% of + # available system memory. + MaxAlloc = 0 + + ################### + ## Logrus Logger ## + ################### + + # LogrusLogger is a section of the config only used if you are using the + # LogrusLogger to send all logs to STDOUT using the logrus package. If you are + # using a different logger (eg honeycomb logger) you can leave all this + # commented out. + [LogrusLogger] + + # logrus logger currently has no options! + + ###################### + ## Honeycomb Logger ## + ###################### + + # HoneycombLogger is a section of the config only used if you are using the + # HoneycombLogger to send all logs to a Honeycomb Dataset. If you are using a + # different logger (eg file-based logger) you can leave all this commented out. + + [HoneycombLogger] + + # LoggerHoneycombAPI is the URL for the upstream Honeycomb API. + # Eligible for live reload. + LoggerHoneycombAPI = "https://api.honeycomb.io" + + # LoggerAPIKey is the API key to use to send log events to the Honeycomb logging + # dataset. This is separate from the APIKeys used to authenticate regular + # traffic. + # Eligible for live reload. + LoggerAPIKey = "abcd1234" + + # LoggerDataset is the name of the dataset to which to send Refinery logs + # Eligible for live reload. + LoggerDataset = "Refinery Logs" + + # LoggerSamplerEnabled enables a PerKeyThroughput dynamic sampler for log messages. + # This will sample log messages based on [log level:message] key on a per second throughput basis. + # Not eligible for live reload. 
+ LoggerSamplerEnabled = true + + # LoggerSamplerThroughput is the per key per second throughput for the log message dynamic sampler. + # Not eligible for live reload. + LoggerSamplerThroughput = 10 + + ####################### + ## Honeycomb Metrics ## + ####################### + + # HoneycombMetrics is a section of the config only used if you are using the + # HoneycombMetrics to send all metrics to a Honeycomb Dataset. If you are using a + # different metrics service (eg prometheus or metricsd) you can leave all this + # commented out. + + [HoneycombMetrics] + + # MetricsHoneycombAPI is the URL for the upstream Honeycomb API. + # Eligible for live reload. + MetricsHoneycombAPI = "https://api.honeycomb.io" + + # MetricsAPIKey is the API key to use to send log events to the Honeycomb logging + # dataset. This is separate from the APIKeys used to authenticate regular + # traffic. + # Eligible for live reload. + MetricsAPIKey = "abcd1234" + + # MetricsDataset is the name of the dataset to which to send Refinery metrics + # Eligible for live reload. + MetricsDataset = "Refinery Metrics" + + # MetricsReportingInterval is the frequency (in seconds) to send metric events + # to Honeycomb. Between 1 and 60 is recommended. + # Not eligible for live reload. + MetricsReportingInterval = 3 + + + #####################@## + ## Prometheus Metrics ## + #####################@## + + [PrometheusMetrics] + + # MetricsListenAddr determines the interface and port on which Prometheus will + # listen for requests for /metrics. Must be different from the main Refinery + # listener. + # Not eligible for live reload. 
+ MetricsListenAddr = "localhost:2112" diff --git a/deployment/kubernetes/k8s-deployment.yaml b/deployment/kubernetes/k8s-deployment.yaml new file mode 100644 index 0000000000..d94b097d00 --- /dev/null +++ b/deployment/kubernetes/k8s-deployment.yaml @@ -0,0 +1,64 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: opsramp-tracing-proxy +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: opsramp-tracing-proxy + namespace: opsramp-tracing-proxy + labels: + app: opsramp-tracing-proxy +spec: + replicas: 1 + selector: + matchLabels: + app: opsramp-tracing-proxy + template: + metadata: + labels: + app: opsramp-tracing-proxy + spec: + containers: + - name: opsramp-tracing-proxy + image: lokeshopsramp/tracing-proxy + ports: + - containerPort: 9090 + resources: + requests: + memory: "2048Mi" + cpu: "2" + limits: + memory: "8096Mi" + cpu: "4" + volumeMounts: + - name: opsramp-tracing-rules + mountPath: /etc/tracing-proxy/rules.toml + subPath: rules.toml + readOnly: true + - name: opsramp-tracing-config + mountPath: /etc/tracing-proxy/config.toml + subPath: config.toml + readOnly: true + volumes: + - configMap: + name: opsramp-tracing-proxy-rules + name: opsramp-tracing-rules + - configMap: + name: opsramp-tracing-proxy-config + name: opsramp-tracing-config + +--- +apiVersion: v1 +kind: Service +metadata: + name: opsramp-tracing-proxy + namespace: opsramp-tracing-proxy +spec: + selector: + app: opsramp-tracing-proxy + ports: + - protocol: TCP + port: 9090 + targetPort: 9090 diff --git a/deployment/kubernetes/k8s-rules-cm.yaml b/deployment/kubernetes/k8s-rules-cm.yaml new file mode 100644 index 0000000000..4f4276e02b --- /dev/null +++ b/deployment/kubernetes/k8s-rules-cm.yaml @@ -0,0 +1,266 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: opsramp-tracing-proxy-rules + namespace: opsramp-tracing-proxy +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: opsramp-tracing-proxy-rules + labels: + name: opsramp-tracing-proxy-rules + namespace: 
opsramp-tracing-proxy +data: + rules.toml: |- + ############################ + ## Sampling Rules Config ## + ############################ + + # DryRun - If enabled, marks traces that would be dropped given current sampling rules, + # and sends all traces regardless + DryRun = true + + # DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to refinery_kept + DryRunFieldName = "fromProxy" + + # DeterministicSampler is a section of the config for manipulating the + # Deterministic Sampler implementation. This is the simplest sampling algorithm + # - it is a static sample rate, choosing traces randomly to either keep or send + # (at the appropriate rate). It is not influenced by the contents of the trace. + Sampler = "DeterministicSampler" + + # SampleRate is the rate at which to sample. It indicates a ratio, where one + # sample trace is kept for every n traces seen. For example, a SampleRate of 30 + # will keep 1 out of every 30 traces. The choice on whether to keep any specific + # trace is random, so the rate is approximate. + # Eligible for live reload. + SampleRate = 1 + + [dataset1] + + # Note: If your dataset name contains a space, you will have to escape the dataset name + # using single quotes, such as ['dataset 1'] + + # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler + # implementation. This sampler collects the values of a number of fields from a + # trace and uses them to form a key. This key is handed to the standard dynamic + # sampler algorithm which generates a sample rate based on the frequency with + # which that key has appeared in the previous ClearFrequencySec seconds. See + # https://github.com/honeycombio/dynsampler-go for more detail on the mechanics + # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from + # that package. + Sampler = "DynamicSampler" + + # SampleRate is the goal rate at which to sample. 
It indicates a ratio, where + # one sample trace is kept for every n traces seen. For example, a SampleRate of + # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # sampler, who assigns a sample rate for each trace based on the fields selected + # from that trace. + # Eligible for live reload. + SampleRate = 2 + + # FieldList is a list of all the field names to use to form the key that will be + # handed to the dynamic sampler. The cardinality of the combination of values + # from all of these keys should be reasonable in the face of the frequency of + # those keys. If the combination of fields in these keys essentially makes them + # unique, the dynamic sampler will do no sampling. If the keys have too few + # values, you won't get samples of the most interesting traces. A good key + # selection will have consistent values for high frequency boring traffic and + # unique values for outliers and interesting traffic. Including an error field + # (or something like HTTP status code) is an excellent choice. As an example, + # assuming 30 or so endpoints, a combination of HTTP endpoint and status code + # would be a good set of keys in order to let you see accurately use of all + # endpoints and call out when there is failing traffic to any endpoint. Field + # names may come from any span in the trace. + # Eligible for live reload. + FieldList = ["request.method","response.status_code"] + + # UseTraceLength will add the number of spans in the trace in to the dynamic + # sampler as part of the key. The number of spans is exact, so if there are + # normally small variations in trace length you may want to leave this off. If + # traces are consistent lengths and changes in trace length is a useful + # indicator of traces you'd like to see in Honeycomb, set this to true. + # Eligible for live reload. 
+ UseTraceLength = true + + # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # to the root span of the trace containing the key used by the sampler to decide + # the sample rate. This can be helpful in understanding why the sampler is + # making certain decisions about sample rate and help you understand how to + # better choose the sample rate key (aka the FieldList setting above) to use. + AddSampleRateKeyToTrace = true + + # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # when adding the sample rate key to the trace. This setting is only used when + # AddSampleRateKeyToTrace is true. + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + # ClearFrequencySec is the name of the field the sampler will use to determine + # the period over which it will calculate the sample rate. This setting defaults + # to 30. + # Eligible for live reload. + ClearFrequencySec = 60 + + [dataset2] + + # EMADynamicSampler is a section of the config for manipulating the Exponential + # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, + # it attempts to average a given sample rate, weighting rare traffic and frequent + # traffic differently so as to end up with the correct average. + # + # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended + # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs + # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential + # Moving Average of counts seen per key, and adjusts this average at regular intervals. + # The weight applied to more recent intervals is defined by `weight`, a number between + # (0, 1) - larger values weight the average more toward recent observations. 
In other words, + # a larger weight will cause sample rates more quickly adapt to traffic patterns, + # while a smaller weight will result in sample rates that are less sensitive to bursts or drops + # in traffic and thus more consistent over time. + # + # Keys that are not found in the EMA will always have a sample + # rate of 1. Keys that occur more frequently will be sampled on a logarithmic + # curve. In other words, every key will be represented at least once in any + # given window and more frequent keys will have their sample rate + # increased proportionally to wind up with the goal sample rate. + Sampler = "EMADynamicSampler" + + # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where + # one sample trace is kept for every n traces seen. For example, a SampleRate of + # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # sampler, who assigns a sample rate for each trace based on the fields selected + # from that trace. + # Eligible for live reload. + GoalSampleRate = 2 + + # FieldList is a list of all the field names to use to form the key that will be + # handed to the dynamic sampler. The cardinality of the combination of values + # from all of these keys should be reasonable in the face of the frequency of + # those keys. If the combination of fields in these keys essentially makes them + # unique, the dynamic sampler will do no sampling. If the keys have too few + # values, you won't get samples of the most interesting traces. A good key + # selection will have consistent values for high frequency boring traffic and + # unique values for outliers and interesting traffic. Including an error field + # (or something like HTTP status code) is an excellent choice. As an example, + # assuming 30 or so endpoints, a combination of HTTP endpoint and status code + # would be a good set of keys in order to let you see accurately use of all + # endpoints and call out when there is failing traffic to any endpoint. 
Field + # names may come from any span in the trace. + # Eligible for live reload. + FieldList = ["request.method","response.status_code"] + + # UseTraceLength will add the number of spans in the trace in to the dynamic + # sampler as part of the key. The number of spans is exact, so if there are + # normally small variations in trace length you may want to leave this off. If + # traces are consistent lengths and changes in trace length is a useful + # indicator of traces you'd like to see in Honeycomb, set this to true. + # Eligible for live reload. + UseTraceLength = true + + # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # to the root span of the trace containing the key used by the sampler to decide + # the sample rate. This can be helpful in understanding why the sampler is + # making certain decisions about sample rate and help you understand how to + # better choose the sample rate key (aka the FieldList setting above) to use. + AddSampleRateKeyToTrace = true + + # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # when adding the sample rate key to the trace. This setting is only used when + # AddSampleRateKeyToTrace is true. + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + # AdjustmentInterval defines how often (in seconds) we adjust the moving average from + # recent observations. Default 15s + # Eligible for live reload. + AdjustmentInterval = 15 + + # Weight is a value between (0, 1) indicating the weighting factor used to adjust + # the EMA. With larger values, newer data will influence the average more, and older + # values will be factored out more quickly. In mathematical literature concerning EMA, + # this is referred to as the `alpha` constant. + # Default is 0.5 + # Eligible for live reload. + Weight = 0.5 + + # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. 
+ # Once MaxKeys is reached, new keys will not be included in the sample rate map, but + # existing keys will continue to be be counted. You can use this to keep the sample rate + # map size under control. + # Eligible for live reload + MaxKeys = 0 + + # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key + # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to + # decide what constitutes "zero". Keys with averages below this threshold will be removed + # from the EMA. Default is the same as Weight, as this prevents a key with the smallest + # integer value (1) from being aged out immediately. This value should generally be <= Weight, + # unless you have very specific reasons to set it higher. + # Eligible for live reload + AgeOutValue = 0.5 + + # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define + # the burst detection threshold. If total counts observed for a given interval exceed the threshold + # EMA is updated immediately, rather than waiting on the AdjustmentInterval. + # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, + # burst detection will kick in. + # Eligible for live reload + BurstMultiple = 2.0 + + # BurstDetectionDelay indicates the number of intervals to run after Start is called before + # burst detection kicks in. 
+ # Defaults to 3 + # Eligible for live reload + BurstDetectionDelay = 3 + + [dataset3] + + Sampler = "DeterministicSampler" + SampleRate = 10 + + [dataset4] + + Sampler = "RulesBasedSampler" + + [[dataset4.rule]] + name = "drop healtchecks" + drop = true + [[dataset4.rule.condition]] + field = "http.route" + operator = "=" + value = "/health-check" + + [[dataset4.rule]] + name = "keep slow 500 errors" + SampleRate = 1 + [[dataset4.rule.condition]] + field = "status_code" + operator = "=" + value = 500 + [[dataset4.rule.condition]] + field = "duration_ms" + operator = ">=" + value = 1000.789 + + [[dataset4.rule]] + name = "dynamically sample 200 responses" + [[dataset4.rule.condition]] + field = "status_code" + operator = "=" + value = 200 + [dataset4.rule.sampler.EMADynamicSampler] + Sampler = "EMADynamicSampler" + GoalSampleRate = 15 + FieldList = ["request.method", "request.route"] + AddSampleRateKeyToTrace = true + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + [[dataset4.rule]] + SampleRate = 10 # default when no rules match, if missing defaults to 10 + + [dataset5] + + Sampler = "TotalThroughputSampler" + GoalThroughputPerSec = 100 + FieldList = "[request.method]" + From c53f86ad0f116645d402a619536f145061876bd1 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Fri, 25 Mar 2022 15:11:37 +0530 Subject: [PATCH 129/351] fix in k8s-deployment --- deployment/kubernetes/k8s-deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deployment/kubernetes/k8s-deployment.yaml b/deployment/kubernetes/k8s-deployment.yaml index d94b097d00..e0955f71fc 100644 --- a/deployment/kubernetes/k8s-deployment.yaml +++ b/deployment/kubernetes/k8s-deployment.yaml @@ -23,6 +23,7 @@ spec: containers: - name: opsramp-tracing-proxy image: lokeshopsramp/tracing-proxy + command: ["/usr/bin/tracing-proxy"] ports: - containerPort: 9090 resources: From 3d02347c732ca47b38e151b32a45c291b07d8141 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Fri, 25 Mar 2022 
17:55:11 +0530 Subject: [PATCH 130/351] fix in event attributes husky --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 03717c694c..410f803d9b 100644 --- a/go.mod +++ b/go.mod @@ -41,4 +41,4 @@ require ( replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220323133508-a1eeabcc5f98 //replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220302161820-fe16f58d3996 -replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220317114134-7b752389d9eb +replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220325122121-523bfbec37a7 diff --git a/go.sum b/go.sum index b7d1540862..774561db3d 100644 --- a/go.sum +++ b/go.sum @@ -269,8 +269,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jirs5/husky v0.9.1-0.20220317114134-7b752389d9eb h1:mWsrVcSHqkhd4oy6ht07rhb21nZsEzbrh8vrDxYJyQ4= -github.com/jirs5/husky v0.9.1-0.20220317114134-7b752389d9eb/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= +github.com/jirs5/husky v0.9.1-0.20220325122121-523bfbec37a7 h1:etPTWTKk/1S2dWAdOoW+c1smMqkrJwQZjkUoppbL/zc= +github.com/jirs5/husky v0.9.1-0.20220325122121-523bfbec37a7/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= github.com/jirs5/libtrace-go v0.0.0-20220323133508-a1eeabcc5f98 
h1:SllAt3oySFffDLd9/T4uwE9x7JnGu6PD0T+H7gvWMLU= github.com/jirs5/libtrace-go v0.0.0-20220323133508-a1eeabcc5f98/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= From 99118c5055e625707998aad08c2acc79bd6a3df4 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Mon, 28 Mar 2022 09:47:08 +0530 Subject: [PATCH 131/351] adding bash to oci image for enabling shell access to the container --- Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index ca4bb18cc0..7770928b16 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ FROM golang:alpine as builder -RUN apk update && apk add --no-cache git ca-certificates && update-ca-certificates +RUN apk update && apk add --no-cache git bash ca-certificates && update-ca-certificates ARG BUILD_ID=dev @@ -22,6 +22,8 @@ RUN CGO_ENABLED=0 \ FROM scratch +COPY --from-builder /bin/bash /bin/bash + COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ COPY --from=builder /app/tracing-proxy /usr/bin/tracing-proxy From 835e130ae9ca2fd96e75332cbe111c549e31260d Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Mon, 28 Mar 2022 09:49:22 +0530 Subject: [PATCH 132/351] fix typo in Dockerfile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 7770928b16..44f10bb0bd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,7 +22,7 @@ RUN CGO_ENABLED=0 \ FROM scratch -COPY --from-builder /bin/bash /bin/bash +COPY --from=builder /bin/bash /bin/bash COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ From 6aaf96a9967f34b20730bc3379571a3f7df5278e Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Mon, 28 Mar 2022 10:50:53 +0530 Subject: [PATCH 133/351] changing Dockerfile to use alpine image & update k8s-deployment to have no cmd --- Dockerfile | 9 ++++++--- 
deployment/kubernetes/k8s-deployment.yaml | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 44f10bb0bd..4997100181 100644 --- a/Dockerfile +++ b/Dockerfile @@ -20,10 +20,13 @@ RUN CGO_ENABLED=0 \ -o tracing-proxy \ ./cmd/tracing-proxy -FROM scratch +FROM alpine:latest -COPY --from=builder /bin/bash /bin/bash +RUN apk update && apk add --no-cache bash ca-certificates && update-ca-certificates -COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ +COPY config_complete.toml /etc/tracing-proxy/config.toml +COPY rules_complete.toml /etc/tracing-proxy/rules.toml COPY --from=builder /app/tracing-proxy /usr/bin/tracing-proxy + +CMD ["/usr/bin/tracing-proxy"] \ No newline at end of file diff --git a/deployment/kubernetes/k8s-deployment.yaml b/deployment/kubernetes/k8s-deployment.yaml index e0955f71fc..610ef92090 100644 --- a/deployment/kubernetes/k8s-deployment.yaml +++ b/deployment/kubernetes/k8s-deployment.yaml @@ -23,7 +23,7 @@ spec: containers: - name: opsramp-tracing-proxy image: lokeshopsramp/tracing-proxy - command: ["/usr/bin/tracing-proxy"] + imagePullPolicy: Always ports: - containerPort: 9090 resources: From 88b57aea79d10e0fba3d000e83aab37cd881dff0 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Mon, 28 Mar 2022 11:25:45 +0530 Subject: [PATCH 134/351] updated cmd parameters in dockerfile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 4997100181..eebb3e37a9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -29,4 +29,4 @@ COPY rules_complete.toml /etc/tracing-proxy/rules.toml COPY --from=builder /app/tracing-proxy /usr/bin/tracing-proxy -CMD ["/usr/bin/tracing-proxy"] \ No newline at end of file +CMD ["/usr/bin/tracing-proxy", "--config", "/etc/tracing-proxy/config.toml", "--rules_config", "/etc/tracing-proxy/rules.toml"] \ No newline at end of file From 4d52be5829c1cd221720d5f44f3cda7907a51347 Mon Sep 17 00:00:00 2001 From: "imran.syed" 
Date: Mon, 28 Mar 2022 14:46:00 +0530 Subject: [PATCH 135/351] fix for trace keys for sampler --- collect/collect.go | 6 +++++- route/route.go | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/collect/collect.go b/collect/collect.go index 2ce8066f45..804f40c43b 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -392,7 +392,7 @@ func (i *InMemCollector) dealWithSentTrace(keep bool, sampleRate uint, sp *types } func isRootSpan(sp *types.Span) bool { - parentID := sp.Data["trace.parent_id"] + parentID := sp.Data["traceParentID"] if parentID == nil { parentID = sp.Data["parentId"] if parentID == nil { @@ -428,6 +428,8 @@ func (i *InMemCollector) send(trace *types.Trace) { var sampler sample.Sampler var found bool + fmt.Println("Trying to get sampler for data set : ", trace.Dataset) + if sampler, found = i.datasetSamplers[trace.Dataset]; !found { sampler = i.SamplerFactory.GetSamplerImplementationForDataset(trace.Dataset) // save sampler for later @@ -439,6 +441,8 @@ func (i *InMemCollector) send(trace *types.Trace) { trace.SampleRate = rate trace.KeepSample = shouldSend + fmt.Println("Trace shouldSend for data set : ", trace.Dataset, shouldSend) + // record this decision in the sent record LRU for future spans sentRecord := traceSentRecord{ keep: shouldSend, diff --git a/route/route.go b/route/route.go index 8f3a0840cd..82f6343824 100644 --- a/route/route.go +++ b/route/route.go @@ -400,7 +400,7 @@ func (r *Router) processEvent(ev *types.Event, reqID interface{}) error { // extract trace ID, route to self or peer, pass on to collector // TODO make trace ID field configurable var traceID string - if trID, ok := ev.Data["trace.trace_id"]; ok { + if trID, ok := ev.Data["traceTraceID"]; ok { traceID = trID.(string) } else if trID, ok := ev.Data["traceId"]; ok { traceID = trID.(string) From f166bf889fc5b045a071a6c10d79a1070f5210d7 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Mon, 28 Mar 2022 15:43:12 +0530 Subject: [PATCH 136/351] fix for 
rule sampler condition check --- sample/rules.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/sample/rules.go b/sample/rules.go index 6d938eed3d..8c7f676255 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -69,7 +69,17 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b span: for _, span := range trace.GetSpans() { var match bool - value, exists := span.Data[condition.Field] + var value interface{} + var exists bool + + if spanAttributes, ok := span.Data["spanAttributes"]; ok && spanAttributes != nil { + value, exists = spanAttributes.(map[string]interface{})[condition.Field] + } + if !exists { + if resourceAttributes, ok := span.Data["resourceAttributes"]; ok && resourceAttributes != nil { + value, exists = resourceAttributes.(map[string]interface{})[condition.Field] + } + } switch exists { case true: From 98ee29021485a3486ca36803cdffe479f6c1e68f Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Mon, 28 Mar 2022 15:58:19 +0530 Subject: [PATCH 137/351] updated rules.go to respect spanAttributes, resourceAttributes, eventAttributes --- sample/rules.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/sample/rules.go b/sample/rules.go index 8c7f676255..7a6ca19ee4 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -72,13 +72,19 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b var value interface{} var exists bool - if spanAttributes, ok := span.Data["spanAttributes"]; ok && spanAttributes != nil { - value, exists = spanAttributes.(map[string]interface{})[condition.Field] + attributeMapKeys := []string{"spanAttributes", "resourceAttributes", "eventAttributes"} + + for _, attributeKey := range attributeMapKeys { + if attribute, ok := span.Data[attributeKey]; ok && attribute != nil { + value, exists = attribute.(map[string]interface{})[condition.Field] + if exists { + break + } + } } + if !exists { - if resourceAttributes, ok := 
span.Data["resourceAttributes"]; ok && resourceAttributes != nil { - value, exists = resourceAttributes.(map[string]interface{})[condition.Field] - } + value, exists = span.Data[condition.Field] } switch exists { From 24024bf42e2a8cb47b24e57acd884afa5d71b852 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Mon, 28 Mar 2022 16:28:08 +0100 Subject: [PATCH 138/351] Cache google ko deps between workflows (#424) --- .circleci/config.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index d3ed5bfac4..15736e099f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -23,6 +23,16 @@ commands: go build -ldflags "-X main.BuildID=${CIRCLE_TAG:-${CIRCLE_SHA1:0:7}}" \ -o $GOPATH/bin/refinery-<< parameters.os >>-<< parameters.arch >> \ ./cmd/refinery + setup_googleko: + steps: + - restore_cache: + keys: + - v1-googleko + - run: go install github.com/google/ko@latest + - save_cache: + key: v1-googleko + paths: + - /home/circleci/go/pkg/mod/ jobs: test: @@ -129,7 +139,7 @@ jobs: docker: - image: cimg/go:1.17 steps: - - run: go install github.com/google/ko@latest + - setup_googleko - checkout - setup_remote_docker - run: @@ -140,7 +150,7 @@ jobs: docker: - image: cimg/go:1.17 steps: - - run: go install github.com/google/ko@latest + - setup_googleko - checkout - setup_remote_docker - run: From 51348e5b95780d306182fd4392db91b103aa93e4 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Mon, 28 Mar 2022 17:03:33 +0100 Subject: [PATCH 139/351] Create checksums when building binaries (#423) --- .circleci/config.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 15736e099f..581fc1b3b6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -23,6 +23,9 @@ commands: go build -ldflags "-X main.BuildID=${CIRCLE_TAG:-${CIRCLE_SHA1:0:7}}" \ -o $GOPATH/bin/refinery-<< parameters.os >>-<< 
parameters.arch >> \ ./cmd/refinery + - run: | + sha256sum $GOPATH/bin/refinery-<< parameters.os >>-<< parameters.arch >> \ + > $GOPATH/bin/refinery-<< parameters.os >>-<< parameters.arch >>.checksum setup_googleko: steps: - restore_cache: From 48cd923ae0782a33cf55e4c7b9e06111cec14da5 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Mon, 28 Mar 2022 13:20:24 -0400 Subject: [PATCH 140/351] prepare 1.12.1 release (#425) --- CHANGELOG.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9f3876e37..4ba1731067 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ # Refinery Changelog +## 1.12.1 2022-03-28 + +### Fixes + +- fix: error log event metadata (#422) | [@vreynolds](https://github.com/vreynolds) + +### Maintenance + +- Create checksums when building binaries (#423) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) +- Cache google ko deps between workflows (#424) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) + ## 1.12.0 2022-02-24 ### Enhancements From fa5cbd5ae1bee8525c31bdf8a3016b638b59ff10 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Tue, 29 Mar 2022 14:50:18 +0530 Subject: [PATCH 141/351] trace latency per operation metric added --- collect/collect.go | 26 ++++++++++++++++++ metrics/honeycomb.go | 8 ++++++ metrics/metrics.go | 2 ++ metrics/prometheus.go | 62 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 98 insertions(+) diff --git a/collect/collect.go b/collect/collect.go index 804f40c43b..5463604c6d 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -102,6 +102,7 @@ func (i *InMemCollector) Start() error { i.Metrics.Register("trace_send_dropped", "counter") i.Metrics.Register("trace_send_has_root", "counter") i.Metrics.Register("trace_send_no_root", "counter") + i.Metrics.RegisterWithDescriptionLabels("trace_operation_latency_ms", "gauge_labels", "Trace latency wrt each trace operation", []string{"operation"}) stc, err := 
lru.New(imcConfig.CacheCapacity * 5) // keep 5x ring buffer size if err != nil { @@ -327,6 +328,18 @@ func (i *InMemCollector) processSpan(sp *types.Span) { // create a new trace to hold it i.Metrics.Increment("trace_accepted") + //Add metrics for latency/duration per operation + /*val := make(map[string]interface{}) + if sp.Data != nil { + if spanName, ok := sp.Data["spanName"].(string); ok { + val [spanName] = sp.Data["durationMs"] + i.Metrics.GaugeWithLabels("trace_operation_latency_ms", val) + }else{ + fmt.Println("No Operation/Span Name found in trace") + } + + }*/ + timeout, err := i.Config.GetTraceTimeout() if err != nil { timeout = 60 * time.Second @@ -425,6 +438,19 @@ func (i *InMemCollector) send(trace *types.Trace) { i.Metrics.Increment("trace_send_no_root") } + //Add metrics for latency/duration per operation + val := make(map[string]interface{}) + for _, sp := range trace.GetSpans() { + if sp.Data != nil { + if spanName, ok := sp.Data["spanName"].(string); ok { + val[spanName] = sp.Data["durationMs"] + } else { + fmt.Println("No Operation/Span Name found in trace") + } + } + } + i.Metrics.GaugeWithLabels("trace_operation_latency_ms", "operation", val) + var sampler sample.Sampler var found bool diff --git a/metrics/honeycomb.go b/metrics/honeycomb.go index 7dddde5d15..0297cdda9f 100644 --- a/metrics/honeycomb.go +++ b/metrics/honeycomb.go @@ -42,6 +42,14 @@ type HoneycombMetrics struct { prefix string } +func (h *HoneycombMetrics) GaugeWithLabels(name string, label string, val map[string]interface{}) { + panic("implement me") +} + +func (h *HoneycombMetrics) RegisterWithDescriptionLabels(name string, metricType string, desc string, labels []string) { + panic("implement me") +} + type counter struct { lock sync.Mutex name string diff --git a/metrics/metrics.go b/metrics/metrics.go index 43fa26d582..c8a604560a 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -14,6 +14,8 @@ type Metrics interface { Gauge(name string, val interface{}) Count(name 
string, n interface{}) Histogram(name string, obs interface{}) + RegisterWithDescriptionLabels(name string, metricType string, desc string, labels []string) + GaugeWithLabels(name string, label string, val map[string]interface{}) } func GetMetricsImplementation(c config.Config, prefix string) Metrics { diff --git a/metrics/prometheus.go b/metrics/prometheus.go index 3d7d7a0dac..f66aac1619 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -81,6 +81,54 @@ func (p *PromMetrics) Register(name string, metricType string) { p.metrics[name] = newmet } +// RegisterWithDescriptionLabels takes a name, a metric type, description, labels. The type should be one of "counter", +// "gauge", or "histogram" +func (p *PromMetrics) RegisterWithDescriptionLabels(name string, metricType string, desc string, labels []string) { + p.lock.Lock() + defer p.lock.Unlock() + + newmet, exists := p.metrics[name] + + // don't attempt to add the metric again as this will cause a panic + if exists { + return + } + + switch metricType { + case "counter": + newmet = promauto.NewCounter(prometheus.CounterOpts{ + Name: name, + Namespace: p.prefix, + Help: name, + }) + case "gauge": + newmet = promauto.NewGauge(prometheus.GaugeOpts{ + Name: name, + Namespace: p.prefix, + Help: name, + }) + case "histogram": + newmet = promauto.NewHistogram(prometheus.HistogramOpts{ + Name: name, + Namespace: p.prefix, + Help: name, + // This is an attempt at a usable set of buckets for a wide range of metrics + // 16 buckets, first upper bound of 1, each following upper bound is 4x the previous + Buckets: prometheus.ExponentialBuckets(1, 4, 16), + }) + case "gauge_labels": + + newmet = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: name, + Help: "Trace latency in ms group by operation", + }, + labels) + } + + p.metrics[name] = newmet +} + func (p *PromMetrics) Increment(name string) { p.lock.RLock() defer p.lock.RUnlock() @@ -121,3 +169,17 @@ func (p *PromMetrics) Histogram(name string, obs 
interface{}) { } } } + +func (p *PromMetrics) GaugeWithLabels(name string, label string, val map[string]interface{}) { + p.lock.RLock() + defer p.lock.RUnlock() + if gaugeIface, ok := p.metrics[name]; ok { + if gaugeVec, ok := gaugeIface.(*prometheus.GaugeVec); ok { + //gaugeVec.WithLabelValues() + for k, v := range val { + //gaugeVec.With(prometheus.Labels{"operation":k}).Set(ConvertNumeric(v)) + gaugeVec.With(prometheus.Labels{label: k}).Set(ConvertNumeric(v)) + } + } + } +} From 1f084536a566fd5fdba8e2e0b9559cc7b358032f Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Tue, 29 Mar 2022 17:10:36 +0530 Subject: [PATCH 142/351] added 4 metrics trace_operation_latency_ms trace_operations_failed trace_operations_succeeded trace_operations_total --- collect/collect.go | 72 +++++++++++++++++++++++++++++-------------- metrics/honeycomb.go | 6 +++- metrics/metrics.go | 40 +++++++++++++++++++++++- metrics/prometheus.go | 51 +++++++++++++++--------------- 4 files changed, 120 insertions(+), 49 deletions(-) diff --git a/collect/collect.go b/collect/collect.go index 5463604c6d..f83fcf361b 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -102,7 +102,30 @@ func (i *InMemCollector) Start() error { i.Metrics.Register("trace_send_dropped", "counter") i.Metrics.Register("trace_send_has_root", "counter") i.Metrics.Register("trace_send_no_root", "counter") - i.Metrics.RegisterWithDescriptionLabels("trace_operation_latency_ms", "gauge_labels", "Trace latency wrt each trace operation", []string{"operation"}) + i.Metrics.RegisterWithDescriptionLabels( + "trace_operation_latency_ms", + "gauge", + "Trace latency wrt each trace operation", + []string{"service_name", "operation"}, + ) + i.Metrics.RegisterWithDescriptionLabels( + "trace_operations_failed", + "counter", + "Number of Error events in spans wrt each trace operation", + []string{"service_name", "operation"}, + ) + i.Metrics.RegisterWithDescriptionLabels( + "trace_operations_succeeded", + "counter", + "Number of 
Succeeded events in spans wrt each trace operation", + []string{"service_name", "operation"}, + ) + i.Metrics.RegisterWithDescriptionLabels( + "trace_operations_total", + "counter", + "Total Number of events in spans wrt each trace operation", + []string{"service_name", "operation"}, + ) stc, err := lru.New(imcConfig.CacheCapacity * 5) // keep 5x ring buffer size if err != nil { @@ -328,18 +351,6 @@ func (i *InMemCollector) processSpan(sp *types.Span) { // create a new trace to hold it i.Metrics.Increment("trace_accepted") - //Add metrics for latency/duration per operation - /*val := make(map[string]interface{}) - if sp.Data != nil { - if spanName, ok := sp.Data["spanName"].(string); ok { - val [spanName] = sp.Data["durationMs"] - i.Metrics.GaugeWithLabels("trace_operation_latency_ms", val) - }else{ - fmt.Println("No Operation/Span Name found in trace") - } - - }*/ - timeout, err := i.Config.GetTraceTimeout() if err != nil { timeout = 60 * time.Second @@ -438,18 +449,33 @@ func (i *InMemCollector) send(trace *types.Trace) { i.Metrics.Increment("trace_send_no_root") } - //Add metrics for latency/duration per operation - val := make(map[string]interface{}) - for _, sp := range trace.GetSpans() { - if sp.Data != nil { - if spanName, ok := sp.Data["spanName"].(string); ok { - val[spanName] = sp.Data["durationMs"] - } else { - fmt.Println("No Operation/Span Name found in trace") - } + // Add metrics for latency/duration per operation + for _, span := range trace.GetSpans() { + if span.Data == nil { + continue + } + + labelToKeyMap := map[string]string{ + "service_name": "service.name", + "operation": "spanName", + } + + labels := metrics.ExtractLabelsFromSpan(span, labelToKeyMap) + + durationMsString, ok := span.Data["durationMs"] + if ok && durationMsString != nil { + i.Metrics.GaugeWithLabels("trace_operation_latency_ms", labels, metrics.ConvertNumeric(durationMsString)) + } + + errorStatus, ok := span.Data["error"] + if ok && errorStatus != nil && errorStatus.(bool) 
{ + i.Metrics.IncrementWithLabels("trace_operations_failed", labels) + i.Metrics.IncrementWithLabels("trace_operations_total", labels) + } else { + i.Metrics.IncrementWithLabels("trace_operations_succeeded", labels) + i.Metrics.IncrementWithLabels("trace_operations_total", labels) } } - i.Metrics.GaugeWithLabels("trace_operation_latency_ms", "operation", val) var sampler sample.Sampler var found bool diff --git a/metrics/honeycomb.go b/metrics/honeycomb.go index 0297cdda9f..eae1e616e3 100644 --- a/metrics/honeycomb.go +++ b/metrics/honeycomb.go @@ -42,7 +42,11 @@ type HoneycombMetrics struct { prefix string } -func (h *HoneycombMetrics) GaugeWithLabels(name string, label string, val map[string]interface{}) { +func (h *HoneycombMetrics) GaugeWithLabels(name string, labels map[string]string, value float64) { + panic("implement me") +} + +func (h *HoneycombMetrics) IncrementWithLabels(name string, labels map[string]string) { panic("implement me") } diff --git a/metrics/metrics.go b/metrics/metrics.go index c8a604560a..d56e6f30a8 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -2,6 +2,7 @@ package metrics import ( "fmt" + "github.com/jirs5/tracing-proxy/types" "os" "github.com/jirs5/tracing-proxy/config" @@ -15,7 +16,9 @@ type Metrics interface { Count(name string, n interface{}) Histogram(name string, obs interface{}) RegisterWithDescriptionLabels(name string, metricType string, desc string, labels []string) - GaugeWithLabels(name string, label string, val map[string]interface{}) + + GaugeWithLabels(name string, labels map[string]string, value float64) + IncrementWithLabels(name string, labels map[string]string) } func GetMetricsImplementation(c config.Config, prefix string) Metrics { @@ -74,3 +77,38 @@ func PrefixMetricName(prefix string, name string) string { } return name } + +func ExtractLabelsFromSpan(span *types.Span, labelToKeyMap map[string]string) map[string]string { + + labels := map[string]string{} + + 
attributeMapKeys := []string{"spanAttributes", "resourceAttributes", "eventAttributes"} + + for labelName, searchKey := range labelToKeyMap { + + // check of the higher level first + searchValue, exists := span.Data[searchKey] + if exists && searchValue != nil { + labels[labelName] = searchValue.(string) + continue + } + + // check in the span, resource and event attributes when key is not found + for _, attributeKey := range attributeMapKeys { + if attribute, ok := span.Data[attributeKey]; ok && attribute != nil { + searchValue, exists = attribute.(map[string]interface{})[searchKey] + if exists && searchValue != nil { + labels[labelName] = searchValue.(string) + break + } + } + } + + // if the key does not exist then set it to empty + if !exists { + labels[labelName] = "" + } + } + + return labels +} diff --git a/metrics/prometheus.go b/metrics/prometheus.go index f66aac1619..f732741a1b 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -96,34 +96,29 @@ func (p *PromMetrics) RegisterWithDescriptionLabels(name string, metricType stri switch metricType { case "counter": - newmet = promauto.NewCounter(prometheus.CounterOpts{ + newmet = promauto.NewCounterVec(prometheus.CounterOpts{ Name: name, Namespace: p.prefix, - Help: name, - }) + Help: desc, + }, labels) case "gauge": - newmet = promauto.NewGauge(prometheus.GaugeOpts{ - Name: name, - Namespace: p.prefix, - Help: name, - }) + newmet = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: name, + Namespace: p.prefix, + Help: desc, + }, + labels) case "histogram": - newmet = promauto.NewHistogram(prometheus.HistogramOpts{ + newmet = promauto.NewHistogramVec(prometheus.HistogramOpts{ Name: name, Namespace: p.prefix, - Help: name, + Help: desc, // This is an attempt at a usable set of buckets for a wide range of metrics // 16 buckets, first upper bound of 1, each following upper bound is 4x the previous Buckets: prometheus.ExponentialBuckets(1, 4, 16), - }) - case "gauge_labels": + }, labels) - newmet 
= promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Name: name, - Help: "Trace latency in ms group by operation", - }, - labels) } p.metrics[name] = newmet @@ -170,16 +165,24 @@ func (p *PromMetrics) Histogram(name string, obs interface{}) { } } -func (p *PromMetrics) GaugeWithLabels(name string, label string, val map[string]interface{}) { +func (p *PromMetrics) GaugeWithLabels(name string, labels map[string]string, value float64) { p.lock.RLock() defer p.lock.RUnlock() + if gaugeIface, ok := p.metrics[name]; ok { if gaugeVec, ok := gaugeIface.(*prometheus.GaugeVec); ok { - //gaugeVec.WithLabelValues() - for k, v := range val { - //gaugeVec.With(prometheus.Labels{"operation":k}).Set(ConvertNumeric(v)) - gaugeVec.With(prometheus.Labels{label: k}).Set(ConvertNumeric(v)) - } + gaugeVec.With(labels).Set(value) + } + } +} + +func (p *PromMetrics) IncrementWithLabels(name string, labels map[string]string) { + p.lock.RLock() + defer p.lock.RUnlock() + + if gaugeIface, ok := p.metrics[name]; ok { + if gaugeVec, ok := gaugeIface.(*prometheus.CounterVec); ok { + gaugeVec.With(labels).Inc() } } } From 9555bf018e1860b21179b36947db2c7fa0b45fbd Mon Sep 17 00:00:00 2001 From: ecobrien29 <46940457+ecobrien29@users.noreply.github.com> Date: Tue, 29 Mar 2022 06:56:58 -0700 Subject: [PATCH 143/351] Add parsing for nested json fields in the rules sampler (#418) --- config/sampler_config.go | 3 +- go.mod | 3 +- go.sum | 8 +++ rules_complete.toml | 2 + sample/rules.go | 12 ++++ sample/rules_test.go | 149 +++++++++++++++++++++++++++++++++++++++ 6 files changed, 175 insertions(+), 2 deletions(-) diff --git a/config/sampler_config.go b/config/sampler_config.go index 4812ccaba7..d49ebca1af 100644 --- a/config/sampler_config.go +++ b/config/sampler_config.go @@ -70,7 +70,8 @@ func (r *RulesBasedSamplerRule) String() string { } type RulesBasedSamplerConfig struct { - Rule []*RulesBasedSamplerRule + Rule []*RulesBasedSamplerRule + CheckNestedFields bool } func (r 
*RulesBasedSamplerConfig) String() string { diff --git a/go.mod b/go.mod index b31953fbba..86c4c1e8db 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.9.0 + github.com/honeycombio/husky v0.9.0 // indirect github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 @@ -27,6 +27,7 @@ require ( github.com/sirupsen/logrus v1.8.1 github.com/spf13/viper v1.10.1 github.com/stretchr/testify v1.7.0 + github.com/tidwall/gjson v1.14.0 github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.9.0 golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect diff --git a/go.sum b/go.sum index 6cfaaba06d..34b4d9dead 100644 --- a/go.sum +++ b/go.sum @@ -263,6 +263,8 @@ github.com/honeycombio/husky v0.9.0 h1:TppxWwGCZb54qwHuPRAkxhht4b3btFcM2OvV1/Zs3 github.com/honeycombio/husky v0.9.0/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= +github.com/honeycombio/refinery v1.12.0 h1:o1392RE741TZp8+JMsj1sSpmHOs9+hVunPuozn4JPRY= +github.com/honeycombio/refinery v1.12.0/go.mod h1:ct0vuSsQrUiZZo7sMIQ2grCfeO985A0dwADjUuG81qE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= 
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -399,6 +401,12 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/gjson v1.14.0 h1:6aeJ0bzojgWLa82gDQHcx3S0Lr/O51I9bJ5nv6JFx5w= +github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/vmihailenco/msgpack/v4 v4.3.11 h1:Q47CePddpNGNhk4GCnAx9DDtASi2rasatE0cd26cZoE= github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= diff --git a/rules_complete.toml b/rules_complete.toml index e3711d989a..080ca92a3b 100644 --- a/rules_complete.toml +++ b/rules_complete.toml @@ -206,6 +206,8 @@ SampleRate = 1 [dataset4] Sampler = "RulesBasedSampler" + # Optional, if set to true then the rules will also check nested json fields, in the format of parent.child + CheckNestedFields = false [[dataset4.rule]] name = "drop healtchecks" diff --git a/sample/rules.go b/sample/rules.go index 3a56d24040..73c9ceb023 100644 --- 
a/sample/rules.go +++ b/sample/rules.go @@ -1,6 +1,7 @@ package sample import ( + "encoding/json" "math/rand" "strings" @@ -8,6 +9,7 @@ import ( "github.com/honeycombio/refinery/logger" "github.com/honeycombio/refinery/metrics" "github.com/honeycombio/refinery/types" + "github.com/tidwall/gjson" ) type RulesBasedSampler struct { @@ -70,6 +72,16 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b for _, span := range trace.GetSpans() { var match bool value, exists := span.Data[condition.Field] + if !exists && s.Config.CheckNestedFields { + jsonStr, err := json.Marshal(span.Data) + if err == nil { + result := gjson.Get(string(jsonStr), condition.Field) + if result.Exists() { + value = result.String() + exists = true + } + } + } switch exists { case true: diff --git a/sample/rules_test.go b/sample/rules_test.go index 6bcb5b747d..553e415a5f 100644 --- a/sample/rules_test.go +++ b/sample/rules_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package sample @@ -521,6 +522,154 @@ func TestRules(t *testing.T) { } } +func TestRulesWithNestedFields(t *testing.T) { + data := []TestRulesData{ + { + Rules: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "nested field", + SampleRate: 10, + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "test.test1", + Operator: "=", + Value: "a", + }, + }, + }, + }, + CheckNestedFields: true, + }, + Spans: []*types.Span{ + { + Event: types.Event{ + Data: map[string]interface{}{ + "test": map[string]interface{}{ + "test1": "a", + }, + }, + }, + }, + }, + ExpectedKeep: true, + ExpectedRate: 10, + }, + { + Rules: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "field not nested", + SampleRate: 10, + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "test.test1", + Operator: "=", + Value: "a", + }, + }, + }, + }, + CheckNestedFields: true, + }, 
+ Spans: []*types.Span{ + { + Event: types.Event{ + Data: map[string]interface{}{ + "test.test1": "a", + }, + }, + }, + }, + ExpectedKeep: true, + ExpectedRate: 10, + }, + { + Rules: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "not exists test", + SampleRate: 4, + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "test.test1", + Operator: "not-exists", + }, + }, + }, + }, + CheckNestedFields: true, + }, + Spans: []*types.Span{ + { + Event: types.Event{ + Data: map[string]interface{}{ + "test": map[string]interface{}{ + "test2": "b", + }, + }, + }, + }, + }, + ExpectedKeep: true, + ExpectedRate: 4, + }, + { + Rules: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "do not check nested", + SampleRate: 4, + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "test.test1", + Operator: "exists", + }, + }, + }, + }, + CheckNestedFields: false, + }, + Spans: []*types.Span{ + { + Event: types.Event{ + Data: map[string]interface{}{ + "test": map[string]interface{}{ + "test1": "a", + }, + }, + }, + }, + }, + ExpectedKeep: true, + ExpectedRate: 1, + }, + } + + for _, d := range data { + sampler := &RulesBasedSampler{ + Config: d.Rules, + Logger: &logger.NullLogger{}, + Metrics: &metrics.NullMetrics{}, + } + + trace := &types.Trace{} + + for _, span := range d.Spans { + trace.AddSpan(span) + } + + rate, keep := sampler.GetSampleRate(trace) + + assert.Equal(t, d.ExpectedRate, rate, d.Rules) + + // we can only test when we don't expect to keep the trace + if !d.ExpectedKeep { + assert.Equal(t, d.ExpectedKeep, keep, d.Rules) + } + } +} + func TestRulesWithDynamicSampler(t *testing.T) { data := []TestRulesData{ { From 554582a3fd2c6a2f959c30cbdc8fcfada4bf1b0a Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 30 Mar 2022 12:10:16 +0100 Subject: [PATCH 144/351] update husky to v0.10.3 (#431) --- go.mod | 2 +- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) 
diff --git a/go.mod b/go.mod index 86c4c1e8db..6296caea96 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.9.0 // indirect + github.com/honeycombio/husky v0.10.3 github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 diff --git a/go.sum b/go.sum index 34b4d9dead..051ee8381d 100644 --- a/go.sum +++ b/go.sum @@ -259,12 +259,10 @@ github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.9.0 h1:TppxWwGCZb54qwHuPRAkxhht4b3btFcM2OvV1/Zs3/s= -github.com/honeycombio/husky v0.9.0/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= +github.com/honeycombio/husky v0.10.3 h1:407j6dXPG2ClzBGwIm/pgD+1N56jGamb3SZESttcuPg= +github.com/honeycombio/husky v0.10.3/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= -github.com/honeycombio/refinery v1.12.0 h1:o1392RE741TZp8+JMsj1sSpmHOs9+hVunPuozn4JPRY= -github.com/honeycombio/refinery v1.12.0/go.mod h1:ct0vuSsQrUiZZo7sMIQ2grCfeO985A0dwADjUuG81qE= github.com/iancoleman/strcase v0.2.0/go.mod 
h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= From a84b06170eaa48bbb1f1b602dc4ca5a83c45fea8 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Fri, 1 Apr 2022 11:39:01 +0530 Subject: [PATCH 145/351] added push for prom metrics to opsramp --- collect/collect.go | 36 +++++++++++ config/file_config.go | 8 ++- config_complete.toml | 29 +++++++++ go.mod | 4 ++ go.sum | 5 +- metrics/prometheus.go | 142 +++++++++++++++++++++++++++++++++++++++++- 6 files changed, 219 insertions(+), 5 deletions(-) diff --git a/collect/collect.go b/collect/collect.go index f83fcf361b..e973bdfa94 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -3,6 +3,7 @@ package collect import ( "errors" "fmt" + "net/http" "os" "runtime" "sort" @@ -275,6 +276,22 @@ func (i *InMemCollector) collect() { ticker := time.NewTicker(tickerDuration) defer ticker.Stop() + metricsConfig, err := i.Config.GetPrometheusMetricsConfig() + if err != nil { + i.Logger.Error().Logf("Failed to Load Prometheus Config:", err) + } + if metricsConfig.OpsRampMetricsRetryCount > 10 || metricsConfig.OpsRampMetricsRetryCount < 0 { + metricsConfig.OpsRampMetricsRetryCount = 2 + } + + metricsTicker := time.NewTicker(time.Duration(metricsConfig.OpsRampMetricsReportingInterval) * time.Second) + defer metricsTicker.Stop() + + metricsAuthToken, err := metrics.GetOpsRampOAuthToken(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampMetricsAPIKey, metricsConfig.OpsRampMetricsAPISecret) + if err != nil { + i.Logger.Error().Logf("Failed to get oauth token for OpsRamp Metrics err:", err) + } + // mutex is normally held by this goroutine at all times. // It is unlocked once per ticker cycle for tests. 
i.mutex.Lock() @@ -298,6 +315,25 @@ func (i *InMemCollector) collect() { i.processSpan(sp) default: select { + case <-metricsTicker.C: + statusCode, err := metrics.PushMetricsToOpsRamp(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampTenantID, *metricsAuthToken) + if statusCode == http.StatusProxyAuthRequired { // 🤦‍ OpsRamp uses this for bad auth token + metricsAuthToken, err = metrics.GetOpsRampOAuthToken(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampMetricsAPIKey, metricsConfig.OpsRampMetricsAPISecret) + if err != nil { + i.Logger.Error().Logf("Failed to get oauth token for OpsRamp Metrics err:", err) + } + } + if err != nil { + i.Logger.Error().Logf("prom request failed: %v", err) + for retries := metricsConfig.OpsRampMetricsRetryCount; retries > 0; retries-- { + i.Logger.Debug().Logf("retry count: %d", retries) + statusCode, err = metrics.PushMetricsToOpsRamp(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampTenantID, *metricsAuthToken) + if err == nil { + break + } + } + } + i.Logger.Debug().Logf("Status Code: %v Err: %v", statusCode, err) case <-ticker.C: i.sendTracesInCache(time.Now()) i.checkAlloc() diff --git a/config/file_config.go b/config/file_config.go index 35bfa59f14..036042ce6c 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -68,7 +68,13 @@ type HoneycombLoggerConfig struct { } type PrometheusMetricsConfig struct { - MetricsListenAddr string `validate:"required"` + MetricsListenAddr string `validate:"required"` + OpsRampMetricsAPI string `validate:"required,url"` + OpsRampTenantID string `validate:"required"` + OpsRampMetricsAPIKey string `validate:"required"` + OpsRampMetricsAPISecret string `validate:"required"` + OpsRampMetricsReportingInterval int64 `validate:"required"` + OpsRampMetricsRetryCount int64 `validate:"required"` } type HoneycombMetricsConfig struct { diff --git a/config_complete.toml b/config_complete.toml index 0dea90a8d3..d008679d4d 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ 
-298,6 +298,35 @@ MetricsReportingInterval = 3 [PrometheusMetrics] +# OpsRampMetricsAPI is the URL for the upstream OpsRamp API. +# Not Eligible for live reload. +OpsRampMetricsAPI = "" + +# OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. +# Not Eligible for live reload. +OpsRampTenantID = "" + +# OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. +# This is separate from the APIKeys used to authenticate regular +# traffic. +# Not Eligible for live reload. +OpsRampMetricsAPIKey = "" + +# OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. +# This is separate from the APISecret used to authenticate regular +# traffic. +# Not Eligible for live reload. +OpsRampMetricsAPISecret = "" + +# OpsRampMetricsReportingInterval is frequency specified in seconds at which +# the metrics are collected and sent to OpsRamp +# Not Eligible for live reload. +OpsRampMetricsReportingInterval = 10 + +# OpsRampMetricsRetryCount is the number of times we retry incase the send fails +# Not Eligible for live reload. +OpsRampMetricsRetryCount = 2 + # MetricsListenAddr determines the interface and port on which Prometheus will # listen for requests for /metrics. Must be different from the main Refinery # listener. 
diff --git a/go.mod b/go.mod index 410f803d9b..6a0b118c1a 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,9 @@ require ( github.com/fsnotify/fsnotify v1.5.1 github.com/go-playground/universal-translator v0.17.0 // indirect github.com/go-playground/validator v9.31.0+incompatible + github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 + github.com/golang/snappy v0.0.3 github.com/gomodule/redigo v1.8.8 github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.3 // indirect @@ -24,6 +26,8 @@ require ( github.com/leodido/go-urn v1.2.0 // indirect github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/client_model v0.2.0 + github.com/prometheus/prometheus v2.5.0+incompatible github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.8.1 github.com/spf13/viper v1.10.1 diff --git a/go.sum b/go.sum index 774561db3d..b3067d5e55 100644 --- a/go.sum +++ b/go.sum @@ -148,6 +148,7 @@ github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+Pugk github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 
h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= @@ -183,6 +184,7 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.8.8 h1:f6cXq6RRfiyrOJEV7p3JhLDlmawGBVBBP1MggY8Mo4E= github.com/gomodule/redigo v1.8.8/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= @@ -369,6 +371,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= +github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -442,7 +446,6 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U 
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= diff --git a/metrics/prometheus.go b/metrics/prometheus.go index f732741a1b..435d09bfa9 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -1,16 +1,25 @@ package metrics import ( + "bytes" + "encoding/json" + "fmt" + "github.com/golang/snappy" + io_prometheus_client "github.com/prometheus/client_model/go" + "github.com/prometheus/prometheus/prompb" + "io/ioutil" "net/http" + "strings" "sync" + "time" + "github.com/gogo/protobuf/proto" "github.com/gorilla/mux" + "github.com/jirs5/tracing-proxy/config" + "github.com/jirs5/tracing-proxy/logger" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promhttp" - - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" ) type PromMetrics struct { @@ -186,3 +195,130 @@ func (p *PromMetrics) IncrementWithLabels(name string, labels map[string]string) } } } + +func PushMetricsToOpsRamp(apiEndpoint, tenantID string, oauthToken OpsRampAuthTokenResponse) (int, error) { + 
metricFamilySlice, err := prometheus.DefaultGatherer.Gather() + if err != nil { + return -1, err + } + + timeSeries := []*prompb.TimeSeries{} + + for _, metricFamily := range metricFamilySlice { + + for _, metric := range metricFamily.GetMetric() { + samples := []prompb.Sample{} + labels := []*prompb.Label{ + { + Name: "__name__", + Value: metricFamily.GetName(), + }, + } + for _, label := range metric.GetLabel() { + labels = append(labels, &prompb.Label{ + Name: label.GetName(), + Value: label.GetValue(), + }) + } + + switch metricFamily.GetType() { + case io_prometheus_client.MetricType_COUNTER: + samples = append(samples, prompb.Sample{ + Value: metric.GetCounter().GetValue(), + Timestamp: time.Now().UnixMilli(), + }) + case io_prometheus_client.MetricType_GAUGE: + samples = append(samples, prompb.Sample{ + Value: metric.GetGauge().GetValue(), + Timestamp: time.Now().UnixMilli(), + }) + + } + timeSeries = append(timeSeries, &prompb.TimeSeries{Labels: labels, Samples: samples}) + } + + } + + request := prompb.WriteRequest{Timeseries: timeSeries} + + out, err := proto.Marshal(&request) + if err != nil { + return -1, err + } + + compressed := snappy.Encode(nil, out) + + URL := fmt.Sprintf("%s/metricsql/api/v7/tenants/%s/metrics", strings.TrimRight(apiEndpoint, "/"), tenantID) + + req, err := http.NewRequest(http.MethodPost, URL, bytes.NewBuffer(compressed)) + if err != nil { + return -1, err + } + + req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") + req.Header.Set("Connection", "close") + req.Header.Set("Content-Encoding", "snappy") + req.Header.Set("Content-Type", "application/x-protobuf") + + if !strings.Contains(oauthToken.Scope, "metrics:write") { + return -1, fmt.Errorf("auth token provided not not have metrics:write scope") + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", oauthToken.AccessToken)) + + client := http.Client{Timeout: time.Duration(10) * time.Second} + resp, err := client.Do(req) + if err != nil { + return -1, err + } + 
defer resp.Body.Close() + // Depending on version and configuration of the PGW, StatusOK or StatusAccepted may be returned. + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted { + body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. + return resp.StatusCode, fmt.Errorf("unexpected status code %d while pushing: %s", resp.StatusCode, body) + } + + return resp.StatusCode, nil +} + +type OpsRampAuthTokenResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + Scope string `json:"scope"` +} + +func GetOpsRampOAuthToken(apiEndpoint, apiKey, apiSecret string) (*OpsRampAuthTokenResponse, error) { + + authTokenResponse := new(OpsRampAuthTokenResponse) + + url := fmt.Sprintf("%s/auth/oauth/token", strings.TrimRight(apiEndpoint, "/")) + + requestBody := strings.NewReader("client_id=" + apiKey + "&client_secret=" + apiSecret + "&grant_type=client_credentials") + + req, err := http.NewRequest(http.MethodPost, url, requestBody) + if err != nil { + return nil, err + } + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + req.Header.Add("Accept", "application/json") + req.Header.Set("Connection", "close") + + client := http.Client{Timeout: time.Duration(10) * time.Second} + resp, err := client.Do(req) + if err != nil { + return nil, err + } + + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + err = json.Unmarshal(respBody, authTokenResponse) + if err != nil { + return nil, err + } + + return authTokenResponse, nil +} From 853215d4f289a12148aac951625fcade8afc8ee5 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Sat, 2 Apr 2022 17:38:57 +0530 Subject: [PATCH 146/351] seperate the config for OpsRamp Metrics and run it as a seperate goroutine --- collect/collect.go | 65 ++++++++++++++++++++++--------------------- config/config.go | 5 ++++ 
config/file_config.go | 52 +++++++++++++++++++++++++++++++++- config_complete.toml | 16 +++++++---- 4 files changed, 100 insertions(+), 38 deletions(-) diff --git a/collect/collect.go b/collect/collect.go index e973bdfa94..984879bf48 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -276,20 +276,42 @@ func (i *InMemCollector) collect() { ticker := time.NewTicker(tickerDuration) defer ticker.Stop() - metricsConfig, err := i.Config.GetPrometheusMetricsConfig() - if err != nil { - i.Logger.Error().Logf("Failed to Load Prometheus Config:", err) - } - if metricsConfig.OpsRampMetricsRetryCount > 10 || metricsConfig.OpsRampMetricsRetryCount < 0 { - metricsConfig.OpsRampMetricsRetryCount = 2 - } + if i.Config.GetSendMetricsToOpsRamp() { + metricsConfig, err := i.Config.GetOpsRampMetricsConfig() + if err != nil { + i.Logger.Error().Logf("Failed to Load OpsRampMetrics Config:", err) + } - metricsTicker := time.NewTicker(time.Duration(metricsConfig.OpsRampMetricsReportingInterval) * time.Second) - defer metricsTicker.Stop() + go func() { + metricsTicker := time.NewTicker(time.Duration(metricsConfig.OpsRampMetricsReportingInterval) * time.Second) + defer metricsTicker.Stop() - metricsAuthToken, err := metrics.GetOpsRampOAuthToken(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampMetricsAPIKey, metricsConfig.OpsRampMetricsAPISecret) - if err != nil { - i.Logger.Error().Logf("Failed to get oauth token for OpsRamp Metrics err:", err) + metricsAuthToken, err := metrics.GetOpsRampOAuthToken(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampMetricsAPIKey, metricsConfig.OpsRampMetricsAPISecret) + if err != nil { + i.Logger.Error().Logf("Failed to get oauth token for OpsRamp Metrics err:", err) + } + + for _ = range metricsTicker.C { + statusCode, err := metrics.PushMetricsToOpsRamp(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampTenantID, *metricsAuthToken) + if statusCode == http.StatusProxyAuthRequired { // 🤦‍ OpsRamp uses this for bad auth token + 
metricsAuthToken, err = metrics.GetOpsRampOAuthToken(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampMetricsAPIKey, metricsConfig.OpsRampMetricsAPISecret) + if err != nil { + i.Logger.Error().Logf("Failed to get oauth token for OpsRamp Metrics err:", err) + } + } + if err != nil { + i.Logger.Error().Logf("prom request failed: %v", err) + for retries := metricsConfig.OpsRampMetricsRetryCount; retries > 0; retries-- { + i.Logger.Debug().Logf("retry count: %d", retries) + statusCode, err = metrics.PushMetricsToOpsRamp(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampTenantID, *metricsAuthToken) + if err == nil { + break + } + } + } + i.Logger.Debug().Logf("Status Code: %v Err: %v", statusCode, err) + } + }() } // mutex is normally held by this goroutine at all times. @@ -315,25 +337,6 @@ func (i *InMemCollector) collect() { i.processSpan(sp) default: select { - case <-metricsTicker.C: - statusCode, err := metrics.PushMetricsToOpsRamp(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampTenantID, *metricsAuthToken) - if statusCode == http.StatusProxyAuthRequired { // 🤦‍ OpsRamp uses this for bad auth token - metricsAuthToken, err = metrics.GetOpsRampOAuthToken(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampMetricsAPIKey, metricsConfig.OpsRampMetricsAPISecret) - if err != nil { - i.Logger.Error().Logf("Failed to get oauth token for OpsRamp Metrics err:", err) - } - } - if err != nil { - i.Logger.Error().Logf("prom request failed: %v", err) - for retries := metricsConfig.OpsRampMetricsRetryCount; retries > 0; retries-- { - i.Logger.Debug().Logf("retry count: %d", retries) - statusCode, err = metrics.PushMetricsToOpsRamp(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampTenantID, *metricsAuthToken) - if err == nil { - break - } - } - } - i.Logger.Debug().Logf("Status Code: %v Err: %v", statusCode, err) case <-ticker.C: i.sendTracesInCache(time.Now()) i.checkAlloc() diff --git a/config/config.go b/config/config.go index 0e66b53e01..17829772e4 100644 
--- a/config/config.go +++ b/config/config.go @@ -108,6 +108,9 @@ type Config interface { // GetPrometheusMetricsConfig returns the config specific to PrometheusMetrics GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) + // GetOpsRampMetricsConfig returns the config specific to PrometheusMetrics + GetOpsRampMetricsConfig() (*OpsRampMetricsConfig, error) + // GetUpstreamBufferSize returns the size of the libtrace buffer to use for the upstream // libtrace client GetUpstreamBufferSize() int @@ -133,4 +136,6 @@ type Config interface { GetDryRunFieldName() string GetAddHostMetadataToTrace() bool + + GetSendMetricsToOpsRamp() bool } diff --git a/config/file_config.go b/config/file_config.go index 036042ce6c..f7906c61c5 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -48,6 +48,7 @@ type configContents struct { PeerManagement PeerManagementConfig `validate:"required"` InMemCollector InMemoryCollectorCacheCapacity `validate:"required"` AddHostMetadataToTrace bool + SendMetricsToOpsRamp bool } type InMemoryCollectorCacheCapacity struct { @@ -68,7 +69,10 @@ type HoneycombLoggerConfig struct { } type PrometheusMetricsConfig struct { - MetricsListenAddr string `validate:"required"` + MetricsListenAddr string `validate:"required"` +} + +type OpsRampMetricsConfig struct { OpsRampMetricsAPI string `validate:"required,url"` OpsRampTenantID string `validate:"required"` OpsRampMetricsAPIKey string `validate:"required"` @@ -128,6 +132,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("HoneycombLogger.LoggerSamplerEnabled", false) c.SetDefault("HoneycombLogger.LoggerSamplerThroughput", 5) c.SetDefault("AddHostMetadataToTrace", false) + c.SetDefault("SendMetricsToOpsRamp", false) c.SetConfigFile(config) err := c.ReadInConfig() @@ -638,6 +643,44 @@ func (f *fileConfig) GetPrometheusMetricsConfig() (PrometheusMetricsConfig, erro return *pcConfig, errors.New("No config found for PrometheusMetrics") } +func (f 
*fileConfig) GetOpsRampMetricsConfig() (*OpsRampMetricsConfig, error) { + f.mux.RLock() + defer f.mux.RUnlock() + + opsRampMetricsConfig := &OpsRampMetricsConfig{ + OpsRampMetricsAPI: "https://placeholder.api.com/", + OpsRampTenantID: "placeholder_tenantID", + OpsRampMetricsAPIKey: "placeholder_key", + OpsRampMetricsAPISecret: "placeholder_secret", + OpsRampMetricsReportingInterval: 60, + OpsRampMetricsRetryCount: 2, + } + + if sub := f.config.Sub("OpsRampMetrics"); sub != nil { + err := sub.UnmarshalExact(opsRampMetricsConfig) + if err != nil { + return opsRampMetricsConfig, err + } + + if opsRampMetricsConfig.OpsRampMetricsRetryCount < 0 || opsRampMetricsConfig.OpsRampMetricsRetryCount > 10 { + opsRampMetricsConfig.OpsRampMetricsRetryCount = 2 + } + + if opsRampMetricsConfig.OpsRampMetricsReportingInterval < 10 { + opsRampMetricsConfig.OpsRampMetricsReportingInterval = 10 + } + + v := validator.New() + err = v.Struct(opsRampMetricsConfig) + if err != nil { + return opsRampMetricsConfig, err + } + + return opsRampMetricsConfig, nil + } + return nil, errors.New("No config found for OpsRampMetrics") +} + func (f *fileConfig) GetSendDelay() (time.Duration, error) { f.mux.RLock() defer f.mux.RUnlock() @@ -726,3 +769,10 @@ func (f *fileConfig) GetAddHostMetadataToTrace() bool { return f.conf.AddHostMetadataToTrace } + +func (f *fileConfig) GetSendMetricsToOpsRamp() bool { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.SendMetricsToOpsRamp +} diff --git a/config_complete.toml b/config_complete.toml index d008679d4d..7d035382d2 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -126,6 +126,10 @@ Logger = "logrus" # metrics to a Honeycomb dataset. 
Metrics = "prometheus" +# Metrics are sent to OpsRamp (The collection happens based on configuration specifie +# in OpsRampMetrics and only works when the Metrics is set to "prometheus") +SendMetricsToOpsRamp = false + ######################### ## Peer Management ## ######################### @@ -297,7 +301,13 @@ MetricsReportingInterval = 3 #####################@## [PrometheusMetrics] +# MetricsListenAddr determines the interface and port on which Prometheus will +# listen for requests for /metrics. Must be different from the main Refinery +# listener. +# Not eligible for live reload. +MetricsListenAddr = "localhost:2112" +[OpsRampMetrics] # OpsRampMetricsAPI is the URL for the upstream OpsRamp API. # Not Eligible for live reload. OpsRampMetricsAPI = "" @@ -326,9 +336,3 @@ OpsRampMetricsReportingInterval = 10 # OpsRampMetricsRetryCount is the number of times we retry incase the send fails # Not Eligible for live reload. OpsRampMetricsRetryCount = 2 - -# MetricsListenAddr determines the interface and port on which Prometheus will -# listen for requests for /metrics. Must be different from the main Refinery -# listener. -# Not eligible for live reload. 
-MetricsListenAddr = "localhost:2112" From 406c826a67dfc75904c4a7e07b423bd9f9683793 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Mon, 4 Apr 2022 14:09:15 +0530 Subject: [PATCH 147/351] add proxy support for metrics push to opsramp --- collect/collect.go | 39 ---------- config/file_config.go | 5 ++ config_complete.toml | 20 ++++++ metrics/prometheus.go | 162 +++++++++++++++++++++++++++++++++++------- 4 files changed, 161 insertions(+), 65 deletions(-) diff --git a/collect/collect.go b/collect/collect.go index 984879bf48..f83fcf361b 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -3,7 +3,6 @@ package collect import ( "errors" "fmt" - "net/http" "os" "runtime" "sort" @@ -276,44 +275,6 @@ func (i *InMemCollector) collect() { ticker := time.NewTicker(tickerDuration) defer ticker.Stop() - if i.Config.GetSendMetricsToOpsRamp() { - metricsConfig, err := i.Config.GetOpsRampMetricsConfig() - if err != nil { - i.Logger.Error().Logf("Failed to Load OpsRampMetrics Config:", err) - } - - go func() { - metricsTicker := time.NewTicker(time.Duration(metricsConfig.OpsRampMetricsReportingInterval) * time.Second) - defer metricsTicker.Stop() - - metricsAuthToken, err := metrics.GetOpsRampOAuthToken(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampMetricsAPIKey, metricsConfig.OpsRampMetricsAPISecret) - if err != nil { - i.Logger.Error().Logf("Failed to get oauth token for OpsRamp Metrics err:", err) - } - - for _ = range metricsTicker.C { - statusCode, err := metrics.PushMetricsToOpsRamp(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampTenantID, *metricsAuthToken) - if statusCode == http.StatusProxyAuthRequired { // 🤦‍ OpsRamp uses this for bad auth token - metricsAuthToken, err = metrics.GetOpsRampOAuthToken(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampMetricsAPIKey, metricsConfig.OpsRampMetricsAPISecret) - if err != nil { - i.Logger.Error().Logf("Failed to get oauth token for OpsRamp Metrics err:", err) - } - } - if err != nil { - 
i.Logger.Error().Logf("prom request failed: %v", err) - for retries := metricsConfig.OpsRampMetricsRetryCount; retries > 0; retries-- { - i.Logger.Debug().Logf("retry count: %d", retries) - statusCode, err = metrics.PushMetricsToOpsRamp(metricsConfig.OpsRampMetricsAPI, metricsConfig.OpsRampTenantID, *metricsAuthToken) - if err == nil { - break - } - } - } - i.Logger.Debug().Logf("Status Code: %v Err: %v", statusCode, err) - } - }() - } - // mutex is normally held by this goroutine at all times. // It is unlocked once per ticker cycle for tests. i.mutex.Lock() diff --git a/config/file_config.go b/config/file_config.go index f7906c61c5..b3d79fabf0 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -79,6 +79,11 @@ type OpsRampMetricsConfig struct { OpsRampMetricsAPISecret string `validate:"required"` OpsRampMetricsReportingInterval int64 `validate:"required"` OpsRampMetricsRetryCount int64 `validate:"required"` + ProxyProtocol string + ProxyServer string + ProxyPort int64 + ProxyUserName string + ProxyPassword string } type HoneycombMetricsConfig struct { diff --git a/config_complete.toml b/config_complete.toml index 7d035382d2..b729dfbb9f 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -336,3 +336,23 @@ OpsRampMetricsReportingInterval = 10 # OpsRampMetricsRetryCount is the number of times we retry incase the send fails # Not Eligible for live reload. OpsRampMetricsRetryCount = 2 + +# ProxyProtocol accepts http and https +# Not Eligible for live reload. +ProxyProtocol = "" + +# ProxyServer takes the proxy server address +# Not Eligible for live reload. +ProxyServer = "" + +# ProxyPort takes the proxy server port +# Not Eligible for live reload. +ProxyPort = 3128 + +# ProxyUserName takes the proxy username +# Not Eligible for live reload. +ProxyUserName = "" + +# ProxyPassword takes the proxy password +# Not Eligible for live reload. 
+ProxyPassword = "" diff --git a/metrics/prometheus.go b/metrics/prometheus.go index 435d09bfa9..8c6190f35c 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -9,6 +9,7 @@ import ( "github.com/prometheus/prometheus/prompb" "io/ioutil" "net/http" + "net/url" "strings" "sync" "time" @@ -30,12 +31,48 @@ type PromMetrics struct { metrics map[string]interface{} lock sync.RWMutex + Client http.Client + oAuthToken *OpsRampAuthTokenResponse + apiEndpoint string + tenantID string + apiKey string + apiSecret string + retryCount int64 + prefix string } func (p *PromMetrics) Start() error { p.Logger.Debug().Logf("Starting PromMetrics") defer func() { p.Logger.Debug().Logf("Finished starting PromMetrics") }() + + if p.Config.GetSendMetricsToOpsRamp() { + metricsConfig, err := p.Config.GetOpsRampMetricsConfig() + if err != nil { + p.Logger.Error().Logf("Failed to Load OpsRampMetrics Config:", err) + } + + go func() { + metricsTicker := time.NewTicker(time.Duration(metricsConfig.OpsRampMetricsReportingInterval) * time.Second) + defer metricsTicker.Stop() + p.PopulateOpsRampMetrics(metricsConfig) + + // populating the oAuth Token Initially + err := p.RenewOpsRampOAuthToken() + if err != nil { + p.Logger.Error().Logf("error while initializing oAuth Token Err: %v", err) + } + + for _ = range metricsTicker.C { + statusCode, err := p.PushMetricsToOpsRamp() + if err != nil { + p.Logger.Error().Logf("error while pushing metrics with statusCode: %d and Error: %v", statusCode, err) + } + } + }() + + } + pc, err := p.Config.GetPrometheusMetricsConfig() if err != nil { return err @@ -196,7 +233,61 @@ func (p *PromMetrics) IncrementWithLabels(name string, labels map[string]string) } } -func PushMetricsToOpsRamp(apiEndpoint, tenantID string, oauthToken OpsRampAuthTokenResponse) (int, error) { +type OpsRampMetrics struct { + Client http.Client + oAuthToken *OpsRampAuthTokenResponse + apiEndpoint string + tenantID string + apiKey string + apiSecret string + 
retryCount int64 + + Logger logger.Logger `inject:""` + lock sync.RWMutex +} + +type OpsRampAuthTokenResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + Scope string `json:"scope"` +} + +func (p *PromMetrics) PopulateOpsRampMetrics(metricsConfig *config.OpsRampMetricsConfig) { + + p.apiEndpoint = metricsConfig.OpsRampMetricsAPI + p.apiKey = metricsConfig.OpsRampMetricsAPIKey + p.apiSecret = metricsConfig.OpsRampMetricsAPISecret + p.tenantID = metricsConfig.OpsRampTenantID + p.retryCount = metricsConfig.OpsRampMetricsRetryCount + + proxyUrl := "" + if metricsConfig.ProxyServer != "" && metricsConfig.ProxyProtocol != "" { + proxyUrl = fmt.Sprintf("%s://%s:%d/", metricsConfig.ProxyProtocol, metricsConfig.ProxyServer, metricsConfig.ProxyPort) + if metricsConfig.ProxyUserName != "" && metricsConfig.ProxyPassword != "" { + proxyUrl = fmt.Sprintf("%s://%s:%s@%s:%d", metricsConfig.ProxyProtocol, metricsConfig.ProxyUserName, metricsConfig.ProxyPassword, metricsConfig.ProxyServer, metricsConfig.ProxyPort) + p.Logger.Debug().Logf("Using Authentication for Proxy Communication for Metrics") + } + } + + p.Client = http.Client{ + Transport: &http.Transport{Proxy: http.ProxyFromEnvironment}, + Timeout: time.Duration(10) * time.Second, + } + if proxyUrl != "" { + proxyURL, err := url.Parse(proxyUrl) + if err != nil { + p.Logger.Error().Logf("skipping proxy err: %v", err) + } else { + p.Client = http.Client{ + Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)}, + Timeout: time.Duration(10) * time.Second, + } + } + } +} + +func (p *PromMetrics) PushMetricsToOpsRamp() (int, error) { metricFamilySlice, err := prometheus.DefaultGatherer.Gather() if err != nil { return -1, err @@ -248,7 +339,7 @@ func PushMetricsToOpsRamp(apiEndpoint, tenantID string, oauthToken OpsRampAuthTo compressed := snappy.Encode(nil, out) - URL := fmt.Sprintf("%s/metricsql/api/v7/tenants/%s/metrics", 
strings.TrimRight(apiEndpoint, "/"), tenantID) + URL := fmt.Sprintf("%s/metricsql/api/v7/tenants/%s/metrics", strings.TrimRight(p.apiEndpoint, "/"), p.tenantID) req, err := http.NewRequest(http.MethodPost, URL, bytes.NewBuffer(compressed)) if err != nil { @@ -260,65 +351,84 @@ func PushMetricsToOpsRamp(apiEndpoint, tenantID string, oauthToken OpsRampAuthTo req.Header.Set("Content-Encoding", "snappy") req.Header.Set("Content-Type", "application/x-protobuf") - if !strings.Contains(oauthToken.Scope, "metrics:write") { + if !strings.Contains(p.oAuthToken.Scope, "metrics:write") { return -1, fmt.Errorf("auth token provided not not have metrics:write scope") } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", oauthToken.AccessToken)) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", p.oAuthToken.AccessToken)) - client := http.Client{Timeout: time.Duration(10) * time.Second} - resp, err := client.Do(req) + resp, err := p.SendWithRetry(req) if err != nil { return -1, err } defer resp.Body.Close() // Depending on version and configuration of the PGW, StatusOK or StatusAccepted may be returned. + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + p.Logger.Error().Logf("failed to parse response body Err: %v", err) + } if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted { - body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. 
return resp.StatusCode, fmt.Errorf("unexpected status code %d while pushing: %s", resp.StatusCode, body) } + p.Logger.Debug().Logf("metrics push response: %v", string(body)) return resp.StatusCode, nil } -type OpsRampAuthTokenResponse struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - ExpiresIn int `json:"expires_in"` - Scope string `json:"scope"` -} - -func GetOpsRampOAuthToken(apiEndpoint, apiKey, apiSecret string) (*OpsRampAuthTokenResponse, error) { +func (p *PromMetrics) RenewOpsRampOAuthToken() error { - authTokenResponse := new(OpsRampAuthTokenResponse) + p.oAuthToken = new(OpsRampAuthTokenResponse) - url := fmt.Sprintf("%s/auth/oauth/token", strings.TrimRight(apiEndpoint, "/")) + url := fmt.Sprintf("%s/auth/oauth/token", strings.TrimRight(p.apiEndpoint, "/")) - requestBody := strings.NewReader("client_id=" + apiKey + "&client_secret=" + apiSecret + "&grant_type=client_credentials") + requestBody := strings.NewReader("client_id=" + p.apiKey + "&client_secret=" + p.apiSecret + "&grant_type=client_credentials") req, err := http.NewRequest(http.MethodPost, url, requestBody) if err != nil { - return nil, err + return err } req.Header.Add("Content-Type", "application/x-www-form-urlencoded") req.Header.Add("Accept", "application/json") req.Header.Set("Connection", "close") - client := http.Client{Timeout: time.Duration(10) * time.Second} - resp, err := client.Do(req) + resp, err := p.Client.Do(req) if err != nil { - return nil, err + return err } respBody, err := ioutil.ReadAll(resp.Body) if err != nil { - return nil, err + return err } defer resp.Body.Close() - err = json.Unmarshal(respBody, authTokenResponse) + err = json.Unmarshal(respBody, p.oAuthToken) if err != nil { - return nil, err + return err + } + + return nil +} + +func (p *PromMetrics) SendWithRetry(request *http.Request) (*http.Response, error) { + + response, err := p.Client.Do(request) + if err == nil && (response.StatusCode == http.StatusOK || 
response.StatusCode == http.StatusAccepted) { + return response, nil + } + if response.StatusCode == http.StatusProxyAuthRequired { // OpsRamp uses this for bad auth token + p.RenewOpsRampOAuthToken() + } + + // retry if the error is not nil + for retries := p.retryCount; retries > 0; retries-- { + response, err = p.Client.Do(request) + if err == nil && (response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted) { + return response, nil + } + if response.StatusCode == http.StatusProxyAuthRequired { // OpsRamp uses this for bad auth token + p.RenewOpsRampOAuthToken() + } } - return authTokenResponse, nil + return response, err } From 2ef88b8702659cd3bf1c3fd0033b6d2b6a7e4c64 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Mon, 4 Apr 2022 16:30:25 +0530 Subject: [PATCH 148/351] fixed multiple go routines for opsramp metrics by limiting to single prefix --- metrics/prometheus.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/metrics/prometheus.go b/metrics/prometheus.go index 8c6190f35c..f445692275 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -46,7 +46,7 @@ func (p *PromMetrics) Start() error { p.Logger.Debug().Logf("Starting PromMetrics") defer func() { p.Logger.Debug().Logf("Finished starting PromMetrics") }() - if p.Config.GetSendMetricsToOpsRamp() { + if p.prefix == "" && p.Config.GetSendMetricsToOpsRamp() { metricsConfig, err := p.Config.GetOpsRampMetricsConfig() if err != nil { p.Logger.Error().Logf("Failed to Load OpsRampMetrics Config:", err) @@ -412,20 +412,20 @@ func (p *PromMetrics) RenewOpsRampOAuthToken() error { func (p *PromMetrics) SendWithRetry(request *http.Request) (*http.Response, error) { response, err := p.Client.Do(request) - if err == nil && (response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted) { + if err == nil && response != nil && (response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted) { return response, 
nil } - if response.StatusCode == http.StatusProxyAuthRequired { // OpsRamp uses this for bad auth token + if response != nil && response.StatusCode == http.StatusProxyAuthRequired { // OpsRamp uses this for bad auth token p.RenewOpsRampOAuthToken() } // retry if the error is not nil for retries := p.retryCount; retries > 0; retries-- { response, err = p.Client.Do(request) - if err == nil && (response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted) { + if err == nil && response != nil && (response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted) { return response, nil } - if response.StatusCode == http.StatusProxyAuthRequired { // OpsRamp uses this for bad auth token + if response != nil && response.StatusCode == http.StatusProxyAuthRequired { // OpsRamp uses this for bad auth token p.RenewOpsRampOAuthToken() } } From 63af9a174372f47eab7bf9d0d47b0eb7133cc209 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Tue, 5 Apr 2022 10:09:08 +0530 Subject: [PATCH 149/351] added support for pushing histogram and summary metrics to opsramp --- go.mod | 1 + metrics/prometheus.go | 136 +++++++++++++++++++++++++++++++++++++----- 2 files changed, 121 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index 6a0b118c1a..a400150d2c 100644 --- a/go.mod +++ b/go.mod @@ -27,6 +27,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 + github.com/prometheus/common v0.26.0 github.com/prometheus/prometheus v2.5.0+incompatible github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.8.1 diff --git a/metrics/prometheus.go b/metrics/prometheus.go index f445692275..aa89c4a08a 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/golang/snappy" 
io_prometheus_client "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/prompb" "io/ioutil" "net/http" @@ -293,18 +294,14 @@ func (p *PromMetrics) PushMetricsToOpsRamp() (int, error) { return -1, err } + presentTime := time.Now().UnixMilli() + timeSeries := []*prompb.TimeSeries{} for _, metricFamily := range metricFamilySlice { for _, metric := range metricFamily.GetMetric() { - samples := []prompb.Sample{} - labels := []*prompb.Label{ - { - Name: "__name__", - Value: metricFamily.GetName(), - }, - } + labels := []*prompb.Label{} for _, label := range metric.GetLabel() { labels = append(labels, &prompb.Label{ Name: label.GetName(), @@ -314,20 +311,127 @@ func (p *PromMetrics) PushMetricsToOpsRamp() (int, error) { switch metricFamily.GetType() { case io_prometheus_client.MetricType_COUNTER: - samples = append(samples, prompb.Sample{ - Value: metric.GetCounter().GetValue(), - Timestamp: time.Now().UnixMilli(), + timeSeries = append(timeSeries, &prompb.TimeSeries{ + Labels: append(labels, &prompb.Label{ + Name: model.MetricNameLabel, + Value: metricFamily.GetName(), + }), + Samples: []prompb.Sample{ + { + Value: metric.GetCounter().GetValue(), + Timestamp: presentTime, + }, + }, }) case io_prometheus_client.MetricType_GAUGE: - samples = append(samples, prompb.Sample{ - Value: metric.GetGauge().GetValue(), - Timestamp: time.Now().UnixMilli(), + timeSeries = append(timeSeries, &prompb.TimeSeries{ + Labels: append(labels, &prompb.Label{ + Name: model.MetricNameLabel, + Value: metricFamily.GetName(), + }), + Samples: []prompb.Sample{ + { + Value: metric.GetGauge().GetValue(), + Timestamp: presentTime, + }, + }, + }) + case io_prometheus_client.MetricType_HISTOGRAM: + // samples for all the buckets + for _, bucket := range metric.GetHistogram().GetBucket() { + timeSeries = append(timeSeries, &prompb.TimeSeries{ + Labels: append(labels, []*prompb.Label{ + { + Name: 
model.MetricNameLabel, + Value: metricFamily.GetName(), + }, + { + Name: model.BucketLabel, + Value: fmt.Sprintf("%v", bucket.GetUpperBound()), + }, + }...), + Samples: []prompb.Sample{ + { + Value: float64(bucket.GetCumulativeCount()), + Timestamp: presentTime, + }, + }, + }) + } + // samples for count and sum + timeSeries = append(timeSeries, &prompb.TimeSeries{ + Labels: append(labels, &prompb.Label{ + Name: model.MetricNameLabel, + Value: fmt.Sprintf("%s_sum", metricFamily.GetName()), + }), + Samples: []prompb.Sample{ + { + Value: metric.GetHistogram().GetSampleSum(), + Timestamp: presentTime, + }, + }, + }) + timeSeries = append(timeSeries, &prompb.TimeSeries{ + Labels: append(labels, &prompb.Label{ + Name: model.MetricNameLabel, + Value: fmt.Sprintf("%s_count", metricFamily.GetName()), + }), + Samples: []prompb.Sample{ + { + Value: float64(metric.GetHistogram().GetSampleCount()), + Timestamp: presentTime, + }, + }, + }) + case io_prometheus_client.MetricType_SUMMARY: + // samples for all the quantiles + for _, quantile := range metric.GetSummary().GetQuantile() { + timeSeries = append(timeSeries, &prompb.TimeSeries{ + Labels: append(labels, []*prompb.Label{ + { + Name: model.MetricNameLabel, + Value: metricFamily.GetName(), + }, + { + Name: model.QuantileLabel, + Value: fmt.Sprintf("%v", quantile.GetQuantile()), + }, + }...), + Samples: []prompb.Sample{ + { + Value: quantile.GetValue(), + Timestamp: presentTime, + }, + }, + }) + } + // samples for count and sum + timeSeries = append(timeSeries, &prompb.TimeSeries{ + Labels: append(labels, &prompb.Label{ + Name: model.MetricNameLabel, + Value: fmt.Sprintf("%s_sum", metricFamily.GetName()), + }), + Samples: []prompb.Sample{ + { + Value: metric.GetSummary().GetSampleSum(), + Timestamp: presentTime, + }, + }, + }) + timeSeries = append(timeSeries, &prompb.TimeSeries{ + Labels: append(labels, &prompb.Label{ + Name: model.MetricNameLabel, + Value: fmt.Sprintf("%s_count", metricFamily.GetName()), + }), + Samples: 
[]prompb.Sample{ + { + Value: float64(metric.GetSummary().GetSampleCount()), + Timestamp: presentTime, + }, + }, }) - } - timeSeries = append(timeSeries, &prompb.TimeSeries{Labels: labels, Samples: samples}) } - } request := prompb.WriteRequest{Timeseries: timeSeries} From b3d6011489393a4d1adb96235ea644154321de57 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 6 Apr 2022 10:24:04 +0530 Subject: [PATCH 150/351] added support to send selective metrics to OpsRamp --- config/file_config.go | 1 + config_complete.toml | 26 ++++++++++++++++---------- metrics/prometheus.go | 28 +++++++++++++++------------- 3 files changed, 32 insertions(+), 23 deletions(-) diff --git a/config/file_config.go b/config/file_config.go index b3d79fabf0..d81128c447 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -84,6 +84,7 @@ type OpsRampMetricsConfig struct { ProxyPort int64 ProxyUserName string ProxyPassword string + OpsRampMetricsList []string } type HoneycombMetricsConfig struct { diff --git a/config_complete.toml b/config_complete.toml index b729dfbb9f..0bae5ab4c2 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -40,10 +40,10 @@ CompressPeerCommunication = true # list, all API keys are accepted. # Eligible for live reload. APIKeys = [ - # "replace-me", - # "more-optional-keys", - "*", # wildcard accept all keys - ] + # "replace-me", + # "more-optional-keys", + "*", # wildcard accept all keys +] # HoneycombAPI is the URL for the upstream Honeycomb API. # Eligible for live reload. @@ -142,12 +142,12 @@ Type = "file" # hostname (or ip address) and port. All servers in the cluster should be in # this list, including this host. 
Peers = [ - "http://127.0.0.1:8083", - # "http://127.0.0.1:8083", - # "http://10.1.2.3.4:8080", - # "http://refinery-1231:8080", - # "http://peer-3.fqdn" // assumes port 80 - ] + "http://127.0.0.1:8083", + # "http://127.0.0.1:8083", + # "http://10.1.2.3.4:8080", + # "http://refinery-1231:8080", + # "http://peer-3.fqdn" // assumes port 80 +] # [PeerManagement] # Type = "redis" @@ -356,3 +356,9 @@ ProxyUserName = "" # ProxyPassword takes the proxy password # Not Eligible for live reload. ProxyPassword = "" + +# OpsRampMetricsList is a list of regular expressions which match the metric +# names. Keep the list as small as possible since too many regular expressions can lead to bad performance. +# Internally all the regex in the list are concatinated using '|' to make the computation little faster. +# Not Eligible for live reload +OpsRampMetricsList = [".*"] diff --git a/metrics/prometheus.go b/metrics/prometheus.go index aa89c4a08a..dba0092134 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -11,6 +11,7 @@ import ( "io/ioutil" "net/http" "net/url" + "regexp" "strings" "sync" "time" @@ -39,6 +40,7 @@ type PromMetrics struct { apiKey string apiSecret string retryCount int64 + re *regexp.Regexp prefix string } @@ -234,19 +236,6 @@ func (p *PromMetrics) IncrementWithLabels(name string, labels map[string]string) } } -type OpsRampMetrics struct { - Client http.Client - oAuthToken *OpsRampAuthTokenResponse - apiEndpoint string - tenantID string - apiKey string - apiSecret string - retryCount int64 - - Logger logger.Logger `inject:""` - lock sync.RWMutex -} - type OpsRampAuthTokenResponse struct { AccessToken string `json:"access_token"` TokenType string `json:"token_type"` @@ -262,6 +251,16 @@ func (p *PromMetrics) PopulateOpsRampMetrics(metricsConfig *config.OpsRampMetric p.tenantID = metricsConfig.OpsRampTenantID p.retryCount = metricsConfig.OpsRampMetricsRetryCount + // Creating Regex for list of metrics + regexString := ".*" // default value is to take 
everything + if len(metricsConfig.OpsRampMetricsList) >= 1 { + regexString = metricsConfig.OpsRampMetricsList[0] + for index := 0; index < len(metricsConfig.OpsRampMetricsList); index++ { + regexString = fmt.Sprintf("%s|%s", regexString, metricsConfig.OpsRampMetricsList[index]) + } + } + p.re = regexp.MustCompile(regexString) + proxyUrl := "" if metricsConfig.ProxyServer != "" && metricsConfig.ProxyProtocol != "" { proxyUrl = fmt.Sprintf("%s://%s:%d/", metricsConfig.ProxyProtocol, metricsConfig.ProxyServer, metricsConfig.ProxyPort) @@ -300,6 +299,9 @@ func (p *PromMetrics) PushMetricsToOpsRamp() (int, error) { for _, metricFamily := range metricFamilySlice { + if !p.re.MatchString(metricFamily.GetName()) { + continue + } for _, metric := range metricFamily.GetMetric() { labels := []*prompb.Label{} for _, label := range metric.GetLabel() { From 4ac9663fa50d1f220ddcdebb9af4f8853c9aaf29 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 6 Apr 2022 11:20:35 +0530 Subject: [PATCH 151/351] removal of honeycomb metrics --- cmd/tracing-proxy/main.go | 6 +- config/config.go | 7 - config/file_config.go | 54 +-- config/mock.go | 8 - config_complete.toml | 45 +-- metrics/honeycomb.go | 369 ------------------ metrics/metrics.go | 30 +- metrics/{prometheus.go => opsramp.go} | 32 +- .../{prometheus_test.go => opsramp_test.go} | 4 +- 9 files changed, 28 insertions(+), 527 deletions(-) delete mode 100644 metrics/honeycomb.go rename metrics/{prometheus.go => opsramp.go} (92%) rename metrics/{prometheus_test.go => opsramp_test.go} (95%) diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index 41d63b4ad2..a13be3cfe6 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -95,7 +95,7 @@ func main() { // get desired implementation for each dependency to inject lgr := logger.GetLoggerImplementation(c) collector := collect.GetCollectorImplementation(c) - metricsConfig := metrics.GetMetricsImplementation(c, "") + metricsConfig := 
metrics.GetMetricsImplementation("") shrdr := sharder.GetSharderImplementation(c) samplerFactory := &sample.SamplerFactory{} @@ -128,8 +128,8 @@ func main() { TLSHandshakeTimeout: 1200 * time.Millisecond, } - upstreamMetricsConfig := metrics.GetMetricsImplementation(c, "libtrace_upstream") - peerMetricsConfig := metrics.GetMetricsImplementation(c, "libtrace_peer") + upstreamMetricsConfig := metrics.GetMetricsImplementation("libtrace_upstream") + peerMetricsConfig := metrics.GetMetricsImplementation("libtrace_peer") userAgentAddition := "tracing-proxy/" + version upstreamClient, err := libtrace.NewClient(libtrace.ClientConfig{ diff --git a/config/config.go b/config/config.go index 17829772e4..b38ec30343 100644 --- a/config/config.go +++ b/config/config.go @@ -98,13 +98,6 @@ type Config interface { // GetSamplerConfigForDataset returns the sampler type to use for the given dataset GetSamplerConfigForDataset(string) (interface{}, error) - // GetMetricsType returns the type of metrics to use. 
Valid types are in the - // metrics package - GetMetricsType() (string, error) - - // GetHoneycombMetricsConfig returns the config specific to HoneycombMetrics - GetHoneycombMetricsConfig() (HoneycombMetricsConfig, error) - // GetPrometheusMetricsConfig returns the config specific to PrometheusMetrics GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) diff --git a/config/file_config.go b/config/file_config.go index d81128c447..f378dbf4b5 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -35,7 +35,6 @@ type configContents struct { LoggingLevel string `validate:"required"` Collector string `validate:"required,oneof= InMemCollector"` Sampler string `validate:"required,oneof= DeterministicSampler DynamicSampler EMADynamicSampler RulesBasedSampler TotalThroughputSampler"` - Metrics string `validate:"required,oneof= prometheus honeycomb"` SendDelay time.Duration `validate:"required"` TraceTimeout time.Duration `validate:"required"` MaxBatchSize uint `validate:"required"` @@ -87,13 +86,6 @@ type OpsRampMetricsConfig struct { OpsRampMetricsList []string } -type HoneycombMetricsConfig struct { - MetricsHoneycombAPI string `validate:"required,url"` - MetricsAPIKey string `validate:"required"` - MetricsDataset string `validate:"required"` - MetricsReportingInterval int64 `validate:"required"` -} - type PeerManagementConfig struct { Type string `validate:"required,oneof= file redis"` Peers []string `validate:"dive,url"` @@ -127,7 +119,6 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("Logger", "logrus") c.SetDefault("LoggingLevel", "debug") c.SetDefault("Collector", "InMemCollector") - c.SetDefault("Metrics", "honeycomb") c.SetDefault("SendDelay", 2*time.Second) c.SetDefault("TraceTimeout", 60*time.Second) c.SetDefault("MaxBatchSize", 500) @@ -262,22 +253,10 @@ func (f *fileConfig) validateConditionalConfigs() error { } // validate metrics config - metricsType, err := f.GetMetricsType() + _, err = 
f.GetPrometheusMetricsConfig() if err != nil { return err } - if metricsType == "honeycomb" { - _, err = f.GetHoneycombMetricsConfig() - if err != nil { - return err - } - } - if metricsType == "prometheus" { - _, err = f.GetPrometheusMetricsConfig() - if err != nil { - return err - } - } return nil } @@ -596,37 +575,6 @@ func (f *fileConfig) GetInMemCollectorCacheCapacity() (InMemoryCollectorCacheCap return *capacity, errors.New("No config found for inMemCollector") } -func (f *fileConfig) GetMetricsType() (string, error) { - f.mux.RLock() - defer f.mux.RUnlock() - - return f.conf.Metrics, nil -} - -func (f *fileConfig) GetHoneycombMetricsConfig() (HoneycombMetricsConfig, error) { - f.mux.RLock() - defer f.mux.RUnlock() - - hmConfig := &HoneycombMetricsConfig{} - if sub := f.config.Sub("HoneycombMetrics"); sub != nil { - err := sub.UnmarshalExact(hmConfig) - if err != nil { - return *hmConfig, err - } - - hmConfig.MetricsAPIKey = f.config.GetString("HoneycombMetrics.MetricsAPIKey") - - v := validator.New() - err = v.Struct(hmConfig) - if err != nil { - return *hmConfig, err - } - - return *hmConfig, nil - } - return *hmConfig, errors.New("No config found for HoneycombMetrics") -} - func (f *fileConfig) GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config/mock.go b/config/mock.go index a4025c1042..05e43facc5 100644 --- a/config/mock.go +++ b/config/mock.go @@ -48,8 +48,6 @@ type MockConfig struct { GetSamplerTypeVal interface{} GetMetricsTypeErr error GetMetricsTypeVal string - GetHoneycombMetricsConfigErr error - GetHoneycombMetricsConfigVal HoneycombMetricsConfig GetPrometheusMetricsConfigErr error GetPrometheusMetricsConfigVal PrometheusMetricsConfig GetSendDelayErr error @@ -197,12 +195,6 @@ func (m *MockConfig) GetMetricsType() (string, error) { return m.GetMetricsTypeVal, m.GetMetricsTypeErr } -func (m *MockConfig) GetHoneycombMetricsConfig() (HoneycombMetricsConfig, error) { - m.Mux.RLock() - 
defer m.Mux.RUnlock() - - return m.GetHoneycombMetricsConfigVal, m.GetHoneycombMetricsConfigErr -} func (m *MockConfig) GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) { m.Mux.RLock() defer m.Mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index 0bae5ab4c2..6663a14f56 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -99,6 +99,10 @@ PeerBufferSize = 10000 # Not eligible for live reload AddHostMetadataToTrace = false +# Metrics are sent to OpsRamp (The collection happens based on configuration specifie +# in OpsRampMetrics and only works when the Metrics is set to "prometheus") +SendMetricsToOpsRamp = false + ############################ ## Implementation Choices ## ############################ @@ -120,16 +124,6 @@ Collector = "InMemCollector" # honeycomb option will send them to a Honeycomb dataset. Logger = "logrus" -# Metrics describes which service to use for Refinery metrics. Valid options are -# "prometheus" and "honeycomb". The prometheus option starts a listener that -# will reply to a request for /metrics. The honeycomb option will send summary -# metrics to a Honeycomb dataset. -Metrics = "prometheus" - -# Metrics are sent to OpsRamp (The collection happens based on configuration specifie -# in OpsRampMetrics and only works when the Metrics is set to "prometheus") -SendMetricsToOpsRamp = false - ######################### ## Peer Management ## ######################### @@ -265,37 +259,6 @@ LoggerSamplerEnabled = true # Not eligible for live reload. LoggerSamplerThroughput = 10 -####################### -## Honeycomb Metrics ## -####################### - -# HoneycombMetrics is a section of the config only used if you are using the -# HoneycombMetrics to send all metrics to a Honeycomb Dataset. If you are using a -# different metrics service (eg prometheus or metricsd) you can leave all this -# commented out. - -[HoneycombMetrics] - -# MetricsHoneycombAPI is the URL for the upstream Honeycomb API. 
-# Eligible for live reload. -MetricsHoneycombAPI = "https://api.honeycomb.io" - -# MetricsAPIKey is the API key to use to send log events to the Honeycomb logging -# dataset. This is separate from the APIKeys used to authenticate regular -# traffic. -# Eligible for live reload. -MetricsAPIKey = "abcd1234" - -# MetricsDataset is the name of the dataset to which to send Refinery metrics -# Eligible for live reload. -MetricsDataset = "Refinery Metrics" - -# MetricsReportingInterval is the frequency (in seconds) to send metric events -# to Honeycomb. Between 1 and 60 is recommended. -# Not eligible for live reload. -MetricsReportingInterval = 3 - - #####################@## ## Prometheus Metrics ## #####################@## diff --git a/metrics/honeycomb.go b/metrics/honeycomb.go deleted file mode 100644 index eae1e616e3..0000000000 --- a/metrics/honeycomb.go +++ /dev/null @@ -1,369 +0,0 @@ -package metrics - -import ( - "context" - "math" - "net/http" - "os" - "runtime" - "sort" - "sync" - "time" - - libtrace "github.com/honeycombio/libhoney-go" - "github.com/honeycombio/libhoney-go/transmission" - - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" -) - -type HoneycombMetrics struct { - Config config.Config `inject:""` - Logger logger.Logger `inject:""` - UpstreamTransport *http.Transport `inject:"upstreamTransport"` - Version string `inject:"version"` - - countersLock sync.Mutex - counters map[string]*counter - gaugesLock sync.Mutex - gauges map[string]*gauge - histogramsLock sync.Mutex - histograms map[string]*histogram - - libhClient *libtrace.Client - - latestMemStatsLock sync.RWMutex - latestMemStats runtime.MemStats - - //reportingFreq is the interval with which to report statistics - reportingFreq int64 - reportingCancelFunc func() - - prefix string -} - -func (h *HoneycombMetrics) GaugeWithLabels(name string, labels map[string]string, value float64) { - panic("implement 
me") -} - -func (h *HoneycombMetrics) IncrementWithLabels(name string, labels map[string]string) { - panic("implement me") -} - -func (h *HoneycombMetrics) RegisterWithDescriptionLabels(name string, metricType string, desc string, labels []string) { - panic("implement me") -} - -type counter struct { - lock sync.Mutex - name string - val int -} - -type gauge struct { - lock sync.Mutex - name string - val float64 -} - -type histogram struct { - lock sync.Mutex - name string - vals []float64 -} - -func (h *HoneycombMetrics) Start() error { - h.Logger.Debug().Logf("Starting HoneycombMetrics") - defer func() { h.Logger.Debug().Logf("Finished starting HoneycombMetrics") }() - mc, err := h.Config.GetHoneycombMetricsConfig() - if err != nil { - return err - } - if mc.MetricsReportingInterval < 1 { - mc.MetricsReportingInterval = 1 - } - h.reportingFreq = mc.MetricsReportingInterval - - if err = h.initlibtrace(mc); err != nil { - return err - } - - h.counters = make(map[string]*counter) - h.gauges = make(map[string]*gauge) - h.histograms = make(map[string]*histogram) - - // listen for config reloads - h.Config.RegisterReloadCallback(h.reloadBuilder) - - return nil -} - -func (h *HoneycombMetrics) reloadBuilder() { - h.Logger.Debug().Logf("reloading config for honeeycomb metrics reporter") - mc, err := h.Config.GetHoneycombMetricsConfig() - if err != nil { - // complain about this both to STDOUT and to the previously configured - // honeycomb logger - h.Logger.Error().Logf("failed to reload configs for Honeycomb metrics: %+v\n", err) - return - } - h.libhClient.Close() - // cancel the two reporting goroutines and restart them - h.reportingCancelFunc() - h.initlibtrace(mc) -} - -func (h *HoneycombMetrics) initlibtrace(mc config.HoneycombMetricsConfig) error { - metricsTx := &transmission.Honeycomb{ - // metrics are always sent as a single event, so don't wait for the timeout - MaxBatchSize: 1, - BlockOnSend: true, - UserAgentAddition: "tracing-proxy/" + h.Version + " 
(metrics)", - Transport: h.UpstreamTransport, - } - libhClientConfig := libtrace.ClientConfig{ - APIHost: mc.MetricsHoneycombAPI, - APIKey: mc.MetricsAPIKey, - Dataset: mc.MetricsDataset, - Transmission: metricsTx, - } - libhClient, err := libtrace.NewClient(libhClientConfig) - if err != nil { - return err - } - h.libhClient = libhClient - - // add some general go metrics to every report - // goroutines - if hostname, err := os.Hostname(); err == nil { - h.libhClient.AddField("hostname", hostname) - } - h.libhClient.AddDynamicField("num_goroutines", - func() interface{} { return runtime.NumGoroutine() }) - ctx, cancel := context.WithCancel(context.Background()) - h.reportingCancelFunc = cancel - go h.refreshMemStats(ctx) - go h.readResponses(ctx) - getAlloc := func() interface{} { - var mem runtime.MemStats - h.readMemStats(&mem) - return mem.Alloc - } - h.libhClient.AddDynamicField("memory_inuse", getAlloc) - startTime := time.Now() - h.libhClient.AddDynamicField("process_uptime_seconds", func() interface{} { - return time.Now().Sub(startTime) / time.Second - }) - go h.reportToHoneycommb(ctx) - return nil -} - -// refreshMemStats caches memory statistics to avoid blocking sending honeycomb -// metrics on gc pauses -func (h *HoneycombMetrics) refreshMemStats(ctx context.Context) { - // get memory metrics 5 times more frequently than we send metrics to make sure - // we have relatively up to date mem statistics but not go wild and get them - // all the time. - // for _ = range { - ticker := time.NewTicker(time.Duration(h.reportingFreq*1000/5) * time.Millisecond) - for { - select { - case <-ticker.C: - // Blocks if GC is running, maybe for a *looong* time. - var mem runtime.MemStats - runtime.ReadMemStats(&mem) - - h.latestMemStatsLock.Lock() - h.latestMemStats = mem - h.latestMemStatsLock.Unlock() - case <-ctx.Done(): - // context canceled? we're being asked to stop this so it can be restarted. 
- h.Logger.Debug().Logf("restarting honeycomb metrics refreshMemStats goroutine") - return - } - } -} - -// readResponses reads the responses from the libtrace responses queue and logs -// any errors that come down it -func (h *HoneycombMetrics) readResponses(ctx context.Context) { - resps := h.libhClient.TxResponses() - for { - select { - case resp := <-resps: - // read response, log if there's an error - var msg string - var log logger.Entry - switch { - case resp.Err != nil: - msg = "Metrics reporter got an error back from Honeycomb" - log = h.Logger.Error().WithField("error", resp.Err.Error()) - case resp.StatusCode > 202: - msg = "Metrics reporter got an unexpected status code back from Honeycomb" - log = h.Logger.Error() - } - if log != nil { - log.WithFields(map[string]interface{}{ - "status_code": resp.StatusCode, - "body": string(resp.Body), - "duration": resp.Duration, - }).Logf(msg) - } - case <-ctx.Done(): - // bail out; we're refreshing the config and will launch a new - // response reader. - h.Logger.Debug().Logf("restarting honeycomb metrics read libtrace responses goroutine") - return - } - } -} - -// readMemStats is a drop-in replacement for runtime.ReadMemStats which won't -// block waiting for a GC to finish. -func (h *HoneycombMetrics) readMemStats(mem *runtime.MemStats) { - h.latestMemStatsLock.RLock() - defer h.latestMemStatsLock.RUnlock() - - *mem = h.latestMemStats -} - -func (h *HoneycombMetrics) reportToHoneycommb(ctx context.Context) { - tick := time.NewTicker(time.Duration(h.reportingFreq) * time.Second) - for { - select { - case <-ctx.Done(): - // context canceled? we're being asked to stop this so it can be restarted. 
- return - case <-tick.C: - ev := h.libhClient.NewEvent() - ev.Metadata = map[string]string{ - "api_host": ev.APIHost, - "dataset": ev.Dataset, - } - h.countersLock.Lock() - for _, count := range h.counters { - count.lock.Lock() - ev.AddField(PrefixMetricName(h.prefix, count.name), count.val) - count.val = 0 - count.lock.Unlock() - } - h.countersLock.Unlock() - - h.gaugesLock.Lock() - for _, gauge := range h.gauges { - gauge.lock.Lock() - ev.AddField(PrefixMetricName(h.prefix, gauge.name), gauge.val) - // gauges should remain where they are until changed - // gauge.val = 0 - gauge.lock.Unlock() - } - h.gaugesLock.Unlock() - - h.histogramsLock.Lock() - for _, histogram := range h.histograms { - histogram.lock.Lock() - if len(histogram.vals) != 0 { - sort.Float64s(histogram.vals) - p50Index := int(math.Floor(float64(len(histogram.vals)) * 0.5)) - p95Index := int(math.Floor(float64(len(histogram.vals)) * 0.95)) - p99Index := int(math.Floor(float64(len(histogram.vals)) * 0.99)) - ev.AddField(PrefixMetricName(h.prefix, histogram.name)+"_p50", histogram.vals[p50Index]) - ev.AddField(PrefixMetricName(h.prefix, histogram.name)+"_p95", histogram.vals[p95Index]) - ev.AddField(PrefixMetricName(h.prefix, histogram.name)+"_p99", histogram.vals[p99Index]) - ev.AddField(PrefixMetricName(h.prefix, histogram.name)+"_min", histogram.vals[0]) - ev.AddField(PrefixMetricName(h.prefix, histogram.name)+"_max", histogram.vals[len(histogram.vals)-1]) - ev.AddField(PrefixMetricName(h.prefix, histogram.name)+"_avg", average(histogram.vals)) - histogram.vals = histogram.vals[:0] - } - histogram.lock.Unlock() - } - h.histogramsLock.Unlock() - - ev.Send() - } - } -} - -func average(vals []float64) float64 { - var total float64 - for _, val := range vals { - total += val - } - return total / float64(len(vals)) -} - -func (h *HoneycombMetrics) Register(name string, metricType string) { - switch metricType { - case "counter": - h.countersLock.Lock() - defer h.countersLock.Unlock() - // inside the 
lock, let's not race to create the counter - _, ok := h.counters[name] - if !ok { - newCounter := &counter{ - name: name, - } - h.counters[name] = newCounter - } - case "gauge": - h.gaugesLock.Lock() - defer h.gaugesLock.Unlock() - _, ok := h.gauges[name] - if !ok { - newGauge := &gauge{ - name: name, - } - h.gauges[name] = newGauge - } - case "histogram": - h.histogramsLock.Lock() - defer h.histogramsLock.Unlock() - _, ok := h.histograms[name] - if !ok { - newGauge := &histogram{ - name: name, - vals: make([]float64, 0), - } - h.histograms[name] = newGauge - } - default: - h.Logger.Debug().Logf("unspported metric type %s", metricType) - } -} - -func (h *HoneycombMetrics) Count(name string, n interface{}) { - count, ok := h.counters[name] - if !ok { - h.Register(name, "counter") - count = h.counters[name] - } - count.lock.Lock() - defer count.lock.Unlock() - count.val = count.val + int(ConvertNumeric(n)) -} - -func (h *HoneycombMetrics) Increment(name string) { - h.Count(name, 1) -} - -func (h *HoneycombMetrics) Gauge(name string, val interface{}) { - gauge, ok := h.gauges[name] - if !ok { - h.Register(name, "gauge") - gauge = h.gauges[name] - } - gauge.lock.Lock() - defer gauge.lock.Unlock() - gauge.val = ConvertNumeric(val) -} - -func (h *HoneycombMetrics) Histogram(name string, obs interface{}) { - histogram, ok := h.histograms[name] - if !ok { - h.Register(name, "histogram") - histogram = h.histograms[name] - } - histogram.lock.Lock() - defer histogram.lock.Unlock() - histogram.vals = append(histogram.vals, ConvertNumeric(obs)) -} diff --git a/metrics/metrics.go b/metrics/metrics.go index d56e6f30a8..094ae0780a 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -1,11 +1,7 @@ package metrics import ( - "fmt" "github.com/jirs5/tracing-proxy/types" - "os" - - "github.com/jirs5/tracing-proxy/config" ) type Metrics interface { @@ -21,23 +17,8 @@ type Metrics interface { IncrementWithLabels(name string, labels map[string]string) } 
-func GetMetricsImplementation(c config.Config, prefix string) Metrics { - var metricsr Metrics - metricsType, err := c.GetMetricsType() - if err != nil { - fmt.Printf("unable to get metrics type from config: %v\n", err) - os.Exit(1) - } - switch metricsType { - case "honeycomb": - metricsr = &HoneycombMetrics{prefix: prefix} - case "prometheus": - metricsr = &PromMetrics{prefix: prefix} - default: - fmt.Printf("unknown metrics type %s. Exiting.\n", metricsType) - os.Exit(1) - } - return metricsr +func GetMetricsImplementation(prefix string) Metrics { + return &OpsRampMetrics{prefix: prefix} } func ConvertNumeric(val interface{}) float64 { @@ -71,13 +52,6 @@ func ConvertNumeric(val interface{}) float64 { } } -func PrefixMetricName(prefix string, name string) string { - if prefix != "" { - return fmt.Sprintf(`%s_%s`, prefix, name) - } - return name -} - func ExtractLabelsFromSpan(span *types.Span, labelToKeyMap map[string]string) map[string]string { labels := map[string]string{} diff --git a/metrics/prometheus.go b/metrics/opsramp.go similarity index 92% rename from metrics/prometheus.go rename to metrics/opsramp.go index dba0092134..533c76da1c 100644 --- a/metrics/prometheus.go +++ b/metrics/opsramp.go @@ -25,7 +25,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" ) -type PromMetrics struct { +type OpsRampMetrics struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` // metrics keeps a record of all the registered metrics so we can increment @@ -45,9 +45,9 @@ type PromMetrics struct { prefix string } -func (p *PromMetrics) Start() error { - p.Logger.Debug().Logf("Starting PromMetrics") - defer func() { p.Logger.Debug().Logf("Finished starting PromMetrics") }() +func (p *OpsRampMetrics) Start() error { + p.Logger.Debug().Logf("Starting OpsRampMetrics") + defer func() { p.Logger.Debug().Logf("Finished starting OpsRampMetrics") }() if p.prefix == "" && p.Config.GetSendMetricsToOpsRamp() { metricsConfig, err := 
p.Config.GetOpsRampMetricsConfig() @@ -92,7 +92,7 @@ func (p *PromMetrics) Start() error { // Register takes a name and a metric type. The type should be one of "counter", // "gauge", or "histogram" -func (p *PromMetrics) Register(name string, metricType string) { +func (p *OpsRampMetrics) Register(name string, metricType string) { p.lock.Lock() defer p.lock.Unlock() @@ -132,7 +132,7 @@ func (p *PromMetrics) Register(name string, metricType string) { // RegisterWithDescriptionLabels takes a name, a metric type, description, labels. The type should be one of "counter", // "gauge", or "histogram" -func (p *PromMetrics) RegisterWithDescriptionLabels(name string, metricType string, desc string, labels []string) { +func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType string, desc string, labels []string) { p.lock.Lock() defer p.lock.Unlock() @@ -173,7 +173,7 @@ func (p *PromMetrics) RegisterWithDescriptionLabels(name string, metricType stri p.metrics[name] = newmet } -func (p *PromMetrics) Increment(name string) { +func (p *OpsRampMetrics) Increment(name string) { p.lock.RLock() defer p.lock.RUnlock() @@ -183,7 +183,7 @@ func (p *PromMetrics) Increment(name string) { } } } -func (p *PromMetrics) Count(name string, n interface{}) { +func (p *OpsRampMetrics) Count(name string, n interface{}) { p.lock.RLock() defer p.lock.RUnlock() @@ -193,7 +193,7 @@ func (p *PromMetrics) Count(name string, n interface{}) { } } } -func (p *PromMetrics) Gauge(name string, val interface{}) { +func (p *OpsRampMetrics) Gauge(name string, val interface{}) { p.lock.RLock() defer p.lock.RUnlock() @@ -203,7 +203,7 @@ func (p *PromMetrics) Gauge(name string, val interface{}) { } } } -func (p *PromMetrics) Histogram(name string, obs interface{}) { +func (p *OpsRampMetrics) Histogram(name string, obs interface{}) { p.lock.RLock() defer p.lock.RUnlock() @@ -214,7 +214,7 @@ func (p *PromMetrics) Histogram(name string, obs interface{}) { } } -func (p *PromMetrics) 
GaugeWithLabels(name string, labels map[string]string, value float64) { +func (p *OpsRampMetrics) GaugeWithLabels(name string, labels map[string]string, value float64) { p.lock.RLock() defer p.lock.RUnlock() @@ -225,7 +225,7 @@ func (p *PromMetrics) GaugeWithLabels(name string, labels map[string]string, val } } -func (p *PromMetrics) IncrementWithLabels(name string, labels map[string]string) { +func (p *OpsRampMetrics) IncrementWithLabels(name string, labels map[string]string) { p.lock.RLock() defer p.lock.RUnlock() @@ -243,7 +243,7 @@ type OpsRampAuthTokenResponse struct { Scope string `json:"scope"` } -func (p *PromMetrics) PopulateOpsRampMetrics(metricsConfig *config.OpsRampMetricsConfig) { +func (p *OpsRampMetrics) PopulateOpsRampMetrics(metricsConfig *config.OpsRampMetricsConfig) { p.apiEndpoint = metricsConfig.OpsRampMetricsAPI p.apiKey = metricsConfig.OpsRampMetricsAPIKey @@ -287,7 +287,7 @@ func (p *PromMetrics) PopulateOpsRampMetrics(metricsConfig *config.OpsRampMetric } } -func (p *PromMetrics) PushMetricsToOpsRamp() (int, error) { +func (p *OpsRampMetrics) PushMetricsToOpsRamp() (int, error) { metricFamilySlice, err := prometheus.DefaultGatherer.Gather() if err != nil { return -1, err @@ -480,7 +480,7 @@ func (p *PromMetrics) PushMetricsToOpsRamp() (int, error) { return resp.StatusCode, nil } -func (p *PromMetrics) RenewOpsRampOAuthToken() error { +func (p *OpsRampMetrics) RenewOpsRampOAuthToken() error { p.oAuthToken = new(OpsRampAuthTokenResponse) @@ -515,7 +515,7 @@ func (p *PromMetrics) RenewOpsRampOAuthToken() error { return nil } -func (p *PromMetrics) SendWithRetry(request *http.Request) (*http.Response, error) { +func (p *OpsRampMetrics) SendWithRetry(request *http.Request) (*http.Response, error) { response, err := p.Client.Do(request) if err == nil && response != nil && (response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted) { diff --git a/metrics/prometheus_test.go b/metrics/opsramp_test.go similarity index 95% 
rename from metrics/prometheus_test.go rename to metrics/opsramp_test.go index d3910aa5ff..2aa368db99 100644 --- a/metrics/prometheus_test.go +++ b/metrics/opsramp_test.go @@ -13,7 +13,7 @@ import ( ) func TestMultipleRegistrations(t *testing.T) { - p := &PromMetrics{ + p := &OpsRampMetrics{ Logger: &logger.MockLogger{}, Config: &config.MockConfig{}, } @@ -28,7 +28,7 @@ func TestMultipleRegistrations(t *testing.T) { } func TestRaciness(t *testing.T) { - p := &PromMetrics{ + p := &OpsRampMetrics{ Logger: &logger.MockLogger{}, Config: &config.MockConfig{}, } From 24b5a240d552ad18ad48c6e6512089877ed8ff00 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Thu, 7 Apr 2022 09:47:22 +0530 Subject: [PATCH 152/351] changes in husky for error attribute --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index a400150d2c..e11455ad66 100644 --- a/go.mod +++ b/go.mod @@ -46,4 +46,4 @@ require ( replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220323133508-a1eeabcc5f98 //replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220302161820-fe16f58d3996 -replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220325122121-523bfbec37a7 +replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220406121028-046683d10802 From d6527cf27a03e23e5d359e24c3593d40d6da3502 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Thu, 7 Apr 2022 09:56:02 +0530 Subject: [PATCH 153/351] changes in husky for error attribute --- go.sum | 2 ++ 1 file changed, 2 insertions(+) diff --git a/go.sum b/go.sum index b3067d5e55..d716e84ab7 100644 --- a/go.sum +++ b/go.sum @@ -273,6 +273,8 @@ github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LF github.com/jessevdk/go-flags v1.5.0/go.mod 
h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jirs5/husky v0.9.1-0.20220325122121-523bfbec37a7 h1:etPTWTKk/1S2dWAdOoW+c1smMqkrJwQZjkUoppbL/zc= github.com/jirs5/husky v0.9.1-0.20220325122121-523bfbec37a7/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= +github.com/jirs5/husky v0.9.1-0.20220406121028-046683d10802 h1:EOjRO/vmYd/KOqKNE9hURrNISv+4tspT+HawDAqgNqA= +github.com/jirs5/husky v0.9.1-0.20220406121028-046683d10802/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= github.com/jirs5/libtrace-go v0.0.0-20220323133508-a1eeabcc5f98 h1:SllAt3oySFffDLd9/T4uwE9x7JnGu6PD0T+H7gvWMLU= github.com/jirs5/libtrace-go v0.0.0-20220323133508-a1eeabcc5f98/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= From 0ca4b2baabe19b9a91c6da89d7cff6a99fd83849 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Thu, 7 Apr 2022 14:31:13 +0530 Subject: [PATCH 154/351] added support for logging into file and log rollups --- cmd/tracing-proxy/main.go | 16 +-- config/config.go | 11 +- config/file_config.go | 110 ++++------------ config/mock.go | 78 ++++++------ config_complete.toml | 58 +++------ go.mod | 1 + go.sum | 2 + logger/honeycomb.go | 256 -------------------------------------- logger/logger.go | 31 ++--- logger/logger_test.go | 25 ---- logger/logrus.go | 108 +++++++++++++++- logger/mock.go | 78 ------------ metrics/opsramp.go | 17 +-- 13 files changed, 209 insertions(+), 582 deletions(-) delete mode 100644 logger/honeycomb.go delete mode 100644 logger/logger_test.go delete mode 100644 logger/mock.go diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index a13be3cfe6..60616a34cb 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -14,8 +14,6 @@ import ( libtrace "github.com/honeycombio/libhoney-go" 
"github.com/honeycombio/libhoney-go/transmission" flag "github.com/jessevdk/go-flags" - "github.com/sirupsen/logrus" - "github.com/jirs5/tracing-proxy/app" "github.com/jirs5/tracing-proxy/collect" "github.com/jirs5/tracing-proxy/config" @@ -93,7 +91,7 @@ func main() { } // get desired implementation for each dependency to inject - lgr := logger.GetLoggerImplementation(c) + lgr := logger.GetLoggerImplementation() collector := collect.GetCollectorImplementation(c) metricsConfig := metrics.GetMetricsImplementation("") shrdr := sharder.GetSharderImplementation(c) @@ -105,6 +103,7 @@ func main() { fmt.Printf("unable to get logging level from config: %v\n", err) os.Exit(1) } + logrusLogger := lgr.Init() if err := lgr.SetLevel(logLevel); err != nil { fmt.Printf("unable to set logging level: %v\n", err) os.Exit(1) @@ -206,15 +205,8 @@ func main() { os.Exit(1) } - // the logger provided to startstop must be valid before any service is - // started, meaning it can't rely on injected configs. make a custom logger - // just for this step - ststLogger := logrus.New() - level, _ := logrus.ParseLevel(logLevel) - ststLogger.SetLevel(level) - - defer startstop.Stop(g.Objects(), ststLogger) - if err := startstop.Start(g.Objects(), ststLogger); err != nil { + defer startstop.Stop(g.Objects(), logrusLogger) + if err := startstop.Start(g.Objects(), logrusLogger); err != nil { fmt.Printf("failed to start injected dependencies. error: %+v\n", err) os.Exit(1) } diff --git a/config/config.go b/config/config.go index b38ec30343..39f5761319 100644 --- a/config/config.go +++ b/config/config.go @@ -81,13 +81,6 @@ type Config interface { // themselves GetOtherConfig(name string, configStruct interface{}) error - // GetLoggerType returns the type of the logger to use. 
Valid types are in - // the logger package - GetLoggerType() (string, error) - - // GetHoneycombLoggerConfig returns the config specific to the HoneycombLogger - GetHoneycombLoggerConfig() (HoneycombLoggerConfig, error) - // GetCollectorType returns the type of the collector to use. Valid types // are in the collect package GetCollectorType() (string, error) @@ -98,8 +91,8 @@ type Config interface { // GetSamplerConfigForDataset returns the sampler type to use for the given dataset GetSamplerConfigForDataset(string) (interface{}, error) - // GetPrometheusMetricsConfig returns the config specific to PrometheusMetrics - GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) + // GetLogrusConfig returns the config specific to Logrus + GetLogrusConfig() (*LogrusLoggerConfig, error) // GetOpsRampMetricsConfig returns the config specific to PrometheusMetrics GetOpsRampMetricsConfig() (*OpsRampMetricsConfig, error) diff --git a/config/file_config.go b/config/file_config.go index f378dbf4b5..6291fc76d2 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -31,7 +31,6 @@ type configContents struct { GRPCListenAddr string APIKeys []string `validate:"required"` HoneycombAPI string `validate:"required,url"` - Logger string `validate:"required,oneof= logrus honeycomb"` LoggingLevel string `validate:"required"` Collector string `validate:"required,oneof= InMemCollector"` Sampler string `validate:"required,oneof= DeterministicSampler DynamicSampler EMADynamicSampler RulesBasedSampler TotalThroughputSampler"` @@ -56,22 +55,19 @@ type InMemoryCollectorCacheCapacity struct { MaxAlloc uint64 } -type HoneycombLevel int - -type HoneycombLoggerConfig struct { - LoggerHoneycombAPI string `validate:"required,url"` - LoggerAPIKey string `validate:"required"` - LoggerDataset string `validate:"required"` - LoggerSamplerEnabled bool - LoggerSamplerThroughput int - Level HoneycombLevel -} - -type PrometheusMetricsConfig struct { - MetricsListenAddr string 
`validate:"required"` +type LogrusLoggerConfig struct { + LogFormatter string `validate:"required",toml:"LogFormatter"` + LogOutput string `validate:"required,oneof= stdout stderr file",toml:"LogOutput"` + File struct { + FileName string `toml:"FileName"` + MaxSize int `toml:"MaxSize"` + MaxBackups int `toml:"MaxBackups"` + Compress bool `toml:"Compress"` + } `toml:"File"` } type OpsRampMetricsConfig struct { + MetricsListenAddr string `validate:"required"` OpsRampMetricsAPI string `validate:"required,url"` OpsRampTenantID string `validate:"required"` OpsRampMetricsAPIKey string `validate:"required"` @@ -104,8 +100,6 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.BindEnv("PeerManagement.RedisHost", "tracing-proxy_REDIS_HOST") c.BindEnv("PeerManagement.RedisPassword", "tracing-proxy_REDIS_PASSWORD") - c.BindEnv("HoneycombLogger.LoggerAPIKey", "tracing-proxy_HONEYCOMB_API_KEY") - c.BindEnv("HoneycombMetrics.MetricsAPIKey", "tracing-proxy_HONEYCOMB_API_KEY") c.SetDefault("ListenAddr", "0.0.0.0:8080") c.SetDefault("PeerListenAddr", "0.0.0.0:8081") c.SetDefault("CompressPeerCommunication", true) @@ -116,7 +110,6 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("PeerManagement.UseTLSInsecure", false) c.SetDefault("PeerManagement.UseIPV6Identifier", false) c.SetDefault("HoneycombAPI", "https://api.jirs5") - c.SetDefault("Logger", "logrus") c.SetDefault("LoggingLevel", "debug") c.SetDefault("Collector", "InMemCollector") c.SetDefault("SendDelay", 2*time.Second) @@ -126,8 +119,6 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("UpstreamBufferSize", libtrace.DefaultPendingWorkCapacity) c.SetDefault("PeerBufferSize", libtrace.DefaultPendingWorkCapacity) c.SetDefault("MaxAlloc", uint64(0)) - c.SetDefault("HoneycombLogger.LoggerSamplerEnabled", false) - c.SetDefault("HoneycombLogger.LoggerSamplerThroughput", 5) 
c.SetDefault("AddHostMetadataToTrace", false) c.SetDefault("SendMetricsToOpsRamp", false) @@ -240,20 +231,8 @@ func (f *fileConfig) unmarshal() error { } func (f *fileConfig) validateConditionalConfigs() error { - // validate logger config - loggerType, err := f.GetLoggerType() - if err != nil { - return err - } - if loggerType == "honeycomb" { - _, err = f.GetHoneycombLoggerConfig() - if err != nil { - return err - } - } - // validate metrics config - _, err = f.GetPrometheusMetricsConfig() + _, err := f.GetOpsRampMetricsConfig() if err != nil { return err } @@ -465,41 +444,6 @@ func (f *fileConfig) GetLoggingLevel() (string, error) { return f.conf.LoggingLevel, nil } -func (f *fileConfig) GetLoggerType() (string, error) { - f.mux.RLock() - defer f.mux.RUnlock() - - return f.conf.Logger, nil -} - -func (f *fileConfig) GetHoneycombLoggerConfig() (HoneycombLoggerConfig, error) { - f.mux.RLock() - defer f.mux.RUnlock() - - hlConfig := &HoneycombLoggerConfig{} - if sub := f.config.Sub("HoneycombLogger"); sub != nil { - err := sub.UnmarshalExact(hlConfig) - if err != nil { - return *hlConfig, err - } - - hlConfig.LoggerAPIKey = f.config.GetString("HoneycombLogger.LoggerAPIKey") - - // https://github.com/spf13/viper/issues/747 - hlConfig.LoggerSamplerEnabled = f.config.GetBool("HoneycombLogger.LoggerSamplerEnabled") - hlConfig.LoggerSamplerThroughput = f.config.GetInt("HoneycombLogger.LoggerSamplerThroughput") - - v := validator.New() - err = v.Struct(hlConfig) - if err != nil { - return *hlConfig, err - } - - return *hlConfig, nil - } - return *hlConfig, errors.New("No config found for HoneycombLogger") -} - func (f *fileConfig) GetCollectorType() (string, error) { f.mux.RLock() defer f.mux.RUnlock() @@ -575,40 +519,34 @@ func (f *fileConfig) GetInMemCollectorCacheCapacity() (InMemoryCollectorCacheCap return *capacity, errors.New("No config found for inMemCollector") } -func (f *fileConfig) GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) { 
+func (f *fileConfig) GetLogrusConfig() (*LogrusLoggerConfig, error) { f.mux.RLock() defer f.mux.RUnlock() - pcConfig := &PrometheusMetricsConfig{} - if sub := f.config.Sub("PrometheusMetrics"); sub != nil { - err := sub.UnmarshalExact(pcConfig) + logrusConfig := &LogrusLoggerConfig{} + + if sub := f.config.Sub("LogrusLogger"); sub != nil { + err := sub.UnmarshalExact(logrusConfig) if err != nil { - return *pcConfig, err + return logrusConfig, err } v := validator.New() - err = v.Struct(pcConfig) + err = v.Struct(logrusConfig) if err != nil { - return *pcConfig, err + return logrusConfig, err } - return *pcConfig, nil + return logrusConfig, nil } - return *pcConfig, errors.New("No config found for PrometheusMetrics") + return nil, errors.New("No config found for LogrusConfig") } func (f *fileConfig) GetOpsRampMetricsConfig() (*OpsRampMetricsConfig, error) { f.mux.RLock() defer f.mux.RUnlock() - opsRampMetricsConfig := &OpsRampMetricsConfig{ - OpsRampMetricsAPI: "https://placeholder.api.com/", - OpsRampTenantID: "placeholder_tenantID", - OpsRampMetricsAPIKey: "placeholder_key", - OpsRampMetricsAPISecret: "placeholder_secret", - OpsRampMetricsReportingInterval: 60, - OpsRampMetricsRetryCount: 2, - } + opsRampMetricsConfig := &OpsRampMetricsConfig{} if sub := f.config.Sub("OpsRampMetrics"); sub != nil { err := sub.UnmarshalExact(opsRampMetricsConfig) @@ -624,6 +562,10 @@ func (f *fileConfig) GetOpsRampMetricsConfig() (*OpsRampMetricsConfig, error) { opsRampMetricsConfig.OpsRampMetricsReportingInterval = 10 } + if len(opsRampMetricsConfig.OpsRampMetricsList) < 1 { + opsRampMetricsConfig.OpsRampMetricsList = []string{".*"} + } + v := validator.New() err = v.Struct(opsRampMetricsConfig) if err != nil { diff --git a/config/mock.go b/config/mock.go index 05e43facc5..55219e355e 100644 --- a/config/mock.go +++ b/config/mock.go @@ -27,45 +27,43 @@ type MockConfig struct { GetGRPCListenAddrVal string GetLoggerTypeErr error GetLoggerTypeVal string - GetHoneycombLoggerConfigErr 
error - GetHoneycombLoggerConfigVal HoneycombLoggerConfig GetLoggingLevelErr error GetLoggingLevelVal string GetOtherConfigErr error // GetOtherConfigVal must be a JSON representation of the config struct to be populated. - GetOtherConfigVal string - GetPeersErr error - GetPeersVal []string - GetRedisHostErr error - GetRedisHostVal string - GetRedisPasswordErr error - GetRedisPasswordVal string - GetUseTLSErr error - GetUseTLSVal bool - GetUseTLSInsecureErr error - GetUseTLSInsecureVal bool - GetSamplerTypeErr error - GetSamplerTypeVal interface{} - GetMetricsTypeErr error - GetMetricsTypeVal string - GetPrometheusMetricsConfigErr error - GetPrometheusMetricsConfigVal PrometheusMetricsConfig - GetSendDelayErr error - GetSendDelayVal time.Duration - GetTraceTimeoutErr error - GetTraceTimeoutVal time.Duration - GetMaxBatchSizeVal uint - GetUpstreamBufferSizeVal int - GetPeerBufferSizeVal int - SendTickerVal time.Duration - IdentifierInterfaceName string - UseIPV6Identifier bool - RedisIdentifier string - PeerManagementType string - DebugServiceAddr string - DryRun bool - DryRunFieldName string - AddHostMetadataToTrace bool + GetOtherConfigVal string + GetPeersErr error + GetPeersVal []string + GetRedisHostErr error + GetRedisHostVal string + GetRedisPasswordErr error + GetRedisPasswordVal string + GetUseTLSErr error + GetUseTLSVal bool + GetUseTLSInsecureErr error + GetUseTLSInsecureVal bool + GetSamplerTypeErr error + GetSamplerTypeVal interface{} + GetMetricsTypeErr error + GetMetricsTypeVal string + GetOpsRampMetricsConfigErr error + GetOpsRampMetricsConfigVal OpsRampMetricsConfig + GetSendDelayErr error + GetSendDelayVal time.Duration + GetTraceTimeoutErr error + GetTraceTimeoutVal time.Duration + GetMaxBatchSizeVal uint + GetUpstreamBufferSizeVal int + GetPeerBufferSizeVal int + SendTickerVal time.Duration + IdentifierInterfaceName string + UseIPV6Identifier bool + RedisIdentifier string + PeerManagementType string + DebugServiceAddr string + DryRun bool + 
DryRunFieldName string + AddHostMetadataToTrace bool Mux sync.RWMutex } @@ -137,12 +135,6 @@ func (m *MockConfig) GetLoggerType() (string, error) { return m.GetLoggerTypeVal, m.GetLoggerTypeErr } -func (m *MockConfig) GetHoneycombLoggerConfig() (HoneycombLoggerConfig, error) { - m.Mux.RLock() - defer m.Mux.RUnlock() - - return m.GetHoneycombLoggerConfigVal, m.GetHoneycombLoggerConfigErr -} func (m *MockConfig) GetLoggingLevel() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() @@ -195,11 +187,11 @@ func (m *MockConfig) GetMetricsType() (string, error) { return m.GetMetricsTypeVal, m.GetMetricsTypeErr } -func (m *MockConfig) GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) { +func (m *MockConfig) GetPrometheusMetricsConfig() (OpsRampMetricsConfig, error) { m.Mux.RLock() defer m.Mux.RUnlock() - return m.GetPrometheusMetricsConfigVal, m.GetPrometheusMetricsConfigErr + return m.GetOpsRampMetricsConfigVal, m.GetOpsRampMetricsConfigErr } func (m *MockConfig) GetSendDelay() (time.Duration, error) { m.Mux.RLock() diff --git a/config_complete.toml b/config_complete.toml index 6663a14f56..c8cad36fb3 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -119,11 +119,6 @@ SendMetricsToOpsRamp = false # implementations of the Collector interface. Collector = "InMemCollector" -# Logger describes which logger to use for Refinery logs. Valid options are -# "logrus" and "honeycomb". The logrus option will write logs to STDOUT and the -# honeycomb option will send them to a Honeycomb dataset. -Logger = "logrus" - ######################### ## Peer Management ## ######################### @@ -224,72 +219,59 @@ MaxAlloc = 0 # commented out. [LogrusLogger] -# logrus logger currently has no options! - -###################### -## Honeycomb Logger ## -###################### +# LogFormatter specifies the log format. 
Accepted values are one of ["logfmt", "json"] +LogFormatter = "logfmt" -# HoneycombLogger is a section of the config only used if you are using the -# HoneycombLogger to send all logs to a Honeycomb Dataset. If you are using a -# different logger (eg file-based logger) you can leave all this commented out. +# LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr", "file"] +LogOutput = "file" -[HoneycombLogger] +## LogrusLogger.File - specifies configs for logs when LogOutput is set to "file" +[LogrusLogger.File] -# LoggerHoneycombAPI is the URL for the upstream Honeycomb API. -# Eligible for live reload. -LoggerHoneycombAPI = "https://api.honeycomb.io" +# FileName specifies the location where the logs are supposed be stored +FileName = "/var/log/opsramp/tracing-proxy.log" -# LoggerAPIKey is the API key to use to send log events to the Honeycomb logging -# dataset. This is separate from the APIKeys used to authenticate regular -# traffic. -# Eligible for live reload. -LoggerAPIKey = "abcd1234" +# MaxSize is the maximum size in megabytes of the log file before it gets rotated. +MaxSize = 1 -# LoggerDataset is the name of the dataset to which to send Refinery logs -# Eligible for live reload. -LoggerDataset = "Refinery Logs" +# MaxBackups is the maximum number of old log files to retain. +MaxBackups = 3 -# LoggerSamplerEnabled enables a PerKeyThroughput dynamic sampler for log messages. -# This will sample log messages based on [log level:message] key on a per second throughput basis. -# Not eligible for live reload. -LoggerSamplerEnabled = true +# Compress determines if the rotated log files should be compressed +# using gzip. +Compress = true -# LoggerSamplerThroughput is the per key per second throughput for the log message dynamic sampler. -# Not eligible for live reload. 
-LoggerSamplerThroughput = 10 #####################@## ## Prometheus Metrics ## #####################@## -[PrometheusMetrics] +[OpsRampMetrics] # MetricsListenAddr determines the interface and port on which Prometheus will # listen for requests for /metrics. Must be different from the main Refinery # listener. # Not eligible for live reload. MetricsListenAddr = "localhost:2112" -[OpsRampMetrics] # OpsRampMetricsAPI is the URL for the upstream OpsRamp API. # Not Eligible for live reload. -OpsRampMetricsAPI = "" +OpsRampMetricsAPI = "https://placeholder.api.com/" # OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. # Not Eligible for live reload. -OpsRampTenantID = "" +OpsRampTenantID = "placeholder_tenantID" # OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. # This is separate from the APIKeys used to authenticate regular # traffic. # Not Eligible for live reload. -OpsRampMetricsAPIKey = "" +OpsRampMetricsAPIKey = "placeholder_key" # OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. # This is separate from the APISecret used to authenticate regular # traffic. # Not Eligible for live reload. 
-OpsRampMetricsAPISecret = "" +OpsRampMetricsAPISecret = "placeholder_secret" # OpsRampMetricsReportingInterval is frequency specified in seconds at which # the metrics are collected and sent to OpsRamp diff --git a/go.mod b/go.mod index e11455ad66..1c25e5a544 100644 --- a/go.mod +++ b/go.mod @@ -39,6 +39,7 @@ require ( google.golang.org/grpc v1.44.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect ) //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220209113356-39ae92fc19f4 diff --git a/go.sum b/go.sum index d716e84ab7..46442f85b9 100644 --- a/go.sum +++ b/go.sum @@ -857,6 +857,8 @@ gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXa gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/logger/honeycomb.go b/logger/honeycomb.go deleted file mode 100644 index 16e8af75a5..0000000000 --- a/logger/honeycomb.go +++ /dev/null @@ -1,256 +0,0 @@ -package logger - -import ( - "errors" - "fmt" - "net/http" - "os" - "strings" - "time" - - "github.com/honeycombio/dynsampler-go" - libtrace "github.com/honeycombio/libhoney-go" - "github.com/honeycombio/libhoney-go/transmission" - - 
"github.com/jirs5/tracing-proxy/config" -) - -// HoneycombLogger is a Logger implementation that sends all logs to a Honeycomb -// dataset. It requires a HoneycombLogger section of the config to exist with -// three keys, LoggerHoneycombAPI, LoggerAPIKey, and LoggerDataset. -type HoneycombLogger struct { - Config config.Config `inject:""` - UpstreamTransport *http.Transport `inject:"upstreamTransport"` - Version string `inject:"version"` - loggerConfig config.HoneycombLoggerConfig - libhClient *libtrace.Client - builder *libtrace.Builder - sampler dynsampler.Sampler -} - -type HoneycombEntry struct { - loggerConfig config.HoneycombLoggerConfig - builder *libtrace.Builder - sampler dynsampler.Sampler -} - -const ( - UnknownLevel config.HoneycombLevel = iota - DebugLevel - InfoLevel - WarnLevel - ErrorLevel - PanicLevel -) - -func (h *HoneycombLogger) Start() error { - // logLevel is defined outside the HoneycombLogger section - // and is set independently, before Start() is called, so we need to - // preserve it. - // TODO: make LogLevel part of the HoneycombLogger/LogrusLogger sections? 
- logLevel := h.loggerConfig.Level - loggerConfig, err := h.Config.GetHoneycombLoggerConfig() - if err != nil { - return err - } - loggerConfig.Level = logLevel - h.loggerConfig = loggerConfig - var loggerTx transmission.Sender - if h.loggerConfig.LoggerAPIKey == "" { - loggerTx = &transmission.DiscardSender{} - } else { - loggerTx = &transmission.Honeycomb{ - // logs are often sent in flurries; flush every half second - MaxBatchSize: 100, - BatchTimeout: 500 * time.Millisecond, - UserAgentAddition: "tracing-proxy/" + h.Version + " (metrics)", - Transport: h.UpstreamTransport, - PendingWorkCapacity: libtrace.DefaultPendingWorkCapacity, - } - } - - if loggerConfig.LoggerSamplerEnabled { - h.sampler = &dynsampler.PerKeyThroughput{ - ClearFrequencySec: 10, - PerKeyThroughputPerSec: loggerConfig.LoggerSamplerThroughput, - MaxKeys: 1000, - } - err := h.sampler.Start() - if err != nil { - return err - } - } - - libhClientConfig := libtrace.ClientConfig{ - APIHost: h.loggerConfig.LoggerHoneycombAPI, - APIKey: h.loggerConfig.LoggerAPIKey, - Dataset: h.loggerConfig.LoggerDataset, - Transmission: loggerTx, - } - libhClient, err := libtrace.NewClient(libhClientConfig) - if err != nil { - return err - } - h.libhClient = libhClient - - if hostname, err := os.Hostname(); err == nil { - h.libhClient.AddField("hostname", hostname) - } - startTime := time.Now() - h.libhClient.AddDynamicField("process_uptime_seconds", func() interface{} { - return time.Now().Sub(startTime) / time.Second - }) - - h.builder = h.libhClient.NewBuilder() - - // listen for responses from honeycomb, log to STDOUT if something unusual - // comes back - go h.readResponses() - - // listen for config reloads - h.Config.RegisterReloadCallback(h.reloadBuilder) - - fmt.Printf("Starting Honeycomb Logger - see Honeycomb %s dataset for service logs\n", h.loggerConfig.LoggerDataset) - - return nil -} - -func (h *HoneycombLogger) readResponses() { - resps := h.libhClient.TxResponses() - for resp := range resps { - 
respString := fmt.Sprintf("Response: status: %d, duration: %s", resp.StatusCode, resp.Duration) - // read response, log if there's an error - switch { - case resp.StatusCode == 0: // log message dropped due to sampling - continue - case resp.Err != nil: - fmt.Fprintf(os.Stderr, "Honeycomb Logger got an error back from Honeycomb while trying to send a log line: %s, error: %s, body: %s\n", respString, resp.Err.Error(), string(resp.Body)) - case resp.StatusCode > 202: - fmt.Fprintf(os.Stderr, "Honeycomb Logger got an unexpected status code back from Honeycomb while trying to send a log line: %s, %s\n", respString, string(resp.Body)) - } - } -} - -func (h *HoneycombLogger) reloadBuilder() { - h.Debug().Logf("reloading config for Honeycomb logger") - // preseve log level - logLevel := h.loggerConfig.Level - loggerConfig, err := h.Config.GetHoneycombLoggerConfig() - if err != nil { - // complain about this both to STDOUT and to the previously configured - // honeycomb logger - fmt.Printf("failed to reload configs for Honeycomb logger: %+v\n", err) - h.Error().Logf("failed to reload configs for Honeycomb logger: %+v", err) - return - } - loggerConfig.Level = logLevel - h.loggerConfig = loggerConfig - h.builder.APIHost = h.loggerConfig.LoggerHoneycombAPI - h.builder.WriteKey = h.loggerConfig.LoggerAPIKey - h.builder.Dataset = h.loggerConfig.LoggerDataset -} - -func (h *HoneycombLogger) Stop() error { - fmt.Printf("stopping honey logger\n") - libtrace.Flush() - return nil -} - -func (h *HoneycombLogger) Debug() Entry { - if h.loggerConfig.Level > DebugLevel { - return nullEntry - } - - ev := &HoneycombEntry{ - loggerConfig: h.loggerConfig, - builder: h.builder.Clone(), - sampler: h.sampler, - } - ev.builder.AddField("level", "debug") - - return ev -} - -func (h *HoneycombLogger) Info() Entry { - if h.loggerConfig.Level > InfoLevel { - return nullEntry - } - - ev := &HoneycombEntry{ - loggerConfig: h.loggerConfig, - builder: h.builder.Clone(), - sampler: h.sampler, - } - 
ev.builder.AddField("level", "info") - - return ev -} - -func (h *HoneycombLogger) Error() Entry { - if h.loggerConfig.Level > ErrorLevel { - return nullEntry - } - - ev := &HoneycombEntry{ - loggerConfig: h.loggerConfig, - builder: h.builder.Clone(), - sampler: h.sampler, - } - ev.builder.AddField("level", "error") - - return ev -} - -func (h *HoneycombLogger) SetLevel(level string) error { - sanitizedLevel := strings.TrimSpace(strings.ToLower(level)) - var lvl config.HoneycombLevel - switch sanitizedLevel { - case "debug": - lvl = DebugLevel - case "info": - lvl = InfoLevel - case "warn", "warning": - lvl = WarnLevel - case "error": - lvl = ErrorLevel - case "panic": - lvl = PanicLevel - default: - return errors.New(fmt.Sprintf("unrecognized logging level: %s", level)) - } - h.loggerConfig.Level = lvl - return nil -} - -func (h *HoneycombEntry) WithField(key string, value interface{}) Entry { - h.builder.AddField(key, value) - return h -} - -func (h *HoneycombEntry) WithString(key string, value string) Entry { - return h.WithField(key, value) -} - -func (h *HoneycombEntry) WithFields(fields map[string]interface{}) Entry { - h.builder.Add(fields) - return h -} - -func (h *HoneycombEntry) Logf(f string, args ...interface{}) { - ev := h.builder.NewEvent() - msg := fmt.Sprintf(f, args...) 
- ev.AddField("msg", msg) - ev.Metadata = map[string]string{ - "api_host": ev.APIHost, - "dataset": ev.Dataset, - } - level, ok := ev.Fields()["level"].(string) - if !ok { - level = "unknown" - } - if h.sampler != nil { - rate := h.sampler.GetSampleRate(fmt.Sprintf(`%s:%s`, level, msg)) - ev.SampleRate = uint(rate) - } - ev.Send() -} diff --git a/logger/logger.go b/logger/logger.go index 2298e66a9a..b70eb9ddcb 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -1,18 +1,18 @@ package logger -import ( - "fmt" - "os" - - "github.com/jirs5/tracing-proxy/config" -) +import "github.com/sirupsen/logrus" type Logger interface { Debug() Entry Info() Entry Error() Entry + Fatal() Entry + Panic() Entry + Warn() Entry // SetLevel sets the logging level (debug, info, warn, error) SetLevel(level string) error + + Init() *logrus.Logger } type Entry interface { @@ -26,21 +26,6 @@ type Entry interface { Logf(f string, args ...interface{}) } -func GetLoggerImplementation(c config.Config) Logger { - var logger Logger - loggerType, err := c.GetLoggerType() - if err != nil { - fmt.Printf("unable to get logger type from config: %v\n", err) - os.Exit(1) - } - switch loggerType { - case "honeycomb": - logger = &HoneycombLogger{} - case "logrus": - logger = &LogrusLogger{} - default: - fmt.Printf("unknown logger type %s. 
Exiting.\n", loggerType) - os.Exit(1) - } - return logger +func GetLoggerImplementation() Logger { + return &LogrusLogger{} } diff --git a/logger/logger_test.go b/logger/logger_test.go deleted file mode 100644 index de03ab625b..0000000000 --- a/logger/logger_test.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build all || race -// +build all race - -package logger - -import ( - "testing" - - "github.com/jirs5/tracing-proxy/config" - - "github.com/stretchr/testify/assert" -) - -func TestHoneycombLoggerRespectsLogLevelAfterStart(t *testing.T) { - cfg := &config.MockConfig{GetHoneycombLoggerConfigVal: config.HoneycombLoggerConfig{}} - hcLogger := &HoneycombLogger{ - Config: cfg, - loggerConfig: config.HoneycombLoggerConfig{Level: WarnLevel}, - } - - assert.Equal(t, WarnLevel, hcLogger.loggerConfig.Level) - err := hcLogger.Start() - assert.Nil(t, err) - assert.Equal(t, WarnLevel, hcLogger.loggerConfig.Level) -} diff --git a/logger/logrus.go b/logger/logrus.go index f0fdc5a74c..4af488e808 100644 --- a/logger/logrus.go +++ b/logger/logrus.go @@ -1,9 +1,10 @@ package logger import ( - "github.com/sirupsen/logrus" - "github.com/jirs5/tracing-proxy/config" + "github.com/sirupsen/logrus" + lumberjack "gopkg.in/natefinch/lumberjack.v2" + "os" ) // LogrusLogger is a Logger implementation that sends all logs to stdout using @@ -21,11 +22,104 @@ type LogrusEntry struct { } func (l *LogrusLogger) Start() error { - l.logger = logrus.New() l.logger.SetLevel(l.level) + l.logger.SetReportCaller(true) + + logrusConfig, err := l.Config.GetLogrusConfig() + if err != nil { + return err + } + + switch logrusConfig.LogOutput { + case "stdout": + l.logger.SetOutput(os.Stdout) + case "stderr": + l.logger.SetOutput(os.Stderr) + case "file": + l.logger.SetOutput(&lumberjack.Logger{ + Filename: logrusConfig.File.FileName, + MaxSize: logrusConfig.File.MaxSize, + MaxBackups: logrusConfig.File.MaxBackups, + Compress: 
logrusConfig.File.Compress, + }) + } + + l.logger.SetFormatter(&logrus.TextFormatter{ + DisableColors: true, + ForceQuote: true, + FullTimestamp: true, + DisableLevelTruncation: true, + QuoteEmptyFields: true, + FieldMap: logrus.FieldMap{ + logrus.FieldKeyFile: "file", + logrus.FieldKeyTime: "timestamp", + logrus.FieldKeyLevel: "level", + logrus.FieldKeyMsg: "message", + logrus.FieldKeyFunc: "caller", + }, + }) + + //l.logger.SetFormatter(&logrus.JSONFormatter{ + // FieldMap: logrus.FieldMap{ + // logrus.FieldKeyFile: "file", + // logrus.FieldKeyTime: "timestamp", + // logrus.FieldKeyLevel: "level", + // logrus.FieldKeyMsg: "message", + // logrus.FieldKeyFunc: "caller", + // }, + //}) return nil } +func (l *LogrusLogger) Init() *logrus.Logger { + l.logger = logrus.New() + return l.logger +} + +func (l *LogrusLogger) Panic() Entry { + if !l.logger.IsLevelEnabled(logrus.PanicLevel) { + return nullEntry + } + + return &LogrusEntry{ + entry: logrus.NewEntry(l.logger), + level: logrus.PanicLevel, + } +} + +func (l *LogrusLogger) Fatal() Entry { + if !l.logger.IsLevelEnabled(logrus.FatalLevel) { + return nullEntry + } + + return &LogrusEntry{ + entry: logrus.NewEntry(l.logger), + level: logrus.FatalLevel, + } +} + +func (l *LogrusLogger) Warn() Entry { + if !l.logger.IsLevelEnabled(logrus.WarnLevel) { + return nullEntry + } + + return &LogrusEntry{ + entry: logrus.NewEntry(l.logger), + level: logrus.WarnLevel, + } +} + +func (l *LogrusLogger) Trace() Entry { + if !l.logger.IsLevelEnabled(logrus.TraceLevel) { + return nullEntry + } + + return &LogrusEntry{ + entry: logrus.NewEntry(l.logger), + level: logrus.TraceLevel, + } +} + func (l *LogrusLogger) Debug() Entry { if !l.logger.IsLevelEnabled(logrus.DebugLevel) { return nullEntry @@ -95,6 +189,14 @@ func (l *LogrusEntry) WithFields(fields map[string]interface{}) Entry { func (l *LogrusEntry) Logf(f string, args ...interface{}) { switch l.level { + case logrus.WarnLevel: + l.entry.Warnf(f, args...) 
+ case logrus.FatalLevel: + l.entry.Fatalf(f, args...) + case logrus.PanicLevel: + l.entry.Panicf(f, args...) + case logrus.TraceLevel: + l.entry.Tracef(f, args...) case logrus.DebugLevel: l.entry.Debugf(f, args...) case logrus.InfoLevel: diff --git a/logger/mock.go b/logger/mock.go deleted file mode 100644 index dc6270ad58..0000000000 --- a/logger/mock.go +++ /dev/null @@ -1,78 +0,0 @@ -package logger - -import ( - "fmt" - - "github.com/jirs5/tracing-proxy/config" -) - -type MockLogger struct { - Events []*MockLoggerEvent -} - -type MockLoggerEvent struct { - l *MockLogger - level config.HoneycombLevel - Fields map[string]interface{} -} - -func (l *MockLogger) Debug() Entry { - return &MockLoggerEvent{ - l: l, - level: DebugLevel, - Fields: make(map[string]interface{}), - } -} - -func (l *MockLogger) Info() Entry { - return &MockLoggerEvent{ - l: l, - level: InfoLevel, - Fields: make(map[string]interface{}), - } -} - -func (l *MockLogger) Error() Entry { - return &MockLoggerEvent{ - l: l, - level: ErrorLevel, - Fields: make(map[string]interface{}), - } -} - -func (l *MockLogger) SetLevel(level string) error { - return nil -} - -func (e *MockLoggerEvent) WithField(key string, value interface{}) Entry { - e.Fields[key] = value - - return e -} - -func (e *MockLoggerEvent) WithString(key string, value string) Entry { - return e.WithField(key, value) -} - -func (e *MockLoggerEvent) WithFields(fields map[string]interface{}) Entry { - for k, v := range fields { - e.Fields[k] = v - } - - return e -} - -func (e *MockLoggerEvent) Logf(f string, args ...interface{}) { - msg := fmt.Sprintf(f, args...) 
- switch e.level { - case DebugLevel: - e.WithField("debug", msg) - case InfoLevel: - e.WithField("info", msg) - case ErrorLevel: - e.WithField("error", msg) - default: - panic("unexpected log level") - } - e.l.Events = append(e.l.Events, e) -} diff --git a/metrics/opsramp.go b/metrics/opsramp.go index 533c76da1c..eba889b34b 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -49,12 +49,12 @@ func (p *OpsRampMetrics) Start() error { p.Logger.Debug().Logf("Starting OpsRampMetrics") defer func() { p.Logger.Debug().Logf("Finished starting OpsRampMetrics") }() - if p.prefix == "" && p.Config.GetSendMetricsToOpsRamp() { - metricsConfig, err := p.Config.GetOpsRampMetricsConfig() - if err != nil { - p.Logger.Error().Logf("Failed to Load OpsRampMetrics Config:", err) - } + metricsConfig, err := p.Config.GetOpsRampMetricsConfig() + if err != nil { + return err + } + if p.prefix == "" && p.Config.GetSendMetricsToOpsRamp() { go func() { metricsTicker := time.NewTicker(time.Duration(metricsConfig.OpsRampMetricsReportingInterval) * time.Second) defer metricsTicker.Stop() @@ -76,17 +76,12 @@ func (p *OpsRampMetrics) Start() error { } - pc, err := p.Config.GetPrometheusMetricsConfig() - if err != nil { - return err - } - p.metrics = make(map[string]interface{}) muxxer := mux.NewRouter() muxxer.Handle("/metrics", promhttp.Handler()) - go http.ListenAndServe(pc.MetricsListenAddr, muxxer) + go http.ListenAndServe(metricsConfig.MetricsListenAddr, muxxer) return nil } From 0af1d70ead6195ef08917985ad92b74536fcdac3 Mon Sep 17 00:00:00 2001 From: LokeshOpsramp <59053467+LokeshOpsramp@users.noreply.github.com> Date: Thu, 7 Apr 2022 14:34:16 +0530 Subject: [PATCH 155/351] added support for logging into file and log rollups (#1) --- cmd/tracing-proxy/main.go | 16 +-- config/config.go | 11 +- config/file_config.go | 110 ++++------------ config/mock.go | 78 ++++++------ config_complete.toml | 58 +++------ go.mod | 1 + go.sum | 2 + logger/honeycomb.go | 256 
-------------------------------------- logger/logger.go | 31 ++--- logger/logger_test.go | 25 ---- logger/logrus.go | 108 +++++++++++++++- logger/mock.go | 78 ------------ metrics/opsramp.go | 17 +-- 13 files changed, 209 insertions(+), 582 deletions(-) delete mode 100644 logger/honeycomb.go delete mode 100644 logger/logger_test.go delete mode 100644 logger/mock.go diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index a13be3cfe6..60616a34cb 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -14,8 +14,6 @@ import ( libtrace "github.com/honeycombio/libhoney-go" "github.com/honeycombio/libhoney-go/transmission" flag "github.com/jessevdk/go-flags" - "github.com/sirupsen/logrus" - "github.com/jirs5/tracing-proxy/app" "github.com/jirs5/tracing-proxy/collect" "github.com/jirs5/tracing-proxy/config" @@ -93,7 +91,7 @@ func main() { } // get desired implementation for each dependency to inject - lgr := logger.GetLoggerImplementation(c) + lgr := logger.GetLoggerImplementation() collector := collect.GetCollectorImplementation(c) metricsConfig := metrics.GetMetricsImplementation("") shrdr := sharder.GetSharderImplementation(c) @@ -105,6 +103,7 @@ func main() { fmt.Printf("unable to get logging level from config: %v\n", err) os.Exit(1) } + logrusLogger := lgr.Init() if err := lgr.SetLevel(logLevel); err != nil { fmt.Printf("unable to set logging level: %v\n", err) os.Exit(1) @@ -206,15 +205,8 @@ func main() { os.Exit(1) } - // the logger provided to startstop must be valid before any service is - // started, meaning it can't rely on injected configs. 
make a custom logger - // just for this step - ststLogger := logrus.New() - level, _ := logrus.ParseLevel(logLevel) - ststLogger.SetLevel(level) - - defer startstop.Stop(g.Objects(), ststLogger) - if err := startstop.Start(g.Objects(), ststLogger); err != nil { + defer startstop.Stop(g.Objects(), logrusLogger) + if err := startstop.Start(g.Objects(), logrusLogger); err != nil { fmt.Printf("failed to start injected dependencies. error: %+v\n", err) os.Exit(1) } diff --git a/config/config.go b/config/config.go index b38ec30343..39f5761319 100644 --- a/config/config.go +++ b/config/config.go @@ -81,13 +81,6 @@ type Config interface { // themselves GetOtherConfig(name string, configStruct interface{}) error - // GetLoggerType returns the type of the logger to use. Valid types are in - // the logger package - GetLoggerType() (string, error) - - // GetHoneycombLoggerConfig returns the config specific to the HoneycombLogger - GetHoneycombLoggerConfig() (HoneycombLoggerConfig, error) - // GetCollectorType returns the type of the collector to use. 
Valid types // are in the collect package GetCollectorType() (string, error) @@ -98,8 +91,8 @@ type Config interface { // GetSamplerConfigForDataset returns the sampler type to use for the given dataset GetSamplerConfigForDataset(string) (interface{}, error) - // GetPrometheusMetricsConfig returns the config specific to PrometheusMetrics - GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) + // GetLogrusConfig returns the config specific to Logrus + GetLogrusConfig() (*LogrusLoggerConfig, error) // GetOpsRampMetricsConfig returns the config specific to PrometheusMetrics GetOpsRampMetricsConfig() (*OpsRampMetricsConfig, error) diff --git a/config/file_config.go b/config/file_config.go index f378dbf4b5..6291fc76d2 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -31,7 +31,6 @@ type configContents struct { GRPCListenAddr string APIKeys []string `validate:"required"` HoneycombAPI string `validate:"required,url"` - Logger string `validate:"required,oneof= logrus honeycomb"` LoggingLevel string `validate:"required"` Collector string `validate:"required,oneof= InMemCollector"` Sampler string `validate:"required,oneof= DeterministicSampler DynamicSampler EMADynamicSampler RulesBasedSampler TotalThroughputSampler"` @@ -56,22 +55,19 @@ type InMemoryCollectorCacheCapacity struct { MaxAlloc uint64 } -type HoneycombLevel int - -type HoneycombLoggerConfig struct { - LoggerHoneycombAPI string `validate:"required,url"` - LoggerAPIKey string `validate:"required"` - LoggerDataset string `validate:"required"` - LoggerSamplerEnabled bool - LoggerSamplerThroughput int - Level HoneycombLevel -} - -type PrometheusMetricsConfig struct { - MetricsListenAddr string `validate:"required"` +type LogrusLoggerConfig struct { + LogFormatter string `validate:"required",toml:"LogFormatter"` + LogOutput string `validate:"required,oneof= stdout stderr file",toml:"LogOutput"` + File struct { + FileName string `toml:"FileName"` + MaxSize int `toml:"MaxSize"` + MaxBackups int 
`toml:"MaxBackups"` + Compress bool `toml:"Compress"` + } `toml:"File"` } type OpsRampMetricsConfig struct { + MetricsListenAddr string `validate:"required"` OpsRampMetricsAPI string `validate:"required,url"` OpsRampTenantID string `validate:"required"` OpsRampMetricsAPIKey string `validate:"required"` @@ -104,8 +100,6 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.BindEnv("PeerManagement.RedisHost", "tracing-proxy_REDIS_HOST") c.BindEnv("PeerManagement.RedisPassword", "tracing-proxy_REDIS_PASSWORD") - c.BindEnv("HoneycombLogger.LoggerAPIKey", "tracing-proxy_HONEYCOMB_API_KEY") - c.BindEnv("HoneycombMetrics.MetricsAPIKey", "tracing-proxy_HONEYCOMB_API_KEY") c.SetDefault("ListenAddr", "0.0.0.0:8080") c.SetDefault("PeerListenAddr", "0.0.0.0:8081") c.SetDefault("CompressPeerCommunication", true) @@ -116,7 +110,6 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("PeerManagement.UseTLSInsecure", false) c.SetDefault("PeerManagement.UseIPV6Identifier", false) c.SetDefault("HoneycombAPI", "https://api.jirs5") - c.SetDefault("Logger", "logrus") c.SetDefault("LoggingLevel", "debug") c.SetDefault("Collector", "InMemCollector") c.SetDefault("SendDelay", 2*time.Second) @@ -126,8 +119,6 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("UpstreamBufferSize", libtrace.DefaultPendingWorkCapacity) c.SetDefault("PeerBufferSize", libtrace.DefaultPendingWorkCapacity) c.SetDefault("MaxAlloc", uint64(0)) - c.SetDefault("HoneycombLogger.LoggerSamplerEnabled", false) - c.SetDefault("HoneycombLogger.LoggerSamplerThroughput", 5) c.SetDefault("AddHostMetadataToTrace", false) c.SetDefault("SendMetricsToOpsRamp", false) @@ -240,20 +231,8 @@ func (f *fileConfig) unmarshal() error { } func (f *fileConfig) validateConditionalConfigs() error { - // validate logger config - loggerType, err := f.GetLoggerType() - if err != nil { - return err - } - if loggerType == 
"honeycomb" { - _, err = f.GetHoneycombLoggerConfig() - if err != nil { - return err - } - } - // validate metrics config - _, err = f.GetPrometheusMetricsConfig() + _, err := f.GetOpsRampMetricsConfig() if err != nil { return err } @@ -465,41 +444,6 @@ func (f *fileConfig) GetLoggingLevel() (string, error) { return f.conf.LoggingLevel, nil } -func (f *fileConfig) GetLoggerType() (string, error) { - f.mux.RLock() - defer f.mux.RUnlock() - - return f.conf.Logger, nil -} - -func (f *fileConfig) GetHoneycombLoggerConfig() (HoneycombLoggerConfig, error) { - f.mux.RLock() - defer f.mux.RUnlock() - - hlConfig := &HoneycombLoggerConfig{} - if sub := f.config.Sub("HoneycombLogger"); sub != nil { - err := sub.UnmarshalExact(hlConfig) - if err != nil { - return *hlConfig, err - } - - hlConfig.LoggerAPIKey = f.config.GetString("HoneycombLogger.LoggerAPIKey") - - // https://github.com/spf13/viper/issues/747 - hlConfig.LoggerSamplerEnabled = f.config.GetBool("HoneycombLogger.LoggerSamplerEnabled") - hlConfig.LoggerSamplerThroughput = f.config.GetInt("HoneycombLogger.LoggerSamplerThroughput") - - v := validator.New() - err = v.Struct(hlConfig) - if err != nil { - return *hlConfig, err - } - - return *hlConfig, nil - } - return *hlConfig, errors.New("No config found for HoneycombLogger") -} - func (f *fileConfig) GetCollectorType() (string, error) { f.mux.RLock() defer f.mux.RUnlock() @@ -575,40 +519,34 @@ func (f *fileConfig) GetInMemCollectorCacheCapacity() (InMemoryCollectorCacheCap return *capacity, errors.New("No config found for inMemCollector") } -func (f *fileConfig) GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) { +func (f *fileConfig) GetLogrusConfig() (*LogrusLoggerConfig, error) { f.mux.RLock() defer f.mux.RUnlock() - pcConfig := &PrometheusMetricsConfig{} - if sub := f.config.Sub("PrometheusMetrics"); sub != nil { - err := sub.UnmarshalExact(pcConfig) + logrusConfig := &LogrusLoggerConfig{} + + if sub := f.config.Sub("LogrusLogger"); sub 
!= nil { + err := sub.UnmarshalExact(logrusConfig) if err != nil { - return *pcConfig, err + return logrusConfig, err } v := validator.New() - err = v.Struct(pcConfig) + err = v.Struct(logrusConfig) if err != nil { - return *pcConfig, err + return logrusConfig, err } - return *pcConfig, nil + return logrusConfig, nil } - return *pcConfig, errors.New("No config found for PrometheusMetrics") + return nil, errors.New("No config found for LogrusConfig") } func (f *fileConfig) GetOpsRampMetricsConfig() (*OpsRampMetricsConfig, error) { f.mux.RLock() defer f.mux.RUnlock() - opsRampMetricsConfig := &OpsRampMetricsConfig{ - OpsRampMetricsAPI: "https://placeholder.api.com/", - OpsRampTenantID: "placeholder_tenantID", - OpsRampMetricsAPIKey: "placeholder_key", - OpsRampMetricsAPISecret: "placeholder_secret", - OpsRampMetricsReportingInterval: 60, - OpsRampMetricsRetryCount: 2, - } + opsRampMetricsConfig := &OpsRampMetricsConfig{} if sub := f.config.Sub("OpsRampMetrics"); sub != nil { err := sub.UnmarshalExact(opsRampMetricsConfig) @@ -624,6 +562,10 @@ func (f *fileConfig) GetOpsRampMetricsConfig() (*OpsRampMetricsConfig, error) { opsRampMetricsConfig.OpsRampMetricsReportingInterval = 10 } + if len(opsRampMetricsConfig.OpsRampMetricsList) < 1 { + opsRampMetricsConfig.OpsRampMetricsList = []string{".*"} + } + v := validator.New() err = v.Struct(opsRampMetricsConfig) if err != nil { diff --git a/config/mock.go b/config/mock.go index 05e43facc5..55219e355e 100644 --- a/config/mock.go +++ b/config/mock.go @@ -27,45 +27,43 @@ type MockConfig struct { GetGRPCListenAddrVal string GetLoggerTypeErr error GetLoggerTypeVal string - GetHoneycombLoggerConfigErr error - GetHoneycombLoggerConfigVal HoneycombLoggerConfig GetLoggingLevelErr error GetLoggingLevelVal string GetOtherConfigErr error // GetOtherConfigVal must be a JSON representation of the config struct to be populated. 
- GetOtherConfigVal string - GetPeersErr error - GetPeersVal []string - GetRedisHostErr error - GetRedisHostVal string - GetRedisPasswordErr error - GetRedisPasswordVal string - GetUseTLSErr error - GetUseTLSVal bool - GetUseTLSInsecureErr error - GetUseTLSInsecureVal bool - GetSamplerTypeErr error - GetSamplerTypeVal interface{} - GetMetricsTypeErr error - GetMetricsTypeVal string - GetPrometheusMetricsConfigErr error - GetPrometheusMetricsConfigVal PrometheusMetricsConfig - GetSendDelayErr error - GetSendDelayVal time.Duration - GetTraceTimeoutErr error - GetTraceTimeoutVal time.Duration - GetMaxBatchSizeVal uint - GetUpstreamBufferSizeVal int - GetPeerBufferSizeVal int - SendTickerVal time.Duration - IdentifierInterfaceName string - UseIPV6Identifier bool - RedisIdentifier string - PeerManagementType string - DebugServiceAddr string - DryRun bool - DryRunFieldName string - AddHostMetadataToTrace bool + GetOtherConfigVal string + GetPeersErr error + GetPeersVal []string + GetRedisHostErr error + GetRedisHostVal string + GetRedisPasswordErr error + GetRedisPasswordVal string + GetUseTLSErr error + GetUseTLSVal bool + GetUseTLSInsecureErr error + GetUseTLSInsecureVal bool + GetSamplerTypeErr error + GetSamplerTypeVal interface{} + GetMetricsTypeErr error + GetMetricsTypeVal string + GetOpsRampMetricsConfigErr error + GetOpsRampMetricsConfigVal OpsRampMetricsConfig + GetSendDelayErr error + GetSendDelayVal time.Duration + GetTraceTimeoutErr error + GetTraceTimeoutVal time.Duration + GetMaxBatchSizeVal uint + GetUpstreamBufferSizeVal int + GetPeerBufferSizeVal int + SendTickerVal time.Duration + IdentifierInterfaceName string + UseIPV6Identifier bool + RedisIdentifier string + PeerManagementType string + DebugServiceAddr string + DryRun bool + DryRunFieldName string + AddHostMetadataToTrace bool Mux sync.RWMutex } @@ -137,12 +135,6 @@ func (m *MockConfig) GetLoggerType() (string, error) { return m.GetLoggerTypeVal, m.GetLoggerTypeErr } -func (m *MockConfig) 
GetHoneycombLoggerConfig() (HoneycombLoggerConfig, error) { - m.Mux.RLock() - defer m.Mux.RUnlock() - - return m.GetHoneycombLoggerConfigVal, m.GetHoneycombLoggerConfigErr -} func (m *MockConfig) GetLoggingLevel() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() @@ -195,11 +187,11 @@ func (m *MockConfig) GetMetricsType() (string, error) { return m.GetMetricsTypeVal, m.GetMetricsTypeErr } -func (m *MockConfig) GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) { +func (m *MockConfig) GetPrometheusMetricsConfig() (OpsRampMetricsConfig, error) { m.Mux.RLock() defer m.Mux.RUnlock() - return m.GetPrometheusMetricsConfigVal, m.GetPrometheusMetricsConfigErr + return m.GetOpsRampMetricsConfigVal, m.GetOpsRampMetricsConfigErr } func (m *MockConfig) GetSendDelay() (time.Duration, error) { m.Mux.RLock() diff --git a/config_complete.toml b/config_complete.toml index 6663a14f56..c8cad36fb3 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -119,11 +119,6 @@ SendMetricsToOpsRamp = false # implementations of the Collector interface. Collector = "InMemCollector" -# Logger describes which logger to use for Refinery logs. Valid options are -# "logrus" and "honeycomb". The logrus option will write logs to STDOUT and the -# honeycomb option will send them to a Honeycomb dataset. -Logger = "logrus" - ######################### ## Peer Management ## ######################### @@ -224,72 +219,59 @@ MaxAlloc = 0 # commented out. [LogrusLogger] -# logrus logger currently has no options! - -###################### -## Honeycomb Logger ## -###################### +# LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] +LogFormatter = "logfmt" -# HoneycombLogger is a section of the config only used if you are using the -# HoneycombLogger to send all logs to a Honeycomb Dataset. If you are using a -# different logger (eg file-based logger) you can leave all this commented out. 
+# LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr", "file"] +LogOutput = "file" -[HoneycombLogger] +## LogrusLogger.File - specifies configs for logs when LogOutput is set to "file" +[LogrusLogger.File] -# LoggerHoneycombAPI is the URL for the upstream Honeycomb API. -# Eligible for live reload. -LoggerHoneycombAPI = "https://api.honeycomb.io" +# FileName specifies the location where the logs are supposed be stored +FileName = "/var/log/opsramp/tracing-proxy.log" -# LoggerAPIKey is the API key to use to send log events to the Honeycomb logging -# dataset. This is separate from the APIKeys used to authenticate regular -# traffic. -# Eligible for live reload. -LoggerAPIKey = "abcd1234" +# MaxSize is the maximum size in megabytes of the log file before it gets rotated. +MaxSize = 1 -# LoggerDataset is the name of the dataset to which to send Refinery logs -# Eligible for live reload. -LoggerDataset = "Refinery Logs" +# MaxBackups is the maximum number of old log files to retain. +MaxBackups = 3 -# LoggerSamplerEnabled enables a PerKeyThroughput dynamic sampler for log messages. -# This will sample log messages based on [log level:message] key on a per second throughput basis. -# Not eligible for live reload. -LoggerSamplerEnabled = true +# Compress determines if the rotated log files should be compressed +# using gzip. +Compress = true -# LoggerSamplerThroughput is the per key per second throughput for the log message dynamic sampler. -# Not eligible for live reload. -LoggerSamplerThroughput = 10 #####################@## ## Prometheus Metrics ## #####################@## -[PrometheusMetrics] +[OpsRampMetrics] # MetricsListenAddr determines the interface and port on which Prometheus will # listen for requests for /metrics. Must be different from the main Refinery # listener. # Not eligible for live reload. MetricsListenAddr = "localhost:2112" -[OpsRampMetrics] # OpsRampMetricsAPI is the URL for the upstream OpsRamp API. 
# Not Eligible for live reload. -OpsRampMetricsAPI = "" +OpsRampMetricsAPI = "https://placeholder.api.com/" # OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. # Not Eligible for live reload. -OpsRampTenantID = "" +OpsRampTenantID = "placeholder_tenantID" # OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. # This is separate from the APIKeys used to authenticate regular # traffic. # Not Eligible for live reload. -OpsRampMetricsAPIKey = "" +OpsRampMetricsAPIKey = "placeholder_key" # OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. # This is separate from the APISecret used to authenticate regular # traffic. # Not Eligible for live reload. -OpsRampMetricsAPISecret = "" +OpsRampMetricsAPISecret = "placeholder_secret" # OpsRampMetricsReportingInterval is frequency specified in seconds at which # the metrics are collected and sent to OpsRamp diff --git a/go.mod b/go.mod index e11455ad66..1c25e5a544 100644 --- a/go.mod +++ b/go.mod @@ -39,6 +39,7 @@ require ( google.golang.org/grpc v1.44.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect ) //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220209113356-39ae92fc19f4 diff --git a/go.sum b/go.sum index d716e84ab7..46442f85b9 100644 --- a/go.sum +++ b/go.sum @@ -857,6 +857,8 @@ gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXa gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod 
h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/logger/honeycomb.go b/logger/honeycomb.go deleted file mode 100644 index 16e8af75a5..0000000000 --- a/logger/honeycomb.go +++ /dev/null @@ -1,256 +0,0 @@ -package logger - -import ( - "errors" - "fmt" - "net/http" - "os" - "strings" - "time" - - "github.com/honeycombio/dynsampler-go" - libtrace "github.com/honeycombio/libhoney-go" - "github.com/honeycombio/libhoney-go/transmission" - - "github.com/jirs5/tracing-proxy/config" -) - -// HoneycombLogger is a Logger implementation that sends all logs to a Honeycomb -// dataset. It requires a HoneycombLogger section of the config to exist with -// three keys, LoggerHoneycombAPI, LoggerAPIKey, and LoggerDataset. -type HoneycombLogger struct { - Config config.Config `inject:""` - UpstreamTransport *http.Transport `inject:"upstreamTransport"` - Version string `inject:"version"` - loggerConfig config.HoneycombLoggerConfig - libhClient *libtrace.Client - builder *libtrace.Builder - sampler dynsampler.Sampler -} - -type HoneycombEntry struct { - loggerConfig config.HoneycombLoggerConfig - builder *libtrace.Builder - sampler dynsampler.Sampler -} - -const ( - UnknownLevel config.HoneycombLevel = iota - DebugLevel - InfoLevel - WarnLevel - ErrorLevel - PanicLevel -) - -func (h *HoneycombLogger) Start() error { - // logLevel is defined outside the HoneycombLogger section - // and is set independently, before Start() is called, so we need to - // preserve it. - // TODO: make LogLevel part of the HoneycombLogger/LogrusLogger sections? 
- logLevel := h.loggerConfig.Level - loggerConfig, err := h.Config.GetHoneycombLoggerConfig() - if err != nil { - return err - } - loggerConfig.Level = logLevel - h.loggerConfig = loggerConfig - var loggerTx transmission.Sender - if h.loggerConfig.LoggerAPIKey == "" { - loggerTx = &transmission.DiscardSender{} - } else { - loggerTx = &transmission.Honeycomb{ - // logs are often sent in flurries; flush every half second - MaxBatchSize: 100, - BatchTimeout: 500 * time.Millisecond, - UserAgentAddition: "tracing-proxy/" + h.Version + " (metrics)", - Transport: h.UpstreamTransport, - PendingWorkCapacity: libtrace.DefaultPendingWorkCapacity, - } - } - - if loggerConfig.LoggerSamplerEnabled { - h.sampler = &dynsampler.PerKeyThroughput{ - ClearFrequencySec: 10, - PerKeyThroughputPerSec: loggerConfig.LoggerSamplerThroughput, - MaxKeys: 1000, - } - err := h.sampler.Start() - if err != nil { - return err - } - } - - libhClientConfig := libtrace.ClientConfig{ - APIHost: h.loggerConfig.LoggerHoneycombAPI, - APIKey: h.loggerConfig.LoggerAPIKey, - Dataset: h.loggerConfig.LoggerDataset, - Transmission: loggerTx, - } - libhClient, err := libtrace.NewClient(libhClientConfig) - if err != nil { - return err - } - h.libhClient = libhClient - - if hostname, err := os.Hostname(); err == nil { - h.libhClient.AddField("hostname", hostname) - } - startTime := time.Now() - h.libhClient.AddDynamicField("process_uptime_seconds", func() interface{} { - return time.Now().Sub(startTime) / time.Second - }) - - h.builder = h.libhClient.NewBuilder() - - // listen for responses from honeycomb, log to STDOUT if something unusual - // comes back - go h.readResponses() - - // listen for config reloads - h.Config.RegisterReloadCallback(h.reloadBuilder) - - fmt.Printf("Starting Honeycomb Logger - see Honeycomb %s dataset for service logs\n", h.loggerConfig.LoggerDataset) - - return nil -} - -func (h *HoneycombLogger) readResponses() { - resps := h.libhClient.TxResponses() - for resp := range resps { - 
respString := fmt.Sprintf("Response: status: %d, duration: %s", resp.StatusCode, resp.Duration) - // read response, log if there's an error - switch { - case resp.StatusCode == 0: // log message dropped due to sampling - continue - case resp.Err != nil: - fmt.Fprintf(os.Stderr, "Honeycomb Logger got an error back from Honeycomb while trying to send a log line: %s, error: %s, body: %s\n", respString, resp.Err.Error(), string(resp.Body)) - case resp.StatusCode > 202: - fmt.Fprintf(os.Stderr, "Honeycomb Logger got an unexpected status code back from Honeycomb while trying to send a log line: %s, %s\n", respString, string(resp.Body)) - } - } -} - -func (h *HoneycombLogger) reloadBuilder() { - h.Debug().Logf("reloading config for Honeycomb logger") - // preseve log level - logLevel := h.loggerConfig.Level - loggerConfig, err := h.Config.GetHoneycombLoggerConfig() - if err != nil { - // complain about this both to STDOUT and to the previously configured - // honeycomb logger - fmt.Printf("failed to reload configs for Honeycomb logger: %+v\n", err) - h.Error().Logf("failed to reload configs for Honeycomb logger: %+v", err) - return - } - loggerConfig.Level = logLevel - h.loggerConfig = loggerConfig - h.builder.APIHost = h.loggerConfig.LoggerHoneycombAPI - h.builder.WriteKey = h.loggerConfig.LoggerAPIKey - h.builder.Dataset = h.loggerConfig.LoggerDataset -} - -func (h *HoneycombLogger) Stop() error { - fmt.Printf("stopping honey logger\n") - libtrace.Flush() - return nil -} - -func (h *HoneycombLogger) Debug() Entry { - if h.loggerConfig.Level > DebugLevel { - return nullEntry - } - - ev := &HoneycombEntry{ - loggerConfig: h.loggerConfig, - builder: h.builder.Clone(), - sampler: h.sampler, - } - ev.builder.AddField("level", "debug") - - return ev -} - -func (h *HoneycombLogger) Info() Entry { - if h.loggerConfig.Level > InfoLevel { - return nullEntry - } - - ev := &HoneycombEntry{ - loggerConfig: h.loggerConfig, - builder: h.builder.Clone(), - sampler: h.sampler, - } - 
ev.builder.AddField("level", "info") - - return ev -} - -func (h *HoneycombLogger) Error() Entry { - if h.loggerConfig.Level > ErrorLevel { - return nullEntry - } - - ev := &HoneycombEntry{ - loggerConfig: h.loggerConfig, - builder: h.builder.Clone(), - sampler: h.sampler, - } - ev.builder.AddField("level", "error") - - return ev -} - -func (h *HoneycombLogger) SetLevel(level string) error { - sanitizedLevel := strings.TrimSpace(strings.ToLower(level)) - var lvl config.HoneycombLevel - switch sanitizedLevel { - case "debug": - lvl = DebugLevel - case "info": - lvl = InfoLevel - case "warn", "warning": - lvl = WarnLevel - case "error": - lvl = ErrorLevel - case "panic": - lvl = PanicLevel - default: - return errors.New(fmt.Sprintf("unrecognized logging level: %s", level)) - } - h.loggerConfig.Level = lvl - return nil -} - -func (h *HoneycombEntry) WithField(key string, value interface{}) Entry { - h.builder.AddField(key, value) - return h -} - -func (h *HoneycombEntry) WithString(key string, value string) Entry { - return h.WithField(key, value) -} - -func (h *HoneycombEntry) WithFields(fields map[string]interface{}) Entry { - h.builder.Add(fields) - return h -} - -func (h *HoneycombEntry) Logf(f string, args ...interface{}) { - ev := h.builder.NewEvent() - msg := fmt.Sprintf(f, args...) 
- ev.AddField("msg", msg) - ev.Metadata = map[string]string{ - "api_host": ev.APIHost, - "dataset": ev.Dataset, - } - level, ok := ev.Fields()["level"].(string) - if !ok { - level = "unknown" - } - if h.sampler != nil { - rate := h.sampler.GetSampleRate(fmt.Sprintf(`%s:%s`, level, msg)) - ev.SampleRate = uint(rate) - } - ev.Send() -} diff --git a/logger/logger.go b/logger/logger.go index 2298e66a9a..b70eb9ddcb 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -1,18 +1,18 @@ package logger -import ( - "fmt" - "os" - - "github.com/jirs5/tracing-proxy/config" -) +import "github.com/sirupsen/logrus" type Logger interface { Debug() Entry Info() Entry Error() Entry + Fatal() Entry + Panic() Entry + Warn() Entry // SetLevel sets the logging level (debug, info, warn, error) SetLevel(level string) error + + Init() *logrus.Logger } type Entry interface { @@ -26,21 +26,6 @@ type Entry interface { Logf(f string, args ...interface{}) } -func GetLoggerImplementation(c config.Config) Logger { - var logger Logger - loggerType, err := c.GetLoggerType() - if err != nil { - fmt.Printf("unable to get logger type from config: %v\n", err) - os.Exit(1) - } - switch loggerType { - case "honeycomb": - logger = &HoneycombLogger{} - case "logrus": - logger = &LogrusLogger{} - default: - fmt.Printf("unknown logger type %s. 
Exiting.\n", loggerType) - os.Exit(1) - } - return logger +func GetLoggerImplementation() Logger { + return &LogrusLogger{} } diff --git a/logger/logger_test.go b/logger/logger_test.go deleted file mode 100644 index de03ab625b..0000000000 --- a/logger/logger_test.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build all || race -// +build all race - -package logger - -import ( - "testing" - - "github.com/jirs5/tracing-proxy/config" - - "github.com/stretchr/testify/assert" -) - -func TestHoneycombLoggerRespectsLogLevelAfterStart(t *testing.T) { - cfg := &config.MockConfig{GetHoneycombLoggerConfigVal: config.HoneycombLoggerConfig{}} - hcLogger := &HoneycombLogger{ - Config: cfg, - loggerConfig: config.HoneycombLoggerConfig{Level: WarnLevel}, - } - - assert.Equal(t, WarnLevel, hcLogger.loggerConfig.Level) - err := hcLogger.Start() - assert.Nil(t, err) - assert.Equal(t, WarnLevel, hcLogger.loggerConfig.Level) -} diff --git a/logger/logrus.go b/logger/logrus.go index f0fdc5a74c..4af488e808 100644 --- a/logger/logrus.go +++ b/logger/logrus.go @@ -1,9 +1,10 @@ package logger import ( - "github.com/sirupsen/logrus" - "github.com/jirs5/tracing-proxy/config" + "github.com/sirupsen/logrus" + lumberjack "gopkg.in/natefinch/lumberjack.v2" + "os" ) // LogrusLogger is a Logger implementation that sends all logs to stdout using @@ -21,11 +22,104 @@ type LogrusEntry struct { } func (l *LogrusLogger) Start() error { - l.logger = logrus.New() l.logger.SetLevel(l.level) + l.logger.SetReportCaller(true) + + logrusConfig, err := l.Config.GetLogrusConfig() + if err != nil { + return err + } + + switch logrusConfig.LogOutput { + case "stdout": + l.logger.SetOutput(os.Stdout) + case "stderr": + l.logger.SetOutput(os.Stderr) + case "file": + l.logger.SetOutput(&lumberjack.Logger{ + Filename: logrusConfig.File.FileName, + MaxSize: logrusConfig.File.MaxSize, + MaxBackups: logrusConfig.File.MaxBackups, + Compress: 
logrusConfig.File.Compress, + }) + } + + l.logger.SetFormatter(&logrus.TextFormatter{ + DisableColors: true, + ForceQuote: true, + FullTimestamp: true, + DisableLevelTruncation: true, + QuoteEmptyFields: true, + FieldMap: logrus.FieldMap{ + logrus.FieldKeyFile: "file", + logrus.FieldKeyTime: "timestamp", + logrus.FieldKeyLevel: "level", + logrus.FieldKeyMsg: "message", + logrus.FieldKeyFunc: "caller", + }, + }) + + //l.logger.SetFormatter(&logrus.JSONFormatter{ + // FieldMap: logrus.FieldMap{ + // logrus.FieldKeyFile: "file", + // logrus.FieldKeyTime: "timestamp", + // logrus.FieldKeyLevel: "level", + // logrus.FieldKeyMsg: "message", + // logrus.FieldKeyFunc: "caller", + // }, + //}) return nil } +func (l *LogrusLogger) Init() *logrus.Logger { + l.logger = logrus.New() + return l.logger +} + +func (l *LogrusLogger) Panic() Entry { + if !l.logger.IsLevelEnabled(logrus.PanicLevel) { + return nullEntry + } + + return &LogrusEntry{ + entry: logrus.NewEntry(l.logger), + level: logrus.PanicLevel, + } +} + +func (l *LogrusLogger) Fatal() Entry { + if !l.logger.IsLevelEnabled(logrus.FatalLevel) { + return nullEntry + } + + return &LogrusEntry{ + entry: logrus.NewEntry(l.logger), + level: logrus.FatalLevel, + } +} + +func (l *LogrusLogger) Warn() Entry { + if !l.logger.IsLevelEnabled(logrus.WarnLevel) { + return nullEntry + } + + return &LogrusEntry{ + entry: logrus.NewEntry(l.logger), + level: logrus.WarnLevel, + } +} + +func (l *LogrusLogger) Trace() Entry { + if !l.logger.IsLevelEnabled(logrus.TraceLevel) { + return nullEntry + } + + return &LogrusEntry{ + entry: logrus.NewEntry(l.logger), + level: logrus.TraceLevel, + } +} + func (l *LogrusLogger) Debug() Entry { if !l.logger.IsLevelEnabled(logrus.DebugLevel) { return nullEntry @@ -95,6 +189,14 @@ func (l *LogrusEntry) WithFields(fields map[string]interface{}) Entry { func (l *LogrusEntry) Logf(f string, args ...interface{}) { switch l.level { + case logrus.WarnLevel: + l.entry.Warnf(f, args...) 
+ case logrus.FatalLevel: + l.entry.Fatalf(f, args...) + case logrus.PanicLevel: + l.entry.Panicf(f, args...) + case logrus.TraceLevel: + l.entry.Tracef(f, args...) case logrus.DebugLevel: l.entry.Debugf(f, args...) case logrus.InfoLevel: diff --git a/logger/mock.go b/logger/mock.go deleted file mode 100644 index dc6270ad58..0000000000 --- a/logger/mock.go +++ /dev/null @@ -1,78 +0,0 @@ -package logger - -import ( - "fmt" - - "github.com/jirs5/tracing-proxy/config" -) - -type MockLogger struct { - Events []*MockLoggerEvent -} - -type MockLoggerEvent struct { - l *MockLogger - level config.HoneycombLevel - Fields map[string]interface{} -} - -func (l *MockLogger) Debug() Entry { - return &MockLoggerEvent{ - l: l, - level: DebugLevel, - Fields: make(map[string]interface{}), - } -} - -func (l *MockLogger) Info() Entry { - return &MockLoggerEvent{ - l: l, - level: InfoLevel, - Fields: make(map[string]interface{}), - } -} - -func (l *MockLogger) Error() Entry { - return &MockLoggerEvent{ - l: l, - level: ErrorLevel, - Fields: make(map[string]interface{}), - } -} - -func (l *MockLogger) SetLevel(level string) error { - return nil -} - -func (e *MockLoggerEvent) WithField(key string, value interface{}) Entry { - e.Fields[key] = value - - return e -} - -func (e *MockLoggerEvent) WithString(key string, value string) Entry { - return e.WithField(key, value) -} - -func (e *MockLoggerEvent) WithFields(fields map[string]interface{}) Entry { - for k, v := range fields { - e.Fields[k] = v - } - - return e -} - -func (e *MockLoggerEvent) Logf(f string, args ...interface{}) { - msg := fmt.Sprintf(f, args...) 
- switch e.level { - case DebugLevel: - e.WithField("debug", msg) - case InfoLevel: - e.WithField("info", msg) - case ErrorLevel: - e.WithField("error", msg) - default: - panic("unexpected log level") - } - e.l.Events = append(e.l.Events, e) -} diff --git a/metrics/opsramp.go b/metrics/opsramp.go index 533c76da1c..eba889b34b 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -49,12 +49,12 @@ func (p *OpsRampMetrics) Start() error { p.Logger.Debug().Logf("Starting OpsRampMetrics") defer func() { p.Logger.Debug().Logf("Finished starting OpsRampMetrics") }() - if p.prefix == "" && p.Config.GetSendMetricsToOpsRamp() { - metricsConfig, err := p.Config.GetOpsRampMetricsConfig() - if err != nil { - p.Logger.Error().Logf("Failed to Load OpsRampMetrics Config:", err) - } + metricsConfig, err := p.Config.GetOpsRampMetricsConfig() + if err != nil { + return err + } + if p.prefix == "" && p.Config.GetSendMetricsToOpsRamp() { go func() { metricsTicker := time.NewTicker(time.Duration(metricsConfig.OpsRampMetricsReportingInterval) * time.Second) defer metricsTicker.Stop() @@ -76,17 +76,12 @@ func (p *OpsRampMetrics) Start() error { } - pc, err := p.Config.GetPrometheusMetricsConfig() - if err != nil { - return err - } - p.metrics = make(map[string]interface{}) muxxer := mux.NewRouter() muxxer.Handle("/metrics", promhttp.Handler()) - go http.ListenAndServe(pc.MetricsListenAddr, muxxer) + go http.ListenAndServe(metricsConfig.MetricsListenAddr, muxxer) return nil } From 59ce65267ca15aa9bbce178df5a52448c26a357f Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Thu, 7 Apr 2022 14:40:23 +0530 Subject: [PATCH 156/351] allow config of log format from config --- logger/logrus.go | 51 +++++++++++++++++++++++++----------------------- 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/logger/logrus.go b/logger/logrus.go index 4af488e808..688583fc69 100644 --- a/logger/logrus.go +++ b/logger/logrus.go @@ -44,30 +44,33 @@ func (l *LogrusLogger) Start() error { }) } - 
l.logger.SetFormatter(&logrus.TextFormatter{ - DisableColors: true, - ForceQuote: true, - FullTimestamp: true, - DisableLevelTruncation: true, - QuoteEmptyFields: true, - FieldMap: logrus.FieldMap{ - logrus.FieldKeyFile: "file", - logrus.FieldKeyTime: "timestamp", - logrus.FieldKeyLevel: "level", - logrus.FieldKeyMsg: "message", - logrus.FieldKeyFunc: "caller", - }, - }) - - //l.logger.SetFormatter(&logrus.JSONFormatter{ - // FieldMap: logrus.FieldMap{ - // logrus.FieldKeyFile: "file", - // logrus.FieldKeyTime: "timestamp", - // logrus.FieldKeyLevel: "level", - // logrus.FieldKeyMsg: "message", - // logrus.FieldKeyFunc: "caller", - // }, - //}) + switch logrusConfig.LogFormatter { + case "logfmt": + l.logger.SetFormatter(&logrus.TextFormatter{ + DisableColors: true, + ForceQuote: true, + FullTimestamp: true, + DisableLevelTruncation: true, + QuoteEmptyFields: true, + FieldMap: logrus.FieldMap{ + logrus.FieldKeyFile: "file", + logrus.FieldKeyTime: "timestamp", + logrus.FieldKeyLevel: "level", + logrus.FieldKeyMsg: "message", + logrus.FieldKeyFunc: "caller", + }, + }) + case "json": + l.logger.SetFormatter(&logrus.JSONFormatter{ + FieldMap: logrus.FieldMap{ + logrus.FieldKeyFile: "file", + logrus.FieldKeyTime: "timestamp", + logrus.FieldKeyLevel: "level", + logrus.FieldKeyMsg: "message", + logrus.FieldKeyFunc: "caller", + }, + }) + } return nil } From b39c65ed6e717d7a74a7de264c93eb369289c9c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Apr 2022 14:49:50 +0100 Subject: [PATCH 157/351] Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1 (#390) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.11.0 to 1.12.1. 
- [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.11.0...v1.12.1) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 6296caea96..9721d1d00d 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/klauspost/compress v1.13.6 github.com/leodido/go-urn v1.2.0 // indirect github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/client_golang v1.12.1 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.8.1 github.com/spf13/viper v1.10.1 diff --git a/go.sum b/go.sum index 051ee8381d..5da2c25c9e 100644 --- a/go.sum +++ b/go.sum @@ -346,8 +346,9 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod 
h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -356,14 +357,16 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -519,6 +522,7 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210913180222-943fd674d43e h1:+b/22bPvDYt4NPDcy4xAGCmON713ONAWFeY3Z7I3tR8= golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -618,8 +622,9 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486 h1:5hpz5aRr+W1erYCL5JRhSUBJRph7l9XkNveoExlrKYk= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From db810915501979c32011e139fb3568384fad1ba1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Apr 2022 14:50:07 +0100 Subject: [PATCH 158/351] Bump github.com/stretchr/testify from 1.7.0 to 1.7.1 (#426) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.0 to 1.7.1. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.7.0...v1.7.1) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 9721d1d00d..ba5182985d 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.8.1 github.com/spf13/viper v1.10.1 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.7.1 github.com/tidwall/gjson v1.14.0 github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.9.0 diff --git a/go.sum b/go.sum index 5da2c25c9e..8c38936216 100644 --- a/go.sum +++ b/go.sum @@ -398,8 +398,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tidwall/gjson v1.14.0 h1:6aeJ0bzojgWLa82gDQHcx3S0Lr/O51I9bJ5nv6JFx5w= From 
7863b4cd17ae38a23e83118e5291887e3059765c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Apr 2022 14:50:23 +0100 Subject: [PATCH 159/351] Bump github.com/klauspost/compress from 1.13.6 to 1.15.1 (#427) Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.13.6 to 1.15.1. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.13.6...v1.15.1) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index ba5182985d..84a5ffbc1b 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.13.6 + github.com/klauspost/compress v1.15.1 github.com/leodido/go-urn v1.2.0 // indirect github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.1 diff --git a/go.sum b/go.sum index 8c38936216..ee2401ee2e 100644 --- a/go.sum +++ b/go.sum @@ -281,8 +281,9 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= From c473263f2c3b11f28c732a4bd6d516c9595d9915 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Apr 2022 14:51:20 +0100 Subject: [PATCH 160/351] Bump google.golang.org/grpc from 1.43.0 to 1.45.0 (#428) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.43.0 to 1.45.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.43.0...v1.45.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 84a5ffbc1b..e2ddec054d 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.9.0 golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect - google.golang.org/grpc v1.43.0 + google.golang.org/grpc v1.45.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect ) diff --git a/go.sum b/go.sum index ee2401ee2e..94e8b88737 100644 --- a/go.sum +++ b/go.sum @@ -828,8 +828,9 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= From 08daaf24281bfdcc61f1e1832219a5ab21f3e8b8 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Fri, 8 Apr 2022 17:04:53 +0100 Subject: [PATCH 161/351] prepare v1.13.0 release (#433) --- CHANGELOG.md | 14 ++++++++++++++ 1 file 
changed, 14 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ba1731067..79a2e1d12a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Refinery Changelog +## 1.13.0 2022-04-08 + +### Enhancements + +- Add parsing for nested json fields in the rules sampler (#418) | [@ecobrien29](https://github.com/ecobrien29) + +### Maintenance + +- Update husky to v0.10.3 (#431) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) +- Bump google.golang.org/grpc from 1.43.0 to 1.45.0 (#428) +- Bump github.com/klauspost/compress from 1.13.6 to 1.15.1 (#427) +- Bump github.com/stretchr/testify from 1.7.0 to 1.7.1 (#426) +- Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1 (#390) + ## 1.12.1 2022-03-28 ### Fixes From 1ed7251824af4875a6b674ff99d9e020fd237563 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Mon, 11 Apr 2022 14:46:41 +0530 Subject: [PATCH 162/351] tls options as config params --- cmd/tracing-proxy/main.go | 4 +++- config/config.go | 6 ++++++ config/file_config.go | 16 ++++++++++++++++ config_complete.toml | 6 ++++++ go.mod | 4 ++-- go.sum | 8 ++++---- 6 files changed, 37 insertions(+), 7 deletions(-) diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index 60616a34cb..67a4ae309d 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -142,6 +142,8 @@ func main() { BlockOnSend: true, EnableMsgpackEncoding: false, Metrics: upstreamMetricsConfig, + UseTls: c.GetGlobalUseTLS(), + UseTlsInsecure: c.GetGlobalUseTLSInsecureSkip(), }, }) if err != nil { @@ -160,7 +162,7 @@ func main() { UserAgentAddition: userAgentAddition, Transport: peerTransport, DisableCompression: !c.GetCompressPeerCommunication(), - EnableMsgpackEncoding: true, + EnableMsgpackEncoding: false, Metrics: peerMetricsConfig, }, }) diff --git a/config/config.go b/config/config.go index 39f5761319..8cf8ebede1 100644 --- a/config/config.go +++ b/config/config.go @@ -124,4 
+124,10 @@ type Config interface { GetAddHostMetadataToTrace() bool GetSendMetricsToOpsRamp() bool + + // GetUseTLS returns true when TLS must be enabled to dial + GetGlobalUseTLS() bool + + // GetUseTLSInsecureSkip returns false when certificate checks are disabled + GetGlobalUseTLSInsecureSkip() bool } diff --git a/config/file_config.go b/config/file_config.go index 6291fc76d2..d4cf933be6 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -47,6 +47,8 @@ type configContents struct { InMemCollector InMemoryCollectorCacheCapacity `validate:"required"` AddHostMetadataToTrace bool SendMetricsToOpsRamp bool + UseTls bool + UseTlsInSecure bool } type InMemoryCollectorCacheCapacity struct { @@ -672,3 +674,17 @@ func (f *fileConfig) GetSendMetricsToOpsRamp() bool { return f.conf.SendMetricsToOpsRamp } + +func (f *fileConfig) GetGlobalUseTLS() bool { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.UseTls +} + +func (f *fileConfig) GetGlobalUseTLSInsecureSkip() bool { + f.mux.RLock() + defer f.mux.RUnlock() + + return !f.conf.UseTlsInSecure +} diff --git a/config_complete.toml b/config_complete.toml index c8cad36fb3..3220e9c5f5 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -50,6 +50,12 @@ APIKeys = [ #HoneycombAPI = "localhost:50052" HoneycombAPI = "https://asura.opsramp.net" + +#Tls Options +UseTls = true +UseTlsInsecure = false + + # SendDelay is a short timer that will be triggered when a trace is complete. # Refinery will wait this duration before actually sending the trace. 
The # reason for this short delay is to allow for small network delays or clock diff --git a/go.mod b/go.mod index 1c25e5a544..ab18f78feb 100644 --- a/go.mod +++ b/go.mod @@ -39,12 +39,12 @@ require ( google.golang.org/grpc v1.44.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect - gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220209113356-39ae92fc19f4 //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220302232703-acf6fcd2a9de -replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220323133508-a1eeabcc5f98 +replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220411084221-92bd94607988 //replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220302161820-fe16f58d3996 replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220406121028-046683d10802 diff --git a/go.sum b/go.sum index 46442f85b9..4f1db860ee 100644 --- a/go.sum +++ b/go.sum @@ -45,6 +45,7 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -271,12 +272,10 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jirs5/husky v0.9.1-0.20220325122121-523bfbec37a7 h1:etPTWTKk/1S2dWAdOoW+c1smMqkrJwQZjkUoppbL/zc= -github.com/jirs5/husky v0.9.1-0.20220325122121-523bfbec37a7/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= github.com/jirs5/husky v0.9.1-0.20220406121028-046683d10802 h1:EOjRO/vmYd/KOqKNE9hURrNISv+4tspT+HawDAqgNqA= github.com/jirs5/husky v0.9.1-0.20220406121028-046683d10802/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= -github.com/jirs5/libtrace-go v0.0.0-20220323133508-a1eeabcc5f98 h1:SllAt3oySFffDLd9/T4uwE9x7JnGu6PD0T+H7gvWMLU= -github.com/jirs5/libtrace-go v0.0.0-20220323133508-a1eeabcc5f98/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= +github.com/jirs5/libtrace-go v0.0.0-20220411084221-92bd94607988 h1:xD3eYBWEyzW9RFDTOie2b184aiCFzBqHLwpP4ihV/7g= +github.com/jirs5/libtrace-go v0.0.0-20220411084221-92bd94607988/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -448,6 +447,7 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= From cd11082b93ba3ed9cef52fed9ba0f48e27edf348 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Mon, 11 Apr 2022 15:03:51 +0530 Subject: [PATCH 163/351] tls options as config params --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ab18f78feb..38b59a600f 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220209113356-39ae92fc19f4 //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220302232703-acf6fcd2a9de -replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220411084221-92bd94607988 +replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220411092904-37782dea6332 //replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky 
v0.9.1-0.20220302161820-fe16f58d3996 replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220406121028-046683d10802 diff --git a/go.sum b/go.sum index 4f1db860ee..f3e80fdba2 100644 --- a/go.sum +++ b/go.sum @@ -274,8 +274,8 @@ github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LF github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jirs5/husky v0.9.1-0.20220406121028-046683d10802 h1:EOjRO/vmYd/KOqKNE9hURrNISv+4tspT+HawDAqgNqA= github.com/jirs5/husky v0.9.1-0.20220406121028-046683d10802/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= -github.com/jirs5/libtrace-go v0.0.0-20220411084221-92bd94607988 h1:xD3eYBWEyzW9RFDTOie2b184aiCFzBqHLwpP4ihV/7g= -github.com/jirs5/libtrace-go v0.0.0-20220411084221-92bd94607988/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= +github.com/jirs5/libtrace-go v0.0.0-20220411092904-37782dea6332 h1:Jr3X5SPdxSAu5ChJ9HmK4dRRpdeWmpyebI0dKzaF9tQ= +github.com/jirs5/libtrace-go v0.0.0-20220411092904-37782dea6332/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= From b2553dfa02698d9cde6ce38be41909297bc27578 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Tue, 12 Apr 2022 18:51:30 +0530 Subject: [PATCH 164/351] grpc listen handler for peers added --- config/config.go | 4 ++++ config/file_config.go | 15 +++++++++++++++ config_complete.toml | 3 +-- go.mod | 4 ++-- go.sum | 8 ++++---- route/otlp_trace.go | 7 +++++++ route/route.go | 41 ++++++++++++++++++++++++++++++++++++++++- 7 files 
changed, 73 insertions(+), 9 deletions(-) diff --git a/config/config.go b/config/config.go index 8cf8ebede1..05449935e3 100644 --- a/config/config.go +++ b/config/config.go @@ -25,6 +25,10 @@ type Config interface { // peer traffic GetPeerListenAddr() (string, error) + // GetPeerListenAddr returns the address and port on which to listen for + // GRPC peer traffic + GetGRPCPeerListenAddr() (string, error) + // GetCompressPeerCommunication will be true if tracing-proxy should compress // data before forwarding it to a peer. GetCompressPeerCommunication() bool diff --git a/config/file_config.go b/config/file_config.go index d4cf933be6..47cce52111 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -29,6 +29,7 @@ type configContents struct { PeerListenAddr string `validate:"required"` CompressPeerCommunication bool GRPCListenAddr string + GRPCPeerListenAddr string APIKeys []string `validate:"required"` HoneycombAPI string `validate:"required,url"` LoggingLevel string `validate:"required"` @@ -362,6 +363,20 @@ func (f *fileConfig) GetGRPCListenAddr() (string, error) { return f.conf.GRPCListenAddr, nil } +func (f *fileConfig) GetGRPCPeerListenAddr() (string, error) { + f.mux.RLock() + defer f.mux.RUnlock() + + // GRPC listen addr is optional, only check value is valid if not empty + if f.conf.GRPCPeerListenAddr != "" { + _, _, err := net.SplitHostPort(f.conf.GRPCPeerListenAddr) + if err != nil { + return "", err + } + } + return f.conf.GRPCPeerListenAddr, nil +} + func (f *fileConfig) GetAPIKeys() ([]string, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index 3220e9c5f5..b9a5373c3a 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -23,6 +23,7 @@ GRPCListenAddr = "0.0.0.0:9090" # Should be of the form 0.0.0.0:8081 # Not eligible for live reload. 
PeerListenAddr = "0.0.0.0:8083" +GRPCPeerListenAddr = "0.0.0.0:8084" # CompressPeerCommunication determines whether refinery will compress span data # it forwards to peers. If it costs money to transmit data between refinery @@ -50,12 +51,10 @@ APIKeys = [ #HoneycombAPI = "localhost:50052" HoneycombAPI = "https://asura.opsramp.net" - #Tls Options UseTls = true UseTlsInsecure = false - # SendDelay is a short timer that will be triggered when a trace is complete. # Refinery will wait this duration before actually sending the trace. The # reason for this short delay is to allow for small network delays or clock diff --git a/go.mod b/go.mod index 38b59a600f..3b326e4837 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220209113356-39ae92fc19f4 //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220302232703-acf6fcd2a9de -replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220411092904-37782dea6332 +replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220412054723-e3a318f30726 //replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220302161820-fe16f58d3996 -replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220406121028-046683d10802 +replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220412060429-354fd2b490b4 diff --git a/go.sum b/go.sum index f3e80fdba2..d20c2cd8d9 100644 --- a/go.sum +++ b/go.sum @@ -272,10 +272,10 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle 
v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jirs5/husky v0.9.1-0.20220406121028-046683d10802 h1:EOjRO/vmYd/KOqKNE9hURrNISv+4tspT+HawDAqgNqA= -github.com/jirs5/husky v0.9.1-0.20220406121028-046683d10802/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= -github.com/jirs5/libtrace-go v0.0.0-20220411092904-37782dea6332 h1:Jr3X5SPdxSAu5ChJ9HmK4dRRpdeWmpyebI0dKzaF9tQ= -github.com/jirs5/libtrace-go v0.0.0-20220411092904-37782dea6332/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= +github.com/jirs5/husky v0.9.1-0.20220412060429-354fd2b490b4 h1:YmBfU5uzwUZdYTvI60Vn+5JNaIaw6611td3Ltu8A0sQ= +github.com/jirs5/husky v0.9.1-0.20220412060429-354fd2b490b4/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= +github.com/jirs5/libtrace-go v0.0.0-20220412054723-e3a318f30726 h1:q2D7u9fmAb8tyGTYg1/6jSTfmcme8vajv9z2eSG50+0= +github.com/jirs5/libtrace-go v0.0.0-20220412054723-e3a318f30726/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 19ec432859..e5d896de5a 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -3,6 +3,7 @@ package route import ( "context" "fmt" + proxypb "github.com/honeycombio/libhoney-go/proto/proxypb" "net/http" huskyotlp "github.com/honeycombio/husky/otlp" @@ -95,3 +96,9 @@ func 
processTraceRequest( return nil } + +func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTraceProxyServiceRequest) (*proxypb.ExportTraceProxyServiceResponse, error) { + + fmt.Println("Received Trace from peer: %v \n", in.Items) + return &proxypb.ExportTraceProxyServiceResponse{Message: "Received Successfully by peer", Status: "Success"}, nil +} diff --git a/route/route.go b/route/route.go index 82f6343824..0122b045a9 100644 --- a/route/route.go +++ b/route/route.go @@ -7,6 +7,7 @@ import ( "encoding/json" "errors" "fmt" + proxypb "github.com/honeycombio/libhoney-go/proto/proxypb" "io" "io/ioutil" "math" @@ -80,6 +81,11 @@ type Router struct { // used to identify Router as a OTLP TraceServer collectortrace.UnimplementedTraceServiceServer + proxypb.UnimplementedTraceProxyServiceServer +} + +func (r *Router) mustEmbedUnimplementedTraceProxyServiceServer() { + panic("implement me") } type BatchResponse struct { @@ -108,6 +114,10 @@ func (r *Router) SetVersion(ver string) { r.versionStr = ver } +type server struct { + proxypb.UnimplementedTraceProxyServiceServer +} + // LnS spins up the Listen and Serve portion of the router. A router is // initialized as being for either incoming traffic from clients or traffic from // a peer. 
They listen on different addresses so peer traffic can be @@ -169,7 +179,7 @@ func (r *Router) LnS(incomingOrPeer string) { // pass everything else through unmolested muxxer.PathPrefix("/").HandlerFunc(r.proxy).Name("proxy") - var listenAddr, grpcAddr string + var listenAddr, grpcAddr, grpcPeerAddr string if r.incomingOrPeer == "incoming" { listenAddr, err = r.Config.GetListenAddr() if err != nil { @@ -188,6 +198,14 @@ func (r *Router) LnS(incomingOrPeer string) { r.iopLogger.Error().Logf("failed to get peer listen addr config: %s", err) return } + + // GRPC listen addr is optional, err means addr was not empty and invalid + grpcPeerAddr, err = r.Config.GetGRPCPeerListenAddr() + if err != nil { + r.iopLogger.Error().Logf("failed to get grpc listen addr config: %s", err) + return + } + } r.iopLogger.Info().Logf("Listening on %s", listenAddr) @@ -217,6 +235,27 @@ func (r *Router) LnS(incomingOrPeer string) { go r.grpcServer.Serve(l) } + if len(grpcPeerAddr) > 0 { + l, err := net.Listen("tcp", grpcPeerAddr) + if err != nil { + r.iopLogger.Error().Logf("failed to listen to grpc peer addr: " + grpcPeerAddr) + } + + r.iopLogger.Info().Logf("gRPC Peer listening on %s", grpcPeerAddr) + serverOpts := []grpc.ServerOption{ + grpc.MaxSendMsgSize(GRPCMessageSizeMax), // default is math.MaxInt32 + grpc.MaxRecvMsgSize(GRPCMessageSizeMax), // default is 4MB + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: 10 * time.Second, + Timeout: 2 * time.Second, + MaxConnectionIdle: time.Minute, + }), + } + r.grpcServer = grpc.NewServer(serverOpts...) 
+ proxypb.RegisterTraceProxyServiceServer(r.grpcServer, &server{}) + go r.grpcServer.Serve(l) + } + r.doneWG.Add(1) go func() { defer r.doneWG.Done() From db2acccfff219655ad2c456a9bfb82a6453ae405 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Tue, 12 Apr 2022 20:32:37 +0530 Subject: [PATCH 165/351] grpc listen handler for peers added --- route/route.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/route/route.go b/route/route.go index 0122b045a9..435c2f059c 100644 --- a/route/route.go +++ b/route/route.go @@ -84,10 +84,6 @@ type Router struct { proxypb.UnimplementedTraceProxyServiceServer } -func (r *Router) mustEmbedUnimplementedTraceProxyServiceServer() { - panic("implement me") -} - type BatchResponse struct { Status int `json:"status"` Error string `json:"error,omitempty"` @@ -115,7 +111,7 @@ func (r *Router) SetVersion(ver string) { } type server struct { - proxypb.UnimplementedTraceProxyServiceServer + proxypb.TraceProxyServiceServer } // LnS spins up the Listen and Serve portion of the router. A router is From 0743ada04b33d44782837c820354b935e886f20c Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Tue, 12 Apr 2022 21:47:43 +0530 Subject: [PATCH 166/351] grpc listen handler for peers added --- route/route.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/route/route.go b/route/route.go index 435c2f059c..8c1f37f4f2 100644 --- a/route/route.go +++ b/route/route.go @@ -248,7 +248,7 @@ func (r *Router) LnS(incomingOrPeer string) { }), } r.grpcServer = grpc.NewServer(serverOpts...) 
- proxypb.RegisterTraceProxyServiceServer(r.grpcServer, &server{}) + proxypb.RegisterTraceProxyServiceServer(r.grpcServer, r) go r.grpcServer.Serve(l) } From 04f2e2157a888420ffc97bb3f04bb2ce4a4cf1b0 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Wed, 13 Apr 2022 10:09:15 +0530 Subject: [PATCH 167/351] grpc listen handler for peers added --- route/route.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/route/route.go b/route/route.go index 8c1f37f4f2..ffba623582 100644 --- a/route/route.go +++ b/route/route.go @@ -81,7 +81,7 @@ type Router struct { // used to identify Router as a OTLP TraceServer collectortrace.UnimplementedTraceServiceServer - proxypb.UnimplementedTraceProxyServiceServer + proxypb.TraceProxyServiceServer } type BatchResponse struct { From be7d0a2db074b98b65e4b574efd8cfc965cde327 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Wed, 13 Apr 2022 19:55:03 +0530 Subject: [PATCH 168/351] grpc listen handler for peers added --- go.mod | 2 +- go.sum | 4 ++-- route/otlp_trace.go | 58 ++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 60 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 3b326e4837..1b71b3ae48 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220209113356-39ae92fc19f4 //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220302232703-acf6fcd2a9de -replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220412054723-e3a318f30726 +replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220413141532-0f5e194e63a4 //replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220302161820-fe16f58d3996 replace 
github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220412060429-354fd2b490b4 diff --git a/go.sum b/go.sum index d20c2cd8d9..bb45628376 100644 --- a/go.sum +++ b/go.sum @@ -274,8 +274,8 @@ github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LF github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jirs5/husky v0.9.1-0.20220412060429-354fd2b490b4 h1:YmBfU5uzwUZdYTvI60Vn+5JNaIaw6611td3Ltu8A0sQ= github.com/jirs5/husky v0.9.1-0.20220412060429-354fd2b490b4/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= -github.com/jirs5/libtrace-go v0.0.0-20220412054723-e3a318f30726 h1:q2D7u9fmAb8tyGTYg1/6jSTfmcme8vajv9z2eSG50+0= -github.com/jirs5/libtrace-go v0.0.0-20220412054723-e3a318f30726/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= +github.com/jirs5/libtrace-go v0.0.0-20220413141532-0f5e194e63a4 h1:Y0QqSPkthRPkNaSIG6cRQFNPsilRUS6E2BGn06MaBQY= +github.com/jirs5/libtrace-go v0.0.0-20220413141532-0f5e194e63a4/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= diff --git a/route/otlp_trace.go b/route/otlp_trace.go index e5d896de5a..cb51f38d8c 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -2,9 +2,13 @@ package route import ( "context" + "encoding/json" "fmt" proxypb "github.com/honeycombio/libhoney-go/proto/proxypb" + "google.golang.org/grpc/metadata" + "log" "net/http" + "time" huskyotlp "github.com/honeycombio/husky/otlp" "github.com/jirs5/tracing-proxy/types" @@ -99,6 
+103,58 @@ func processTraceRequest( func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTraceProxyServiceRequest) (*proxypb.ExportTraceProxyServiceResponse, error) { - fmt.Println("Received Trace from peer: %v \n", in.Items) + fmt.Println("Received Trace data from peer \n") + + var token, tenantId, datasetName string + apiHost, err := r.Config.GetHoneycombAPI() + if err != nil { + r.Logger.Error().Logf("Unable to retrieve APIHost from config while processing OTLP batch") + return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get apihost", Status: "Failed"}, nil + } + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + log.Println("Failed to get metadata") + return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get request metadata", Status: "Failed"}, nil + } else { + authorization := md.Get("Authorization") + if len(authorization) == 0 { + return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get Authorization", Status: "Failed"}, nil + } else { + token = authorization[0] + recvdTenantId := md.Get("tenantId") + if len(recvdTenantId) == 0 { + return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get TenantId", Status: "Failed"}, nil + } else { + tenantId = recvdTenantId[0] + datasetName = md.Get("dataset")[0] + } + } + log.Printf("\nauthorization:%v", token) + log.Printf("\nTenantId:%v", tenantId) + } + + var requestID types.RequestIDContextKey + + for _, item := range in.Items { + layout := "2006-01-02T15:04:05.000Z" + timestamp, err := time.Parse(layout, item.Timestamp) + + var data map[string]interface{} + inrec, _ := json.Marshal(item.Data) + json.Unmarshal(inrec, &data) + + event := &types.Event{ + Context: ctx, + APIHost: apiHost, + APIToken: token, + APITenantId: tenantId, + Dataset: datasetName, + Timestamp: timestamp, + Data: data, + } + if err = r.processEvent(event, requestID); err != nil { + r.Logger.Error().Logf("Error processing event: " + err.Error()) + } + } return 
&proxypb.ExportTraceProxyServiceResponse{Message: "Received Successfully by peer", Status: "Success"}, nil } From 7bafcac783346666d20ac0692ed0fde27304e8ad Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Mon, 18 Apr 2022 17:18:30 +0530 Subject: [PATCH 169/351] grpc listen handler for peers added --- route/otlp_trace.go | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/route/otlp_trace.go b/route/otlp_trace.go index cb51f38d8c..4bcb950968 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -143,10 +143,30 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr inrec, _ := json.Marshal(item.Data) json.Unmarshal(inrec, &data) + //Translate ResourceAttributes , SpanAttributes, EventAttributes from proto format to interface{} + attributes := make(map[string]interface{}) + for _, kv := range item.Data.ResourceAttributes { + attributes[kv.Key] = extractKeyValue(kv.Value) + } + data["resourceAttributes"] = attributes + + attributes = make(map[string]interface{}) + for _, kv := range item.Data.SpanAttributes { + attributes[kv.Key] = extractKeyValue(kv.Value) + } + data["spanAttributes"] = attributes + + attributes = make(map[string]interface{}) + for _, kv := range item.Data.EventAttributes { + attributes[kv.Key] = extractKeyValue(kv.Value) + } + data["eventAttributes"] = attributes + event := &types.Event{ Context: ctx, APIHost: apiHost, APIToken: token, + APIKey: "token", //Hardcoded for time-being. 
This need to be cleaned APITenantId: tenantId, Dataset: datasetName, Timestamp: timestamp, @@ -158,3 +178,22 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr } return &proxypb.ExportTraceProxyServiceResponse{Message: "Received Successfully by peer", Status: "Success"}, nil } + +func extractKeyValue(v *proxypb.AnyValue) string { + if x, ok := v.GetValue().(*proxypb.AnyValue_StringValue); ok { + return x.StringValue + } else if x, ok := v.GetValue().(*proxypb.AnyValue_IntValue); ok { + return fmt.Sprintf("%d", x.IntValue) + } else if x, ok := v.GetValue().(*proxypb.AnyValue_BoolValue); ok { + return fmt.Sprintf("%v", x.BoolValue) + } else if x, ok := v.GetValue().(*proxypb.AnyValue_DoubleValue); ok { + return fmt.Sprintf("%f", x.DoubleValue) + } else if x, ok := v.GetValue().(*proxypb.AnyValue_BytesValue); ok { + return fmt.Sprintf("%v", x.BytesValue) + } else if x, ok := v.GetValue().(*proxypb.AnyValue_ArrayValue); ok { + return x.ArrayValue.String() + } else if x, ok := v.GetValue().(*proxypb.AnyValue_KvlistValue); ok { + return x.KvlistValue.String() + } + return v.String() +} From a0f130ca13e4d1e3c5b95997c845dab1f3c6ce70 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Mon, 18 Apr 2022 17:28:49 +0530 Subject: [PATCH 170/351] grpc listen addr used in peers list instead of http listener addr --- config_complete.toml | 2 +- internal/peer/redis.go | 3 ++- sharder/deterministic.go | 4 +++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/config_complete.toml b/config_complete.toml index b9a5373c3a..646d97d0fc 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -136,7 +136,7 @@ Type = "file" # hostname (or ip address) and port. All servers in the cluster should be in # this list, including this host. 
Peers = [ - "http://127.0.0.1:8083", + "http://127.0.0.1:8084", #only grpc peer listener used # "http://127.0.0.1:8083", # "http://10.1.2.3.4:8080", # "http://refinery-1231:8080", diff --git a/internal/peer/redis.go b/internal/peer/redis.go index a6b7e49d85..50ecac2aad 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -216,7 +216,8 @@ func buildOptions(c config.Config) []redis.DialOption { func publicAddr(c config.Config) (string, error) { // compute the public version of my peer listen address - listenAddr, _ := c.GetPeerListenAddr() + //listenAddr, _ := c.GetPeerListenAddr() //Temporarily removed http peer listen addr, only grpc listener + listenAddr, _ := c.GetGRPCPeerListenAddr() _, port, err := net.SplitHostPort(listenAddr) if err != nil { diff --git a/sharder/deterministic.go b/sharder/deterministic.go index 2da822af82..0c21d6b263 100644 --- a/sharder/deterministic.go +++ b/sharder/deterministic.go @@ -106,7 +106,9 @@ func (d *DeterministicSharder) Start() error { } // get my listen address for peer traffic for the Port number - listenAddr, err := d.Config.GetPeerListenAddr() + //listenAddr, err := d.Config.GetPeerListenAddr() //Temporarily removed http peer listen addr, only grpc listener + listenAddr, err := d.Config.GetGRPCPeerListenAddr() + if err != nil { return errors.Wrap(err, "failed to get listen addr config") } From c7f1e922819f1de67d4b17102bee67c5f08c99b0 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Tue, 19 Apr 2022 11:22:59 +0100 Subject: [PATCH 171/351] Update go to 1.18 (#430) --- .circleci/config.yml | 8 +- go.mod | 47 +++++++- go.sum | 263 +------------------------------------------ 3 files changed, 48 insertions(+), 270 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 581fc1b3b6..6e49d91489 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -40,7 +40,7 @@ commands: jobs: test: docker: - - image: cimg/go:1.17 + - image: cimg/go:1.18 - image: redis:6 steps: - checkout @@ -64,7 
+64,7 @@ jobs: build_binaries: docker: - - image: cimg/go:1.17 + - image: cimg/go:1.18 steps: - checkout - go-build: @@ -140,7 +140,7 @@ jobs: build_docker: docker: - - image: cimg/go:1.17 + - image: cimg/go:1.18 steps: - setup_googleko - checkout @@ -151,7 +151,7 @@ jobs: publish_docker: docker: - - image: cimg/go:1.17 + - image: cimg/go:1.18 steps: - setup_googleko - checkout diff --git a/go.mod b/go.mod index e2ddec054d..31926dc292 100644 --- a/go.mod +++ b/go.mod @@ -1,14 +1,12 @@ module github.com/honeycombio/refinery -go 1.16 +go 1.18 require ( github.com/davecgh/go-spew v1.1.1 github.com/facebookgo/inject v0.0.0-20180706035515-f23751cae28b github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d - github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 // indirect github.com/fsnotify/fsnotify v1.5.1 - github.com/go-playground/universal-translator v0.17.0 // indirect github.com/go-playground/validator v9.31.0+incompatible github.com/golang/protobuf v1.5.2 github.com/gomodule/redigo v1.8.8 @@ -20,7 +18,6 @@ require ( github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.1 - github.com/leodido/go-urn v1.2.0 // indirect github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.1 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 @@ -33,5 +30,47 @@ require ( golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect google.golang.org/grpc v1.45.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect + 
github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 // indirect + github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 // indirect + github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 // indirect + github.com/go-playground/locales v0.13.0 // indirect + github.com/go-playground/universal-translator v0.17.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/leodido/go-urn v1.2.0 // indirect + github.com/magiconair/properties v1.8.5 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pelletier/go-toml v1.9.4 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/spf13/afero v1.6.0 // indirect + github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + github.com/vmihailenco/tagparser v0.1.1 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + golang.org/x/sys 
v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect + google.golang.org/protobuf v1.27.1 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/go.sum b/go.sum index 94e8b88737..8eaa44e78c 100644 --- a/go.sum +++ b/go.sum @@ -13,19 +13,6 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= 
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -34,7 +21,6 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -47,7 +33,6 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -57,19 +42,11 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF 
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 
v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -77,35 +54,25 @@ github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.1/go.mod 
h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= @@ -124,9 +91,6 @@ github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 h1:KnnwHN59Jx github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691/go.mod h1:sKLL1iua/0etWfo/nPCmyz+v2XDMXy+Ho53W7RAuZNY= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -146,14 +110,11 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+ github.com/go-playground/validator v9.31.0+incompatible 
h1:UA72EPEogEnq76ehGdEDp4Mit+3FDh548oRqwVgNsHA= github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+PugkyDjY2bRrL/UBU4f3rvrgkN3V8JEig= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -161,8 +122,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock 
v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -178,10 +137,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.8.8 h1:f6cXq6RRfiyrOJEV7p3JhLDlmawGBVBBP1MggY8Mo4E= github.com/gomodule/redigo v1.8.8/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -193,17 +150,12 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -211,66 +163,31 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof 
v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= -github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= github.com/honeycombio/husky v0.10.3 h1:407j6dXPG2ClzBGwIm/pgD+1N56jGamb3SZESttcuPg= github.com/honeycombio/husky v0.10.3/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle 
v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -279,7 +196,6 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= @@ -288,37 +204,17 @@ 
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod 
h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -330,8 +226,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -341,11 +235,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= @@ -356,14 +247,12 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= @@ -372,16 +261,12 @@ github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= 
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= @@ -410,7 +295,6 @@ github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/vmihailenco/msgpack/v4 v4.3.11 h1:Q47CePddpNGNhk4GCnAx9DDtASi2rasatE0cd26cZoE= github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= @@ -422,33 +306,21 @@ github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -471,8 +343,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -481,10 +351,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -499,7 +365,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -514,18 +379,8 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210913180222-943fd674d43e h1:+b/22bPvDYt4NPDcy4xAGCmON713ONAWFeY3Z7I3tR8= golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -533,18 +388,7 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -553,15 +397,11 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -570,18 +410,13 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -595,36 +430,14 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -633,7 +446,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= @@ -654,7 +466,6 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -679,22 +490,9 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -716,22 +514,6 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api 
v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -770,36 +552,6 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto 
v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -814,24 +566,12 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= 
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -863,7 +603,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= From 181e8f1c5d1da78d87ddf749a6dc03cf44d56d46 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Tue, 19 Apr 2022 16:45:26 +0530 Subject: [PATCH 172/351] fix in start, end time from peers --- route/otlp_trace.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 4bcb950968..8407bb2715 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -136,7 +136,7 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr var requestID types.RequestIDContextKey for _, item := range in.Items { - layout := "2006-01-02T15:04:05.000Z" + layout := "2006-01-02 15:04:05.000000000 +0000 UTC" timestamp, err := time.Parse(layout, item.Timestamp) var data map[string]interface{} @@ -162,6 +162,10 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr } data["eventAttributes"] = attributes + //Type cast start and end time + data["startTime"] = item.Data.StartTime + data["endTime"] = item.Data.EndTime + event := &types.Event{ Context: ctx, APIHost: apiHost, From 1c723e636432a0e62b50fc936687de3645b2d901 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 20 Apr 2022 15:34:01 +0100 Subject: [PATCH 173/351] update otlp to v0.11.0 (#437) --- go.mod | 2 +- go.sum | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 31926dc292..c30c6886f7 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/stretchr/testify v1.7.1 github.com/tidwall/gjson v1.14.0 
github.com/vmihailenco/msgpack/v4 v4.3.11 - go.opentelemetry.io/proto/otlp v0.9.0 + go.opentelemetry.io/proto/otlp v0.11.0 golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect google.golang.org/grpc v1.45.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 diff --git a/go.sum b/go.sum index 8eaa44e78c..d47a8fecbb 100644 --- a/go.sum +++ b/go.sum @@ -69,7 +69,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -312,8 +311,8 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= -go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= +go.opentelemetry.io/proto/otlp 
v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -568,8 +567,8 @@ google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= From 9e2c166baa50ce6e27d3ec919650c463d261e1c1 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Tue, 26 Apr 2022 20:26:29 +0100 Subject: [PATCH 174/351] Add support for environment and dataset rules with same names (#438) Co-authored-by: Steven E. 
Harris Co-authored-by: Kent Quirk --- README.md | 30 ++++++++++++++++ collect/collect.go | 4 +-- collect/collect_test.go | 2 +- config/config.go | 2 ++ config/config_test.go | 37 +++++++++++++++++++ config/file_config.go | 8 +++++ config/mock.go | 8 +++++ sample/sample.go | 19 ++++++---- sample/sample_test.go | 80 +++++++++++++++++++++++++++++++++++++++++ 9 files changed, 181 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 74db7a164d..3fb20ffe04 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,36 @@ By default, a Refinery process will register itself in Redis using its local hos In environments where domain name resolution is slow or unreliable, override the reliance on name lookups by specifying the name of the peering network interface with the `IdentifierInterfaceName` configuration option. See the [Refinery documentation](https://docs.honeycomb.io/manage-data-volume/refinery/) for more details on tuning a cluster. + +### Mixing Legacy and Environment & Services Rule Definitions + +With the change to support environemt and services in Honeycomb, some users will want to support both sending telemetry to a legacy dataset and a new environment called the same thing (eg `production`). + +This can be accomplished by leveraging the new `DatasetPrefix` configuration property and then using that prefix in the rules definitions for the legacy datasets. + +When Refinery receives telemetry using an API key associated to a legacy dataset, it will then use the prefix in the form `{prefix}.{dataset}` when trying to resolve the rules definition. 
+ +For example +config.toml +```toml +DatasetPrefix = "legacy" +``` + +rules.toml +```toml +# default rules +Sampler = "DeterministicSampler" +SampleRate = 1 + + [production] # environment called "production" + Sampler = "DeterministicSampler" + SampleRate = 5 + + [legacy.production] # dataset called "production" + Sampler = "DeterministicSampler" + SampleRate = 10 +``` + ## How sampling decisions are made In the configuration file, you can choose from a few sampling methods and specify options for each. The `DynamicSampler` is the most interesting and most commonly used. It uses the `AvgSampleRate` algorithm from the [`dynsampler-go`](https://github.com/honeycombio/dynsampler-go) package. Briefly described, you configure Refinery to examine the trace for a set of fields (for example, `request.status_code` and `request.method`). It collects all the values found in those fields anywhere in the trace (eg "200" and "GET") together into a key it hands to the dynsampler. The dynsampler code will look at the frequency that key appears during the previous 30 seconds (or other value set by the `ClearFrequencySec` setting) and use that to hand back a desired sample rate. More frequent keys are sampled more heavily, so that an even distribution of traffic across the keyspace is represented in Honeycomb. 
diff --git a/collect/collect.go b/collect/collect.go index 07bd273721..e6d23f3c0c 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -449,9 +449,9 @@ func (i *InMemCollector) send(trace *types.Trace) { logFields["environment"] = samplerKey } - // use sampler key to find sampler, crete and cache if not found + // use sampler key to find sampler; create and cache if not found if sampler, found = i.datasetSamplers[samplerKey]; !found { - sampler = i.SamplerFactory.GetSamplerImplementationForDataset(samplerKey) + sampler = i.SamplerFactory.GetSamplerImplementationForKey(samplerKey, isLegacyKey) i.datasetSamplers[samplerKey] = sampler } diff --git a/collect/collect_test.go b/collect/collect_test.go index d0ddfbf684..968ba002f3 100644 --- a/collect/collect_test.go +++ b/collect/collect_test.go @@ -186,7 +186,7 @@ func TestDryRunMode(t *testing.T) { Config: conf, Logger: &logger.NullLogger{}, } - sampler := samplerFactory.GetSamplerImplementationForDataset("test") + sampler := samplerFactory.GetSamplerImplementationForKey("test", true) coll := &InMemCollector{ Config: conf, Logger: &logger.NullLogger{}, diff --git a/config/config.go b/config/config.go index 4028535348..fe0e70696d 100644 --- a/config/config.go +++ b/config/config.go @@ -139,4 +139,6 @@ type Config interface { GetAddHostMetadataToTrace() bool GetEnvironmentCacheTTL() time.Duration + + GetDatasetPrefix() string } diff --git a/config/config_test.go b/config/config_test.go index fb2d7e2a46..6251bb3512 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -652,3 +652,40 @@ func TestHoneycombLoggerConfigDefaults(t *testing.T) { assert.Equal(t, false, loggerConfig.LoggerSamplerEnabled) assert.Equal(t, 5, loggerConfig.LoggerSamplerThroughput) } + +func TestDatasetPrefix(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "") + assert.NoError(t, err) + defer os.RemoveAll(tmpDir) + + configFile, err := ioutil.TempFile(tmpDir, "*.toml") + assert.NoError(t, err) + + _, err = 
configFile.Write([]byte(` + DatasetPrefix = "dataset" + + [InMemCollector] + CacheCapacity=1000 + + [HoneycombMetrics] + MetricsHoneycombAPI="http://honeycomb.io" + MetricsAPIKey="1234" + MetricsDataset="testDatasetName" + MetricsReportingInterval=3 + + [HoneycombLogger] + LoggerHoneycombAPI="http://honeycomb.io" + LoggerAPIKey="1234" + LoggerDataset="loggerDataset" + `)) + assert.NoError(t, err) + configFile.Close() + + rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") + assert.NoError(t, err) + + c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + assert.NoError(t, err) + + assert.Equal(t, "dataset", c.GetDatasetPrefix()) +} diff --git a/config/file_config.go b/config/file_config.go index 13484922b3..e4067cd7ce 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -49,6 +49,7 @@ type configContents struct { InMemCollector InMemoryCollectorCacheCapacity `validate:"required"` AddHostMetadataToTrace bool EnvironmentCacheTTL time.Duration + DatasetPrefix string } type InMemoryCollectorCacheCapacity struct { @@ -736,3 +737,10 @@ func (f *fileConfig) GetEnvironmentCacheTTL() time.Duration { return f.conf.EnvironmentCacheTTL } + +func (f *fileConfig) GetDatasetPrefix() string { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.DatasetPrefix +} diff --git a/config/mock.go b/config/mock.go index 76793eabb6..706003c18a 100644 --- a/config/mock.go +++ b/config/mock.go @@ -71,6 +71,7 @@ type MockConfig struct { DryRunFieldName string AddHostMetadataToTrace bool EnvironmentCacheTTL time.Duration + DatasetPrefix string Mux sync.RWMutex } @@ -328,3 +329,10 @@ func (f *MockConfig) GetEnvironmentCacheTTL() time.Duration { return f.EnvironmentCacheTTL } + +func (f *MockConfig) GetDatasetPrefix() string { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.DatasetPrefix +} diff --git a/sample/sample.go b/sample/sample.go index eef4337ca5..1186005b26 100644 --- a/sample/sample.go +++ b/sample/sample.go @@ -1,6 +1,7 @@ package sample 
import ( + "fmt" "os" "github.com/honeycombio/refinery/config" @@ -21,10 +22,16 @@ type SamplerFactory struct { Metrics metrics.Metrics `inject:"metrics"` } -// GetSamplerImplementationForDataset returns the sampler implementation for the dataset, -// or nil if it is not defined -func (s *SamplerFactory) GetSamplerImplementationForDataset(dataset string) Sampler { - c, err := s.Config.GetSamplerConfigForDataset(dataset) +// GetSamplerImplementationForKey returns the sampler implementation for the given +// samplerKey (dataset for legacy keys, environment otherwise), or nil if it is not defined +func (s *SamplerFactory) GetSamplerImplementationForKey(samplerKey string, isLegacyKey bool) Sampler { + if isLegacyKey { + if prefix := s.Config.GetDatasetPrefix(); prefix != "" { + samplerKey = fmt.Sprintf("%s.%s", prefix, samplerKey) + } + } + + c, err := s.Config.GetSamplerConfigForDataset(samplerKey) if err != nil { return nil } @@ -49,11 +56,11 @@ func (s *SamplerFactory) GetSamplerImplementationForDataset(dataset string) Samp err = sampler.Start() if err != nil { - s.Logger.Debug().WithField("dataset", dataset).Logf("failed to start sampler") + s.Logger.Debug().WithField("dataset", samplerKey).Logf("failed to start sampler") return nil } - s.Logger.Debug().WithField("dataset", dataset).Logf("created implementation for sampler type %T", c) + s.Logger.Debug().WithField("dataset", samplerKey).Logf("created implementation for sampler type %T", c) return sampler } diff --git a/sample/sample_test.go b/sample/sample_test.go index 53fad51b40..aa74161837 100644 --- a/sample/sample_test.go +++ b/sample/sample_test.go @@ -3,12 +3,15 @@ package sample import ( + "io/ioutil" + "os" "testing" "github.com/facebookgo/inject" "github.com/honeycombio/refinery/config" "github.com/honeycombio/refinery/logger" "github.com/honeycombio/refinery/metrics" + "github.com/stretchr/testify/assert" ) func 
TestDependencyInjection(t *testing.T) { @@ -27,3 +30,80 @@ func TestDependencyInjection(t *testing.T) { t.Error(err) } } + +func TestDatasetPrefix(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "") + assert.NoError(t, err) + defer os.RemoveAll(tmpDir) + + configFile, err := ioutil.TempFile(tmpDir, "*.toml") + assert.NoError(t, err) + + _, err = configFile.Write([]byte(` + DatasetPrefix = "dataset" + + [InMemCollector] + CacheCapacity=1000 + + [HoneycombMetrics] + MetricsHoneycombAPI="http://honeycomb.io" + MetricsAPIKey="1234" + MetricsDataset="testDatasetName" + MetricsReportingInterval=3 + + [HoneycombLogger] + LoggerHoneycombAPI="http://honeycomb.io" + LoggerAPIKey="1234" + LoggerDataset="loggerDataset" + `)) + assert.NoError(t, err) + configFile.Close() + + rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") + assert.NoError(t, err) + + _, err = rulesFile.Write([]byte(` + Sampler = "DeterministicSampler" + SampleRate = 1 + + [production] + Sampler = "DeterministicSampler" + SampleRate = 10 + + [dataset.production] + Sampler = "DeterministicSampler" + SampleRate = 20 + `)) + assert.NoError(t, err) + rulesFile.Close() + + c, err := config.NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + assert.NoError(t, err) + + assert.Equal(t, "dataset", c.GetDatasetPrefix()) + + factory := SamplerFactory{Config: c, Logger: &logger.NullLogger{}, Metrics: &metrics.NullMetrics{}} + + defaultSampler := &DeterministicSampler{ + Config: &config.DeterministicSamplerConfig{SampleRate: 1}, + Logger: &logger.NullLogger{}, + } + defaultSampler.Start() + + envSampler := &DeterministicSampler{ + Config: &config.DeterministicSamplerConfig{SampleRate: 10}, + Logger: &logger.NullLogger{}, + } + envSampler.Start() + + datasetSampler := &DeterministicSampler{ + Config: &config.DeterministicSamplerConfig{SampleRate: 20}, + Logger: &logger.NullLogger{}, + } + datasetSampler.Start() + + assert.Equal(t, defaultSampler, factory.GetSamplerImplementationForKey("unknown", 
false)) + assert.Equal(t, defaultSampler, factory.GetSamplerImplementationForKey("unknown", true)) + assert.Equal(t, envSampler, factory.GetSamplerImplementationForKey("production", false)) + assert.Equal(t, datasetSampler, factory.GetSamplerImplementationForKey("production", true)) +} From ae781951add292bf3ac70079e4e9809d6944a7a3 Mon Sep 17 00:00:00 2001 From: "saikalyan.bhagavathula" Date: Fri, 29 Apr 2022 17:44:48 +0530 Subject: [PATCH 175/351] Checking which peers are up --- internal/peer/file.go | 83 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 79 insertions(+), 4 deletions(-) diff --git a/internal/peer/file.go b/internal/peer/file.go index 165abc0d38..c6ad23cfe4 100644 --- a/internal/peer/file.go +++ b/internal/peer/file.go @@ -1,22 +1,97 @@ package peer -import "github.com/jirs5/tracing-proxy/config" +import ( + "github.com/jirs5/tracing-proxy/config" + "net" + "sort" + "strings" + "sync" + "time" +) type filePeers struct { c config.Config + peers []string + callbacks []func() + peerLock sync.Mutex } - +var firstOccurancesOfGetPeers bool = false // NewFilePeers returns a peers collection backed by the config file func newFilePeers(c config.Config) Peers { - return &filePeers{ + p := &filePeers{ c: c, + peers: make([]string, 1), + callbacks: make([]func(), 0), } + + go p.watchFilePeers() + + return p } func (p *filePeers) GetPeers() ([]string, error) { - return p.c.GetPeers() + + if !firstOccurancesOfGetPeers { + firstOccurancesOfGetPeers = true + return p.c.GetPeers() + } + p.peerLock.Lock() + defer p.peerLock.Unlock() + retList := make([]string, len(p.peers)) + copy(retList, p.peers) + return retList, nil } +func (p *filePeers) watchFilePeers() { + tk := time.NewTicker(20 * time.Second) + originalPeerList, _:= p.c.GetPeers() + sort.Strings(originalPeerList) + oldPeerList := originalPeerList + for range tk.C { + currentPeers := getPeerMembers(originalPeerList) + sort.Strings(currentPeers) + if !equal(currentPeers, 
oldPeerList) { + p.peerLock.Lock() + p.peers = currentPeers + oldPeerList = currentPeers + p.peerLock.Unlock() + for _, callback := range p.callbacks { + // don't block on any of the callbacks. + go callback() + } + } + } +} func (p *filePeers) RegisterUpdatedPeersCallback(callback func()) { // do nothing, file based peers are not reloaded + p.callbacks = append(p.callbacks, callback) } + +func getPeerMembers(originalPeerlist []string) []string { + var workingPeers []string + wg := sync.WaitGroup{} + for _, peer := range originalPeerlist { + wg.Add(1) + go func(goPeer string) { + opened := isOpen(goPeer) + if opened { + workingPeers = append(workingPeers, goPeer) + } + wg.Done() + }(peer) + } + wg.Wait() + return workingPeers +} + +func isOpen(peer string) bool { + urlBreaker := strings.Split(peer, ":") + peerUrl := string(urlBreaker[1][2:]) + ":" + urlBreaker[2] + conn, err := net.Dial("tcp", peerUrl) + + if err == nil { + _ = conn.Close() + return true + } + return false +} \ No newline at end of file From b4b94d9894aab03d1be7de99ccda82cb322e6f03 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Tue, 3 May 2022 15:56:52 +0100 Subject: [PATCH 176/351] prepare v1.14.0 release (#445) --- CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 79a2e1d12a..b9e19778e4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Refinery Changelog +## 1.14.0 2022-05-03 + +### Enhancements + +- Add support for environment and dataset rules with same names (#438) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) + +### Maintenance + +- Update otlp to v0.11.0 (#437) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) +- Update go to 1.18 (#430) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) + +**Note**: The docker image used to create the binaries has been updated to a version that does not suffer a [OpenSSL 
CVE](https://mta.openssl.org/pipermail/openssl-announce/2022-March/000219.html). + ## 1.13.0 2022-04-08 ### Enhancements From 77a2ecb2c2554ef59ed9384954cd6a57a09830aa Mon Sep 17 00:00:00 2001 From: sujitha-pallapothu Date: Thu, 5 May 2022 10:33:35 +0530 Subject: [PATCH 177/351] metric tags --- metrics/opsramp.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/metrics/opsramp.go b/metrics/opsramp.go index eba889b34b..e883a11835 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -11,6 +11,7 @@ import ( "io/ioutil" "net/http" "net/url" + "os" "regexp" "strings" "sync" @@ -25,6 +26,8 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" ) + + type OpsRampMetrics struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` @@ -98,18 +101,27 @@ func (p *OpsRampMetrics) Register(name string, metricType string) { return } + hostMap := make(map[string]string) + + if hostname, err := os.Hostname(); err == nil && hostname != "" { + + hostMap["hostname"]=hostname + } + switch metricType { case "counter": newmet = promauto.NewCounter(prometheus.CounterOpts{ Name: name, Namespace: p.prefix, Help: name, + ConstLabels: hostMap, }) case "gauge": newmet = promauto.NewGauge(prometheus.GaugeOpts{ Name: name, Namespace: p.prefix, Help: name, + ConstLabels: hostMap, }) case "histogram": newmet = promauto.NewHistogram(prometheus.HistogramOpts{ @@ -119,6 +131,7 @@ func (p *OpsRampMetrics) Register(name string, metricType string) { // This is an attempt at a usable set of buckets for a wide range of metrics // 16 buckets, first upper bound of 1, each following upper bound is 4x the previous Buckets: prometheus.ExponentialBuckets(1, 4, 16), + ConstLabels: hostMap, }) } @@ -137,6 +150,12 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s if exists { return } + hostMap := make(map[string]string) + if hostname, err := os.Hostname(); err == nil && hostname != "" { + + 
hostMap["hostname"]=hostname + } + switch metricType { case "counter": @@ -144,6 +163,7 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s Name: name, Namespace: p.prefix, Help: desc, + ConstLabels: hostMap, }, labels) case "gauge": newmet = promauto.NewGaugeVec( @@ -151,6 +171,7 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s Name: name, Namespace: p.prefix, Help: desc, + ConstLabels: hostMap, }, labels) case "histogram": @@ -158,6 +179,7 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s Name: name, Namespace: p.prefix, Help: desc, + ConstLabels: hostMap, // This is an attempt at a usable set of buckets for a wide range of metrics // 16 buckets, first upper bound of 1, each following upper bound is 4x the previous Buckets: prometheus.ExponentialBuckets(1, 4, 16), @@ -434,6 +456,7 @@ func (p *OpsRampMetrics) PushMetricsToOpsRamp() (int, error) { request := prompb.WriteRequest{Timeseries: timeSeries} out, err := proto.Marshal(&request) + if err != nil { return -1, err } From ce47060dca12fe46fb9b11bb7f33ad1a9956ce10 Mon Sep 17 00:00:00 2001 From: "saikalyan.bhagavathula" Date: Thu, 5 May 2022 14:12:06 +0530 Subject: [PATCH 178/351] commit hash change for export retry --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 1b71b3ae48..fc1003dd32 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220209113356-39ae92fc19f4 //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220302232703-acf6fcd2a9de -replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220413141532-0f5e194e63a4 +replace github.com/honeycombio/libhoney-go v1.15.8 => 
github.com/jirs5/libtrace-go v1.15.9-0.20220505064121-8635e7c10a91 //replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220302161820-fe16f58d3996 replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220412060429-354fd2b490b4 From 7d8fd4604a681234358cafb0c11703455c3c48f0 Mon Sep 17 00:00:00 2001 From: "saikalyan.bhagavathula" Date: Fri, 6 May 2022 11:36:02 +0530 Subject: [PATCH 179/351] adding go.sum --- go.sum | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/go.sum b/go.sum index bb45628376..112d4b5444 100644 --- a/go.sum +++ b/go.sum @@ -274,8 +274,8 @@ github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LF github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jirs5/husky v0.9.1-0.20220412060429-354fd2b490b4 h1:YmBfU5uzwUZdYTvI60Vn+5JNaIaw6611td3Ltu8A0sQ= github.com/jirs5/husky v0.9.1-0.20220412060429-354fd2b490b4/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= -github.com/jirs5/libtrace-go v0.0.0-20220413141532-0f5e194e63a4 h1:Y0QqSPkthRPkNaSIG6cRQFNPsilRUS6E2BGn06MaBQY= -github.com/jirs5/libtrace-go v0.0.0-20220413141532-0f5e194e63a4/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= +github.com/jirs5/libtrace-go v1.15.9-0.20220505064121-8635e7c10a91 h1:iJj3Jcbt854dE5EC6iRO78dMLcG/jLiVonhM/tV9zXo= +github.com/jirs5/libtrace-go v1.15.9-0.20220505064121-8635e7c10a91/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -380,6 +380,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= +github.com/saikalyan-bhagavathula/libtrace-go v1.15.9-0.20220425111856-8b2ee16c2883 h1:xMC53EqdEypIeoZJ7tRfLzB9jaSUIQ71RTyadZlsKlM= +github.com/saikalyan-bhagavathula/libtrace-go v1.15.9-0.20220425111856-8b2ee16c2883/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= From beca13b5d553cea3cd78db6bfde78e68ee476c70 Mon Sep 17 00:00:00 2001 From: Jason Harley Date: Fri, 6 May 2022 04:57:39 -0400 Subject: [PATCH 180/351] README: remove incorrect mention of sending SIGUSR1 to trigger a configuration reload (#447) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3fb20ffe04..eb21bf1936 100644 --- a/README.md +++ b/README.md @@ -48,7 +48,7 @@ There are a few vital configuration options; read through this list and make sur There are a few components of Refinery with multiple implementations; the config file lets you choose which you'd like. As an example, there are two logging implementations - one that uses `logrus` and sends logs to STDOUT and a `honeycomb` implementation that sends the log messages to a Honeycomb dataset instead. 
Components with multiple implementations have one top level config item that lets you choose which implementation to use and then a section further down with additional config options for that choice (for example, the Honeycomb logger requires an API key). -When configuration changes, send Refinery a USR1 signal and it will re-read the configuration. +When configuration changes, Refinery will automatically reload the configuration. ### Redis-based Peer Management From ae3e8f52fd383e64384559633037a5edd38b170c Mon Sep 17 00:00:00 2001 From: Levi Wilson Date: Mon, 9 May 2022 03:56:01 -0600 Subject: [PATCH 181/351] add a note about reloading the configuration when running within docker (#448) Co-authored-by: Mike Goldsmith --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index eb21bf1936..3914ac485a 100644 --- a/README.md +++ b/README.md @@ -48,7 +48,9 @@ There are a few vital configuration options; read through this list and make sur There are a few components of Refinery with multiple implementations; the config file lets you choose which you'd like. As an example, there are two logging implementations - one that uses `logrus` and sends logs to STDOUT and a `honeycomb` implementation that sends the log messages to a Honeycomb dataset instead. Components with multiple implementations have one top level config item that lets you choose which implementation to use and then a section further down with additional config options for that choice (for example, the Honeycomb logger requires an API key). -When configuration changes, Refinery will automatically reload the configuration. +When configuration changes, Refinery will automatically reload the configuration[^1]. + +[^1]: When running Refinery within docker, be sure to mount the directory containing configuration & rules files so that [reloading will work](https://github.com/spf13/viper/issues/920) as expected. 
### Redis-based Peer Management From f2a48214b5309d2d7547e33c091074e325f16695 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 11 May 2022 14:05:34 +0100 Subject: [PATCH 182/351] bump husky to 0.10.5 (#450) --- go.mod | 8 ++++---- go.sum | 21 ++++++++++++++------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index c30c6886f7..4cd4f40dc5 100644 --- a/go.mod +++ b/go.mod @@ -13,11 +13,11 @@ require ( github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.10.3 + github.com/honeycombio/husky v0.10.5 github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.15.1 + github.com/klauspost/compress v1.15.2 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.1 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 @@ -28,7 +28,7 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.11.0 golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect - google.golang.org/grpc v1.45.0 + google.golang.org/grpc v1.46.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 ) @@ -68,7 +68,7 @@ require ( golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/protobuf v1.27.1 // indirect + google.golang.org/protobuf v1.28.0 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index d47a8fecbb..d2a70ada08 100644 --- a/go.sum +++ b/go.sum @@ -61,6 +61,7 @@ github.com/cncf/udpa/go 
v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -71,6 +72,7 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= @@ -152,6 +154,7 @@ 
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -178,8 +181,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.10.3 h1:407j6dXPG2ClzBGwIm/pgD+1N56jGamb3SZESttcuPg= -github.com/honeycombio/husky v0.10.3/go.mod h1:KltmTfiasGGV0L3Hv6KEzm9YSvv3vRTz/JRSq1K+d78= +github.com/honeycombio/husky v0.10.5 h1:jzOQJw4FDBKfy1DDwWzFB6lqmUOF4oJNSZelGBlve4A= +github.com/honeycombio/husky v0.10.5/go.mod h1:i69+3xApIcsQn9PPeFRndTOOw6edNG66TxTTd+L6oq0= github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -197,8 +200,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw= +github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -378,6 +381,7 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
golang.org/x/net v0.0.0-20210913180222-943fd674d43e h1:+b/22bPvDYt4NPDcy4xAGCmON713ONAWFeY3Z7I3tR8= @@ -429,7 +433,9 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -569,8 +575,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -583,8 +589,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= From 0b02b1baf913ad84b5df645d5a4bdf1b5cce7ab6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 May 2022 15:00:51 -0400 Subject: [PATCH 183/351] Bump github.com/fsnotify/fsnotify from 1.5.1 to 1.5.4 (#441) Bumps [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify) from 1.5.1 to 1.5.4. 
- [Release notes](https://github.com/fsnotify/fsnotify/releases) - [Changelog](https://github.com/fsnotify/fsnotify/blob/main/CHANGELOG.md) - [Commits](https://github.com/fsnotify/fsnotify/compare/v1.5.1...v1.5.4) --- updated-dependencies: - dependency-name: github.com/fsnotify/fsnotify dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 4cd4f40dc5..d838ccdfec 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/facebookgo/inject v0.0.0-20180706035515-f23751cae28b github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d - github.com/fsnotify/fsnotify v1.5.1 + github.com/fsnotify/fsnotify v1.5.4 github.com/go-playground/validator v9.31.0+incompatible github.com/golang/protobuf v1.5.2 github.com/gomodule/redigo v1.8.8 @@ -64,7 +64,7 @@ require ( github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect diff --git a/go.sum b/go.sum index d2a70ada08..47ff6e7a88 100644 --- a/go.sum +++ b/go.sum @@ -92,8 +92,8 @@ github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 h1:KnnwHN59Jx 
github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691/go.mod h1:sKLL1iua/0etWfo/nPCmyz+v2XDMXy+Ho53W7RAuZNY= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -442,9 +442,9 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad 
h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From a17c1c49a4e4ccdd352ed8f03d7d818e32554fbb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 May 2022 15:04:54 -0400 Subject: [PATCH 184/351] Bump github.com/tidwall/gjson from 1.14.0 to 1.14.1 (#444) Bumps [github.com/tidwall/gjson](https://github.com/tidwall/gjson) from 1.14.0 to 1.14.1. - [Release notes](https://github.com/tidwall/gjson/releases) - [Commits](https://github.com/tidwall/gjson/compare/v1.14.0...v1.14.1) --- updated-dependencies: - dependency-name: github.com/tidwall/gjson dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d838ccdfec..d9d620271a 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/sirupsen/logrus v1.8.1 github.com/spf13/viper v1.10.1 github.com/stretchr/testify v1.7.1 - github.com/tidwall/gjson v1.14.0 + github.com/tidwall/gjson v1.14.1 github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.11.0 golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect diff --git a/go.sum b/go.sum index 47ff6e7a88..0d241a9e58 100644 --- a/go.sum +++ b/go.sum @@ -291,8 +291,8 @@ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMT github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tidwall/gjson v1.14.0 h1:6aeJ0bzojgWLa82gDQHcx3S0Lr/O51I9bJ5nv6JFx5w= -github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo= +github.com/tidwall/gjson v1.14.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= From f3c5606d089b210b9d503c9a05d7573f5974e344 Mon Sep 17 00:00:00 2001 
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 May 2022 15:17:18 -0400 Subject: [PATCH 185/351] Bump github.com/klauspost/compress from 1.15.2 to 1.15.4 (#451) Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.15.2 to 1.15.4. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.15.2...v1.15.4) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d9d620271a..0a6950483f 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.15.2 + github.com/klauspost/compress v1.15.4 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.1 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 diff --git a/go.sum b/go.sum index 0d241a9e58..cdc515810a 100644 --- a/go.sum +++ b/go.sum @@ -200,8 +200,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw= -github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.4 h1:1kn4/7MepF/CHmYub99/nNX8az0IJjfSOU/jbnTVfqQ= +github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= From eef204e283e4ddad8029ba0eec02db9748e4c20f Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Mon, 16 May 2022 18:37:47 -0400 Subject: [PATCH 186/351] Fix crash bug related to sharding (#455) // The algorithm in WhichShard works correctly for divisors of 2^32-1. The prime factorization of that includes // 1, 3, 5, 17, so we need something other than 3 to be sure that this test would fail. // It was tested (and failed) without the additional conditional. --- sharder/deterministic.go | 6 +++++ sharder/deterministic_test.go | 42 +++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/sharder/deterministic.go b/sharder/deterministic.go index 8b77ecb135..2042b0ef75 100644 --- a/sharder/deterministic.go +++ b/sharder/deterministic.go @@ -227,6 +227,12 @@ func (d *DeterministicSharder) WhichShard(traceID string) Shard { portion := math.MaxUint32 / len(d.peers) index := v / uint32(portion) + // #454 -- index can get out of range if v is close to 0xFFFFFFFF and portion would be non-integral. + // Consider revisiting this with a different sharding mechanism if we rework our scaling behavior. 
+ if index >= uint32(len(d.peers)) { + index = 0 + } + return d.peers[index] } diff --git a/sharder/deterministic_test.go b/sharder/deterministic_test.go index 49336a8f5e..9fc85839c7 100644 --- a/sharder/deterministic_test.go +++ b/sharder/deterministic_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package sharder @@ -47,3 +48,44 @@ func TestWhichShard(t *testing.T) { assert.Equal(t, shard.GetAddress(), sharder.WhichShard(traceID).GetAddress(), "should select the same peer if peer list becomes empty") } + +func TestWhichShardAtEdge(t *testing.T) { + const ( + selfAddr = "127.0.0.1:8081" + traceID = "RCIVNUNA" // carefully chosen (by trying over a billion times) to hash in WhichShard to 0xFFFFFFFF + ) + + // The algorithm in WhichShard works correctly for divisors of 2^32-1. The prime factorization of that includes + // 1, 3, 5, 17, so we need something other than 3 to be sure that this test would fail. + // It was tested (and failed) without the additional conditional. 
+ peers := []string{ + "http://" + selfAddr, + "http://2.2.2.2:8081", + "http://3.3.3.3:8081", + "http://4.4.4.4:8081", + } + config := &config.MockConfig{ + GetPeerListenAddrVal: selfAddr, + GetPeersVal: peers, + PeerManagementType: "file", + } + filePeers, err := peer.NewPeers(config) + assert.Equal(t, nil, err) + sharder := DeterministicSharder{ + Config: config, + Logger: &logger.NullLogger{}, + Peers: filePeers, + } + + assert.NoError(t, sharder.Start(), + "starting deterministic sharder should not error") + + shard := sharder.WhichShard(traceID) + assert.Contains(t, peers, shard.GetAddress(), + "should select a peer for a trace") + + config.GetPeersVal = []string{} + config.ReloadConfig() + assert.Equal(t, shard.GetAddress(), sharder.WhichShard(traceID).GetAddress(), + "should select the same peer if peer list becomes empty") +} From 3f32e38e6deb3badda91930e686781b425ab737b Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Mon, 16 May 2022 20:37:32 -0400 Subject: [PATCH 187/351] Prep for 1.14.1 release (#456) ## 1.14.1 2022-05-16 ### Fixes - Fix crash bug related to sharding (#455) | [@kentquirk](https://github.com/kentquirk) ### Maintenance - bump husky to 0.10.5 (#450) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) - Bump github.com/klauspost/compress from 1.15.2 to 1.15.4 (#451) | dependabot - Bump github.com/tidwall/gjson from 1.14.0 to 1.14.1 (#444) | dependabot - Bump github.com/fsnotify/fsnotify from 1.5.1 to 1.5.4 (#441) | dependabot ### Documentation - add a note about reloading the configuration when running within docker (#448) | [@leviwilson](https://github.com/leviwilson) - README: remove incorrect mention of sending SIGUSR1 to trigger a configuration reload (#447) | [@jharley](https://github.com/jharley) --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9e19778e4..ab6dbb10c1 
100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Refinery Changelog +## 1.14.1 2022-05-16 + +### Fixes + +- Fix crash bug related to sharding (#455) | [@kentquirk](https://github.com/kentquirk) + +### Maintenance + +- bump husky to 0.10.5 (#450) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) +- Bump github.com/klauspost/compress from 1.15.2 to 1.15.4 (#451) | dependabot +- Bump github.com/tidwall/gjson from 1.14.0 to 1.14.1 (#444) | dependabot +- Bump github.com/fsnotify/fsnotify from 1.5.1 to 1.5.4 (#441) | dependabot + +### Documentation + +- add a note about reloading the configuration when running within docker (#448) | [@leviwilson](https://github.com/leviwilson) +- README: remove incorrect mention of sending SIGUSR1 to trigger a configuration reload (#447) | [@jharley](https://github.com/jharley) + ## 1.14.0 2022-05-03 ### Enhancements From 5e3cde1b907a7047268275074baecf7016ea2201 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 18 May 2022 14:46:19 +0100 Subject: [PATCH 188/351] replace legacy with classic in raadme (#457) --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 3914ac485a..0f6cdde79c 100644 --- a/README.md +++ b/README.md @@ -73,18 +73,18 @@ In environments where domain name resolution is slow or unreliable, override the See the [Refinery documentation](https://docs.honeycomb.io/manage-data-volume/refinery/) for more details on tuning a cluster. -### Mixing Legacy and Environment & Services Rule Definitions +### Mixing Classic and Environment & Services Rule Definitions -With the change to support environemt and services in Honeycomb, some users will want to support both sending telemetry to a legacy dataset and a new environment called the same thing (eg `production`). 
+With the change to support environemt and services in Honeycomb, some users will want to support both sending telemetry to a classic dataset and a new environment called the same thing (eg `production`). -This can be accomplished by leveraging the new `DatasetPrefix` configuration property and then using that prefix in the rules definitions for the legacy datasets. +This can be accomplished by leveraging the new `DatasetPrefix` configuration property and then using that prefix in the rules definitions for the classic datasets. -When Refinery receives telemetry using an API key associated to a legacy dataset, it will then use the prefix in the form `{prefix}.{dataset}` when trying to resolve the rules definition. +When Refinery receives telemetry using an API key associated to a classic dataset, it will then use the prefix in the form `{prefix}.{dataset}` when trying to resolve the rules definition. For example config.toml ```toml -DatasetPrefix = "legacy" +DatasetPrefix = "classic" ``` rules.toml @@ -97,7 +97,7 @@ SampleRate = 1 Sampler = "DeterministicSampler" SampleRate = 5 - [legacy.production] # dataset called "production" + [classic.production] # dataset called "production" Sampler = "DeterministicSampler" SampleRate = 10 ``` From 363cd8a208eefe9eb97327694855b5fa763920b4 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Fri, 20 May 2022 18:06:59 +0100 Subject: [PATCH 189/351] Create helm-chart issue on release (#458) --- .github/workflows/release.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .github/workflows/release.yml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000..35588fa25a --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,22 @@ +name: Create helm chart issue on release +on: + release: + types: [published] +jobs: + create_issue: + runs-on: ubuntu-latest + steps: + - name: Create an issue + uses: actions-ecosystem/action-create-issue@v1 + 
with: + github-token: ${{ secrets.GITHUB_TOKEN }} + repo: github.com/honeycombio/helm-charts + title: ${{ steps.date.outputs.today }} + body: | + ## Bump Refinery + + Update Refinery to latest version + + labels: | + type: dependencies + status: oncall From 3b1add48371a1c30ea053c3fe5c85a47d2684122 Mon Sep 17 00:00:00 2001 From: Chris Toshok Date: Wed, 25 May 2022 03:03:02 -0700 Subject: [PATCH 190/351] Replace hand-rolled binary.BigEndian.Uint32 with the real deal (#459) --- sample/deterministic.go | 9 ++------- sharder/deterministic.go | 9 ++------- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/sample/deterministic.go b/sample/deterministic.go index cfdd2b816a..57f4c761c7 100644 --- a/sample/deterministic.go +++ b/sample/deterministic.go @@ -2,6 +2,7 @@ package sample import ( "crypto/sha1" + "encoding/binary" "math" "github.com/honeycombio/refinery/config" @@ -39,12 +40,6 @@ func (d *DeterministicSampler) GetSampleRate(trace *types.Trace) (rate uint, kee return 1, true } sum := sha1.Sum([]byte(trace.TraceID + shardingSalt)) - v := bytesToUint32be(sum[:4]) + v := binary.BigEndian.Uint32(sum[:4]) return uint(d.sampleRate), v <= d.upperBound } - -// bytesToUint32 takes a slice of 4 bytes representing a big endian 32 bit -// unsigned value and returns the equivalent uint32. 
-func bytesToUint32be(b []byte) uint32 { - return uint32(b[3]) | (uint32(b[2]) << 8) | (uint32(b[1]) << 16) | (uint32(b[0]) << 24) -} diff --git a/sharder/deterministic.go b/sharder/deterministic.go index 2042b0ef75..af8138f1f7 100644 --- a/sharder/deterministic.go +++ b/sharder/deterministic.go @@ -2,6 +2,7 @@ package sharder import ( "crypto/sha1" + "encoding/binary" "fmt" "math" "net" @@ -222,7 +223,7 @@ func (d *DeterministicSharder) WhichShard(traceID string) Shard { // add in the sharding salt to ensure the sh1sum is spread differently from // others that use the same algorithm sum := sha1.Sum([]byte(traceID + shardingSalt)) - v := bytesToUint32be(sum[:4]) + v := binary.BigEndian.Uint32(sum[:4]) portion := math.MaxUint32 / len(d.peers) index := v / uint32(portion) @@ -235,9 +236,3 @@ func (d *DeterministicSharder) WhichShard(traceID string) Shard { return d.peers[index] } - -// bytesToUint32 takes a slice of 4 bytes representing a big endian 32 bit -// unsigned value and returns the equivalent uint32. 
-func bytesToUint32be(b []byte) uint32 { - return uint32(b[3]) | (uint32(b[2]) << 8) | (uint32(b[1]) << 16) | (uint32(b[0]) << 24) -} From 138daf1e85a0cc1b623884ada826598493e7ed07 Mon Sep 17 00:00:00 2001 From: Jamie Danielson Date: Thu, 9 Jun 2022 10:45:53 -0400 Subject: [PATCH 191/351] github_token needs underscore not hyphen (#464) use `github_token` instead of `github-token` in `release.yml` github workflow --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 35588fa25a..4c5402e731 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,7 +9,7 @@ jobs: - name: Create an issue uses: actions-ecosystem/action-create-issue@v1 with: - github-token: ${{ secrets.GITHUB_TOKEN }} + github_token: ${{ secrets.GITHUB_TOKEN }} repo: github.com/honeycombio/helm-charts title: ${{ steps.date.outputs.today }} body: | From db6b93fb18d5df5786dadfabefda3b609acf4f04 Mon Sep 17 00:00:00 2001 From: Kevan Carstensen Date: Fri, 10 Jun 2022 05:11:42 -0700 Subject: [PATCH 192/351] Add rule Scope configuration option to rules-based sampler (#440) We'd like to be able to apply all of the conditions in a rules-based sampler rule to a single span. In other words, we'd like a rule to be considered matched only if all of the conditions in that rule are satisfied by a single span in the trace. This allows us to more accurately make per-service sampling decisions in a dataset written to by multiple services. 
Co-authored-by: Mike Goldsmith --- config/config_test.go | 10 +- config/sampler_config.go | 1 + rules_complete.toml | 23 +++- sample/rules.go | 232 +++++++++++++++++++++++++-------------- sample/rules_test.go | 120 ++++++++++++++++++++ 5 files changed, 300 insertions(+), 86 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index 6251bb3512..5e99906091 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -246,7 +246,7 @@ func TestReadRulesConfig(t *testing.T) { assert.NoError(t, err) switch r := d.(type) { case *RulesBasedSamplerConfig: - assert.Len(t, r.Rule, 4) + assert.Len(t, r.Rule, 5) var rule *RulesBasedSamplerRule @@ -260,6 +260,14 @@ func TestReadRulesConfig(t *testing.T) { assert.Equal(t, "keep slow 500 errors", rule.Name) assert.Len(t, rule.Condition, 2) + rule = r.Rule[3] + assert.Equal(t, 5, rule.SampleRate) + assert.Equal(t, "span", rule.Scope) + + rule = r.Rule[4] + assert.Equal(t, 10, rule.SampleRate) + assert.Equal(t, "", rule.Scope) + default: assert.Fail(t, "dataset4 should have a rules based sampler", d) } diff --git a/config/sampler_config.go b/config/sampler_config.go index d49ebca1af..b35965c9c2 100644 --- a/config/sampler_config.go +++ b/config/sampler_config.go @@ -62,6 +62,7 @@ type RulesBasedSamplerRule struct { SampleRate int Sampler *RulesBasedDownstreamSampler Drop bool + Scope string `validate:"oneof=span trace"` Condition []*RulesBasedSamplerCondition } diff --git a/rules_complete.toml b/rules_complete.toml index 080ca92a3b..f984d9d938 100644 --- a/rules_complete.toml +++ b/rules_complete.toml @@ -210,7 +210,7 @@ SampleRate = 1 CheckNestedFields = false [[dataset4.rule]] - name = "drop healtchecks" + name = "drop healthchecks" drop = true [[dataset4.rule.condition]] field = "http.route" @@ -242,6 +242,27 @@ SampleRate = 1 AddSampleRateKeyToTrace = true AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + [[dataset4.rule]] + name = "sample traces originating from a service" + # if scope is 
set to "span", a single span in the trace must match + # *all* of the conditions associated with this rule for the rule to + # apply to the trace. + # + # this is especially helpful when sampling a dataset written to + # by multiple services that call one another in normal operation – + # you can set Scope to 'span' to attribute traces to an origin + # service in a way that would be difficult without it. + Scope = "span" + SampleRate = 5 + [[dataset4.rule.condition]] + field = "service name" + operator = "=" + value = "users" + [[dataset4.rule.condition]] + field = "meta.span_type" + operator = "=" + value = "root" + [[dataset4.rule]] SampleRate = 10 # default when no rules match, if missing defaults to 10 diff --git a/sample/rules.go b/sample/rules.go index 73c9ceb023..7aa1664e77 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -65,93 +65,22 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b }) for _, rule := range s.Config.Rule { - var matched int + var matched bool - for _, condition := range rule.Condition { - span: - for _, span := range trace.GetSpans() { - var match bool - value, exists := span.Data[condition.Field] - if !exists && s.Config.CheckNestedFields { - jsonStr, err := json.Marshal(span.Data) - if err == nil { - result := gjson.Get(string(jsonStr), condition.Field) - if result.Exists() { - value = result.String() - exists = true - } - } - } - - switch exists { - case true: - switch condition.Operator { - case "exists": - match = exists - case "!=": - if comparison, ok := compare(value, condition.Value); ok { - match = comparison != equal - } - case "=": - if comparison, ok := compare(value, condition.Value); ok { - match = comparison == equal - } - case ">": - if comparison, ok := compare(value, condition.Value); ok { - match = comparison == more - } - case ">=": - if comparison, ok := compare(value, condition.Value); ok { - match = comparison == more || comparison == equal - } - case "<": - if comparison, ok := 
compare(value, condition.Value); ok { - match = comparison == less - } - case "<=": - if comparison, ok := compare(value, condition.Value); ok { - match = comparison == less || comparison == equal - } - case "starts-with": - switch a := value.(type) { - case string: - switch b := condition.Value.(type) { - case string: - match = strings.HasPrefix(a, b) - } - } - case "contains": - switch a := value.(type) { - case string: - switch b := condition.Value.(type) { - case string: - match = strings.Contains(a, b) - } - } - case "does-not-contain": - switch a := value.(type) { - case string: - switch b := condition.Value.(type) { - case string: - match = !strings.Contains(a, b) - } - } - } - case false: - switch condition.Operator { - case "not-exists": - match = !exists - } - } - - if match { - matched++ - break span - } - } + switch rule.Scope { + case "span": + matched = ruleMatchesSpanInTrace(trace, rule, s.Config.CheckNestedFields) + case "trace", "": + matched = ruleMatchesTrace(trace, rule, s.Config.CheckNestedFields) + default: + logger.WithFields(map[string]interface{}{ + "rule_name": rule.Name, + "scope": rule.Scope, + }).Logf("invalid scope %s given for rule: %s", rule.Scope, rule.Name) + matched = true } - if rule.Condition == nil || matched == len(rule.Condition) { + if matched { var rate uint var keep bool @@ -188,6 +117,141 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b return 1, true } +func ruleMatchesTrace(t *types.Trace, rule *config.RulesBasedSamplerRule, checkNestedFields bool) bool { + // We treat a rule with no conditions as a match. 
+ if rule.Condition == nil { + return true + } + + var matched int + + for _, condition := range rule.Condition { + span: + for _, span := range t.GetSpans() { + value, exists := extractValueFromSpan(span, condition, checkNestedFields) + + if conditionMatchesValue(condition, value, exists) { + matched++ + break span + } + } + } + + return matched == len(rule.Condition) +} + +func ruleMatchesSpanInTrace(trace *types.Trace, rule *config.RulesBasedSamplerRule, checkNestedFields bool) bool { + // We treat a rule with no conditions as a match. + if rule.Condition == nil { + return true + } + + for _, span := range trace.GetSpans() { + // the number of conditions that match this span. + // incremented later on after we match a condition + // since we need to match *all* conditions on a single span, we reset in each iteration of the loop. + matchCount := 0 + for _, condition := range rule.Condition { + // whether this condition is matched by this span. + value, exists := extractValueFromSpan(span, condition, checkNestedFields) + + if conditionMatchesValue(condition, value, exists) { + matchCount++ + } + } + // If this span was matched by every condition, then the rule as a whole + // matches (and we can return) + if matchCount == len(rule.Condition) { + return true + } + } + + // if the rule didn't match above, then it doesn't match the trace. + return false +} + +func extractValueFromSpan(span *types.Span, condition *config.RulesBasedSamplerCondition, checkNestedFields bool) (interface{}, bool) { + // whether this condition is matched by this span. 
+ value, exists := span.Data[condition.Field] + if !exists && checkNestedFields { + jsonStr, err := json.Marshal(span.Data) + if err == nil { + result := gjson.Get(string(jsonStr), condition.Field) + if result.Exists() { + value = result.String() + exists = true + } + } + } + + return value, exists +} + +func conditionMatchesValue(condition *config.RulesBasedSamplerCondition, value interface{}, exists bool) bool { + var match bool + switch exists { + case true: + switch condition.Operator { + case "exists": + match = exists + case "!=": + if comparison, ok := compare(value, condition.Value); ok { + match = comparison != equal + } + case "=": + if comparison, ok := compare(value, condition.Value); ok { + match = comparison == equal + } + case ">": + if comparison, ok := compare(value, condition.Value); ok { + match = comparison == more + } + case ">=": + if comparison, ok := compare(value, condition.Value); ok { + match = comparison == more || comparison == equal + } + case "<": + if comparison, ok := compare(value, condition.Value); ok { + match = comparison == less + } + case "<=": + if comparison, ok := compare(value, condition.Value); ok { + match = comparison == less || comparison == equal + } + case "starts-with": + switch a := value.(type) { + case string: + switch b := condition.Value.(type) { + case string: + match = strings.HasPrefix(a, b) + } + } + case "contains": + switch a := value.(type) { + case string: + switch b := condition.Value.(type) { + case string: + match = strings.Contains(a, b) + } + } + case "does-not-contain": + switch a := value.(type) { + case string: + switch b := condition.Value.(type) { + case string: + match = !strings.Contains(a, b) + } + } + } + case false: + switch condition.Operator { + case "not-exists": + match = !exists + } + } + return match +} + const ( less = -1 equal = 0 diff --git a/sample/rules_test.go b/sample/rules_test.go index 553e415a5f..f12e0fd50f 100644 --- a/sample/rules_test.go +++ b/sample/rules_test.go @@ 
-835,3 +835,123 @@ func TestRulesWithEMADynamicSampler(t *testing.T) { } } } + +func TestRuleMatchesSpanMatchingSpan(t *testing.T) { + data := []TestRulesData{ + { + Rules: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "Rule to match span", + Scope: "span", + SampleRate: 10, + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "rule_test", + Operator: "=", + Value: int64(1), + }, + { + Field: "rule_test_2", + Operator: "=", + Value: int64(2), + }, + }, + }, + }, + }, + Spans: []*types.Span{ + { + Event: types.Event{ + Data: map[string]interface{}{ + "rule_test": int64(1), + "http.status_code": "200", + "rule_test_2": int64(2), + }, + }, + }, + { + Event: types.Event{ + Data: map[string]interface{}{ + "rule_test": int64(1), + "http.status_code": "200", + }, + }, + }, + }, + ExpectedKeep: true, + ExpectedRate: 10, + }, + { + Rules: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "Rule to match span", + Scope: "span", + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "rule_test", + Operator: "=", + Value: int64(1), + }, + { + Field: "rule_test_2", + Operator: "=", + Value: int64(2), + }, + }, + }, + { + Name: "Default rule", + Drop: true, + SampleRate: 1, + }, + }, + }, + Spans: []*types.Span{ + { + Event: types.Event{ + Data: map[string]interface{}{ + "rule_test": int64(1), + "http.status_code": "200", + }, + }, + }, + { + Event: types.Event{ + Data: map[string]interface{}{ + "rule_test_2": int64(2), + "http.status_code": "200", + }, + }, + }, + }, + ExpectedKeep: false, + ExpectedRate: 1, + }, + } + + for _, d := range data { + sampler := &RulesBasedSampler{ + Config: d.Rules, + Logger: &logger.NullLogger{}, + Metrics: &metrics.NullMetrics{}, + } + + trace := &types.Trace{} + + for _, span := range d.Spans { + trace.AddSpan(span) + } + + sampler.Start() + rate, keep := sampler.GetSampleRate(trace) + + assert.Equal(t, d.ExpectedRate, rate, d.Rules) + + // we can 
only test when we don't expect to keep the trace + if !d.ExpectedKeep { + assert.Equal(t, d.ExpectedKeep, keep, d.Rules) + } + } +} From 3b45d2d3ab5378ee8b4ff274498f0dd7e36be9cc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 12 Jun 2022 15:24:12 -0400 Subject: [PATCH 193/351] Bump github.com/prometheus/client_golang from 1.12.1 to 1.12.2 (#463) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.12.1 to 1.12.2. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.12.1...v1.12.2) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0a6950483f..76be5cf440 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.4 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.12.1 + github.com/prometheus/client_golang v1.12.2 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.8.1 github.com/spf13/viper v1.10.1 diff --git a/go.sum b/go.sum index cdc515810a..7de3564a60 100644 --- a/go.sum +++ b/go.sum @@ -241,8 +241,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= From da9ff7fe96f6d511595b58af580f2c50d7435da3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 12 Jun 2022 15:34:25 -0400 Subject: [PATCH 194/351] Bump github.com/klauspost/compress from 1.15.4 to 1.15.6 (#466) Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.15.4 to 1.15.6. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.15.4...v1.15.6) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 76be5cf440..8b0488f0a3 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.15.4 + github.com/klauspost/compress v1.15.6 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.2 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 diff --git a/go.sum b/go.sum index 7de3564a60..a2e7001e61 100644 --- a/go.sum +++ b/go.sum @@ -200,8 +200,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.4 h1:1kn4/7MepF/CHmYub99/nNX8az0IJjfSOU/jbnTVfqQ= -github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY= +github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= From 66274ef0e8960fb84f1480e0c700ade704a50fbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 12 Jun 2022 15:38:06 -0400 Subject: [PATCH 195/351] Bump github.com/honeycombio/husky from 0.10.5 to 0.10.6 (#460) Bumps [github.com/honeycombio/husky](https://github.com/honeycombio/husky) from 0.10.5 to 0.10.6. - [Release notes](https://github.com/honeycombio/husky/releases) - [Changelog](https://github.com/honeycombio/husky/blob/main/CHANGELOG.md) - [Commits](https://github.com/honeycombio/husky/compare/v0.10.5...v0.10.6) --- updated-dependencies: - dependency-name: github.com/honeycombio/husky dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8b0488f0a3..fd8fa7cb01 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.10.5 + github.com/honeycombio/husky v0.10.6 github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 diff --git a/go.sum b/go.sum index a2e7001e61..cbd99c929c 100644 --- a/go.sum +++ b/go.sum @@ -181,8 +181,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod 
h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.10.5 h1:jzOQJw4FDBKfy1DDwWzFB6lqmUOF4oJNSZelGBlve4A= -github.com/honeycombio/husky v0.10.5/go.mod h1:i69+3xApIcsQn9PPeFRndTOOw6edNG66TxTTd+L6oq0= +github.com/honeycombio/husky v0.10.6 h1:jU/lXqo7Qz6e9eUJErIH3Lst2gjKWSJ4oAXYjFSXkn0= +github.com/honeycombio/husky v0.10.6/go.mod h1:i69+3xApIcsQn9PPeFRndTOOw6edNG66TxTTd+L6oq0= github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= From c72c4eaf26bf37be39ac80bc44923c638b4fbe4a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 12 Jun 2022 15:46:16 -0400 Subject: [PATCH 196/351] Bump github.com/stretchr/testify from 1.7.1 to 1.7.2 (#467) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.1 to 1.7.2. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.7.1...v1.7.2) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index fd8fa7cb01..6d3899697b 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.8.1 github.com/spf13/viper v1.10.1 - github.com/stretchr/testify v1.7.1 + github.com/stretchr/testify v1.7.2 github.com/tidwall/gjson v1.14.1 github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.11.0 @@ -72,5 +72,5 @@ require ( gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index cbd99c929c..846dd97d55 100644 --- a/go.sum +++ b/go.sum @@ -287,8 +287,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/subosito/gotenv v1.2.0 
h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo= @@ -613,8 +613,8 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 8b4789057f526e799537e0536471c34d4e86a127 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Tue, 14 Jun 2022 23:27:51 +0100 Subject: [PATCH 197/351] Validate successful span scoped rules test (#465) --- sample/rules_test.go | 144 +++++++++++++++++++------------------------ 1 file changed, 65 insertions(+), 79 deletions(-) diff --git a/sample/rules_test.go b/sample/rules_test.go index f12e0fd50f..2f7183f1bb 100644 --- a/sample/rules_test.go +++ b/sample/rules_test.go @@ -1,6 +1,3 @@ -//go:build all || race -// +build all race - package sample import ( @@ -837,121 +834,110 @@ func TestRulesWithEMADynamicSampler(t *testing.T) { } func TestRuleMatchesSpanMatchingSpan(t *testing.T) { - data := 
[]TestRulesData{ + testCases := []struct { + name string + spans []*types.Span + keepSpanScope bool + keepTraceScope bool + }{ { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "Rule to match span", - Scope: "span", - SampleRate: 10, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "rule_test", - Operator: "=", - Value: int64(1), - }, - { - Field: "rule_test_2", - Operator: "=", - Value: int64(2), - }, - }, - }, - }, - }, - Spans: []*types.Span{ + name: "all conditions match single span", + keepSpanScope: true, + keepTraceScope: true, + spans: []*types.Span{ { Event: types.Event{ Data: map[string]interface{}{ "rule_test": int64(1), "http.status_code": "200", - "rule_test_2": int64(2), }, }, }, { Event: types.Event{ Data: map[string]interface{}{ - "rule_test": int64(1), - "http.status_code": "200", + "rule_test": int64(5), + "http.status_code": "500", }, }, }, }, - ExpectedKeep: true, - ExpectedRate: 10, }, { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "Rule to match span", - Scope: "span", - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "rule_test", - Operator: "=", - Value: int64(1), - }, - { - Field: "rule_test_2", - Operator: "=", - Value: int64(2), - }, - }, - }, - { - Name: "Default rule", - Drop: true, - SampleRate: 1, - }, - }, - }, - Spans: []*types.Span{ + name: "all conditions do not match single span", + keepSpanScope: false, + keepTraceScope: true, + spans: []*types.Span{ { Event: types.Event{ Data: map[string]interface{}{ "rule_test": int64(1), - "http.status_code": "200", + "http.status_code": "500", }, }, }, { Event: types.Event{ Data: map[string]interface{}{ - "rule_test_2": int64(2), + "rule_test": int64(5), "http.status_code": "200", }, }, }, }, - ExpectedKeep: false, - ExpectedRate: 1, }, } - for _, d := range data { - sampler := &RulesBasedSampler{ - Config: d.Rules, - Logger: &logger.NullLogger{}, - Metrics: 
&metrics.NullMetrics{}, - } - - trace := &types.Trace{} + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + for _, scope := range []string{"span", "trace"} { + sampler := &RulesBasedSampler{ + Config: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "Rule to match span", + Scope: scope, + SampleRate: 1, + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "rule_test", + Operator: "=", + Value: int64(1), + }, + { + Field: "http.status_code", + Operator: "=", + Value: "200", + }, + }, + }, + { + Name: "Default rule", + Drop: true, + SampleRate: 1, + }, + }, + }, + Logger: &logger.NullLogger{}, + Metrics: &metrics.NullMetrics{}, + } - for _, span := range d.Spans { - trace.AddSpan(span) - } + trace := &types.Trace{} - sampler.Start() - rate, keep := sampler.GetSampleRate(trace) + for _, span := range tc.spans { + trace.AddSpan(span) + } - assert.Equal(t, d.ExpectedRate, rate, d.Rules) + sampler.Start() + rate, keep := sampler.GetSampleRate(trace) - // we can only test when we don't expect to keep the trace - if !d.ExpectedKeep { - assert.Equal(t, d.ExpectedKeep, keep, d.Rules) - } + assert.Equal(t, uint(1), rate, rate) + if scope == "span" { + assert.Equal(t, tc.keepSpanScope, keep, keep) + } else { + assert.Equal(t, tc.keepTraceScope, keep, keep) + } + } + }) } } From c5e4fc6e92b6b19ef85ee327a6b115aad2e2ae44 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Jul 2022 11:00:20 +0100 Subject: [PATCH 198/351] Bump github.com/spf13/viper from 1.10.1 to 1.12.0 (#461) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.10.1 to 1.12.0. 
- [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.10.1...v1.12.0) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 25 ++++++------ go.sum | 125 +++++++++++++++++++++++++++++++++++++++++---------------- 2 files changed, 104 insertions(+), 46 deletions(-) diff --git a/go.mod b/go.mod index 6d3899697b..f84dd6df7c 100644 --- a/go.mod +++ b/go.mod @@ -22,13 +22,13 @@ require ( github.com/prometheus/client_golang v1.12.2 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.8.1 - github.com/spf13/viper v1.10.1 + github.com/spf13/viper v1.12.0 github.com/stretchr/testify v1.7.2 github.com/tidwall/gjson v1.14.1 github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.11.0 - golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect - google.golang.org/grpc v1.46.0 + golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect + google.golang.org/grpc v1.46.2 gopkg.in/alexcesaro/statsd.v2 v2.0.0 ) @@ -44,33 +44,34 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/leodido/go-urn v1.2.0 // indirect - github.com/magiconair/properties v1.8.5 // indirect + github.com/magiconair/properties v1.8.6 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/mitchellh/mapstructure v1.5.0 // 
indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pelletier/go-toml v1.9.4 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect - github.com/spf13/afero v1.6.0 // indirect - github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/subosito/gotenv v1.2.0 // indirect + github.com/subosito/gotenv v1.3.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect + google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect - gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect 
gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 846dd97d55..b60d4e9b71 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -13,6 +14,9 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -30,12 +34,12 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage 
v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -47,7 +51,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 
v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -56,9 +59,9 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -69,8 +72,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod 
h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -92,6 +95,7 @@ github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 h1:KnnwHN59Jx github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691/go.mod h1:sKLL1iua/0etWfo/nPCmyz+v2XDMXy+Ho53W7RAuZNY= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -151,13 +155,15 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -165,10 +171,14 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= @@ -186,6 +196,7 @@ github.com/honeycombio/husky v0.10.6/go.mod h1:i69+3xApIcsQn9PPeFRndTOOw6edNG66T github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 
h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -206,19 +217,19 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= 
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= -github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -228,13 +239,15 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= -github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= 
+github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= +github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -263,22 +276,22 @@ github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= 
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= +github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk= -github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -287,10 +300,11 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= +github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo= github.com/tidwall/gjson v1.14.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -308,11 +322,13 @@ github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark 
v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= @@ -320,9 +336,10 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -345,6 +362,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -353,6 +371,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -382,15 +402,23 @@ 
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210913180222-943fd674d43e h1:+b/22bPvDYt4NPDcy4xAGCmON713ONAWFeY3Z7I3tR8= -golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -400,6 +428,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -433,24 +462,32 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= @@ -498,10 +535,16 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -519,6 +562,9 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -557,8 +603,15 @@ google.golang.org/genproto 
v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -571,12 +624,16 @@ google.golang.org/grpc 
v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -602,8 +659,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 
v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= -gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= +gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From b1488e827f3eff689e18625cad832bbea83615d7 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Fri, 1 Jul 2022 17:09:48 +0100 Subject: [PATCH 199/351] Prepare 1.15.0 release (#470) Co-authored-by: Vera Reynolds --- CHANGELOG.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab6dbb10c1..4d27e57b9f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,27 @@ # Refinery Changelog +## 1.15.0 2022-07-01 + +### Enhancements + +- Add rule Scope configuration option to rules-based sampler (#440) | [isnotajoke](https://github.com/isnotajoke) +- Replace hand-rolled binary.BigEndian.Uint32 with the real deal (#459) | [toshok](https://github.com/toshok) +- Validate successful span scoped rules test (#465) | [MikeGoldsmith](https://github.com/MikeGoldsmith) +- Create helm-chart issue on release (#458) | [MikeGoldsmith](https://github.com/MikeGoldsmith) +- github_token needs underscore not hyphen (#464) | [@JamieDanielson](https://github.com/JamieDanielson) + +### Maintenance + +- Replace legacy with classic in readme (#457) | [MikeGoldsmith](https://github.com/MikeGoldsmith) + +### Dependencies + +- Bump github.com/spf13/viper from 1.10.1 to 1.12.0 (#461) +- Bump github.com/stretchr/testify from 1.7.1 to 1.7.2 
(#467) +- Bump github.com/honeycombio/husky from 0.10.5 to 0.10.6 (#460) +- Bump github.com/klauspost/compress from 1.15.4 to 1.15.6 (#466) +- Bump github.com/prometheus/client_golang from 1.12.1 to 1.12.2 (#463) + ## 1.14.1 2022-05-16 ### Fixes From 83c1812a55271531d986b6fc6cbfd80358687ff6 Mon Sep 17 00:00:00 2001 From: Jamie Danielson Date: Wed, 6 Jul 2022 16:30:24 -0400 Subject: [PATCH 200/351] maint: change title on release workflow from (unset) var to hard-coded string (#474) Error in previous workflow that attempted to create an issue states "Input required and not supplied: title". While the title does match that of the example provided in the actions repo, it is based on a variable that is set in the example that we do not have set. I believe this is causing a blank title which is not allowed, as the title value is required. This PR changes the title from a variable that is unset ({ steps.date.outputs.today }}) to a hard-coded string (Bump Refinery to Latest Version). 
--- .github/workflows/release.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4c5402e731..f0ea3eedc4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2,6 +2,7 @@ name: Create helm chart issue on release on: release: types: [published] + workflow_dispatch: jobs: create_issue: runs-on: ubuntu-latest @@ -11,7 +12,7 @@ jobs: with: github_token: ${{ secrets.GITHUB_TOKEN }} repo: github.com/honeycombio/helm-charts - title: ${{ steps.date.outputs.today }} + title: Bump Refinery to Latest Version body: | ## Bump Refinery From 89fba624e972c38dc1f934d9701c6774ab3a95ca Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Tue, 12 Jul 2022 13:26:34 -0400 Subject: [PATCH 201/351] maint: update GH action token (#477) GITHUB_TOKEN is a limited token with only enough permission to modify current repo --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f0ea3eedc4..1b3390a769 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,7 +10,7 @@ jobs: - name: Create an issue uses: actions-ecosystem/action-create-issue@v1 with: - github_token: ${{ secrets.GITHUB_TOKEN }} + github_token: ${{ secrets.GHPROJECTS_TOKEN }} repo: github.com/honeycombio/helm-charts title: Bump Refinery to Latest Version body: | From 3f40eb04be6e1d1977477c5fe035b2b3c22a855f Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Tue, 12 Jul 2022 15:20:51 -0400 Subject: [PATCH 202/351] maint: attempt to fix release action (#478) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1b3390a769..9174d7b661 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -11,7 +11,7 @@ jobs: uses: 
actions-ecosystem/action-create-issue@v1 with: github_token: ${{ secrets.GHPROJECTS_TOKEN }} - repo: github.com/honeycombio/helm-charts + repo: honeycombio/helm-charts title: Bump Refinery to Latest Version body: | ## Bump Refinery From 6c0a1d9a9b2661244686920c5a728df4c6a0e254 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 17 Jul 2022 14:51:20 -0400 Subject: [PATCH 203/351] Bump github.com/stretchr/testify from 1.7.2 to 1.8.0 (#472) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.2 to 1.8.0. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.7.2...v1.8.0) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f84dd6df7c..b50cdb0998 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.8.1 github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.7.2 + github.com/stretchr/testify v1.8.0 github.com/tidwall/gjson v1.14.1 github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.11.0 diff --git a/go.sum b/go.sum index b60d4e9b71..f72c697672 100644 --- a/go.sum +++ b/go.sum @@ -294,6 +294,7 @@ github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= github.com/spf13/viper 
v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -301,8 +302,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo= From 68a2efd5ef83b0abd888fbee47323123bacb4cf9 Mon Sep 17 00:00:00 2001 From: sujitha-pallapothu Date: Tue, 26 Jul 2022 17:00:40 +0530 Subject: [PATCH 204/351] app/app_test.go change --- 
app/app_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/app/app_test.go b/app/app_test.go index 6dbb353fc6..b9747a1e5d 100644 --- a/app/app_test.go +++ b/app/app_test.go @@ -26,7 +26,6 @@ import ( "github.com/stretchr/testify/assert" "gopkg.in/alexcesaro/statsd.v2" - "github.com/honeycombio/libhoney-go" "github.com/honeycombio/libhoney-go/transmission" "github.com/jirs5/tracing-proxy/collect" "github.com/jirs5/tracing-proxy/config" @@ -115,7 +114,7 @@ func newStartedApp( GetListenAddrVal: "127.0.0.1:" + strconv.Itoa(basePort), GetPeerListenAddrVal: "127.0.0.1:" + strconv.Itoa(basePort+1), GetAPIKeysVal: []string{"KEY"}, - GetHoneycombAPIVal: "http://jirs5", + GetOpsrampAPIVal: "http://jirs5", GetInMemoryCollectorCacheCapacityVal: config.InMemoryCollectorCacheCapacity{CacheCapacity: 10000}, AddHostMetadataToTrace: enableHostMetadata, } From 49ce1c9fae6f723b44c55465af5f60fdba20af16 Mon Sep 17 00:00:00 2001 From: sujitha-pallapothu Date: Tue, 26 Jul 2022 17:29:12 +0530 Subject: [PATCH 205/351] traceproxy cleanup --- cmd/tracing-proxy/main.go | 4 ++-- config.toml | 4 ++-- config/config.go | 4 ++-- config/config_test.go | 26 ++++++++++++------------- config/config_test_reload_error_test.go | 2 +- config/file_config.go | 8 ++++---- config/mock.go | 8 ++++---- config_complete.toml | 12 ++++++------ deployment/kubernetes/k8s-rules-cm.yaml | 6 +++--- go.mod | 4 ++-- go.sum | 11 ++++------- route/otlp_trace.go | 23 ++++++++++++---------- route/proxy.go | 2 +- route/route.go | 4 ++-- transmit/transmit.go | 10 +++++----- 15 files changed, 64 insertions(+), 64 deletions(-) diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index 67a4ae309d..4b82507ced 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -132,7 +132,7 @@ func main() { userAgentAddition := "tracing-proxy/" + version upstreamClient, err := libtrace.NewClient(libtrace.ClientConfig{ 
- Transmission: &transmission.Honeycomb{ + Transmission: &transmission.Opsramptraceproxy{ MaxBatchSize: c.GetMaxBatchSize(), BatchTimeout: libtrace.DefaultBatchTimeout, MaxConcurrentBatches: libtrace.DefaultMaxConcurrentBatches, @@ -154,7 +154,7 @@ func main() { fmt.Println("upstream client created..") peerClient, err := libtrace.NewClient(libtrace.ClientConfig{ - Transmission: &transmission.Honeycomb{ + Transmission: &transmission.Opsramptraceproxy{ MaxBatchSize: c.GetMaxBatchSize(), BatchTimeout: libtrace.DefaultBatchTimeout, MaxConcurrentBatches: libtrace.DefaultMaxConcurrentBatches, diff --git a/config.toml b/config.toml index a89bb78452..ca5d708fa2 100644 --- a/config.toml +++ b/config.toml @@ -19,9 +19,9 @@ CacheCapacity = 1000 [HoneycombMetrics] -# MetricsHoneycombAPI is the URL for the upstream Honeycomb API. +# MetricsOpsrampAPI is the URL for the upstream Honeycomb API. # Eligible for live reload. -MetricsHoneycombAPI = "https://api.jirs5" +MetricsOpsrampAPI = "https://api.jirs5" # MetricsAPIKey is the API key to use to send log events to the Honeycomb logging # dataset. 
This is separate from the APIKeys used to authenticate regular diff --git a/config/config.go b/config/config.go index 05449935e3..5d60cbd81b 100644 --- a/config/config.go +++ b/config/config.go @@ -60,9 +60,9 @@ type Config interface { // UseTLSInsecure returns true when certificate checks are disabled GetUseTLSInsecure() (bool, error) - // GetHoneycombAPI returns the base URL (protocol, hostname, and port) of + // GetOpsrampAPI returns the base URL (protocol, hostname, and port) of // the upstream Honeycomb API server - GetHoneycombAPI() (string, error) + GetOpsrampAPI() (string, error) // GetLoggingLevel returns the verbosity with which we should log GetLoggingLevel() (string, error) diff --git a/config/config_test.go b/config/config_test.go index cacf9eaf5d..4dad95f787 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -63,7 +63,7 @@ func TestReload(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://jirs5" + MetricsOpsrampAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -242,7 +242,7 @@ func TestPeerManagementType(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://jirs5" + MetricsOpsrampAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -276,7 +276,7 @@ func TestAbsentTraceKeyField(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://jirs5" + MetricsOpsrampAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -318,7 +318,7 @@ func TestDebugServiceAddr(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://jirs5" + MetricsOpsrampAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -345,7 +345,7 @@ func TestDryRun(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://jirs5" + 
MetricsOpsrampAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -383,7 +383,7 @@ func TestMaxAlloc(t *testing.T) { MaxAlloc=17179869184 [HoneycombMetrics] - MetricsHoneycombAPI="http://jirs5" + MetricsOpsrampAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -411,7 +411,7 @@ func TestGetSamplerTypes(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://jirs5" + MetricsOpsrampAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -502,7 +502,7 @@ func TestDefaultSampler(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://jirs5" + MetricsOpsrampAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 @@ -539,13 +539,13 @@ func TestHoneycombLoggerConfig(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://jirs5" + MetricsOpsrampAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 [HoneycombLogger] - LoggerHoneycombAPI="http://jirs5" + LoggerOpsrampAPI="http://jirs5" LoggerAPIKey="1234" LoggerDataset="loggerDataset" LoggerSamplerEnabled=true @@ -564,7 +564,7 @@ func TestHoneycombLoggerConfig(t *testing.T) { assert.NoError(t, err) - assert.Equal(t, "http://jirs5", loggerConfig.LoggerHoneycombAPI) + assert.Equal(t, "http://jirs5", loggerConfig.LoggerOpsrampAPI) assert.Equal(t, "1234", loggerConfig.LoggerAPIKey) assert.Equal(t, "loggerDataset", loggerConfig.LoggerDataset) assert.Equal(t, true, loggerConfig.LoggerSamplerEnabled) @@ -587,13 +587,13 @@ func TestHoneycombLoggerConfigDefaults(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://jirs5" + MetricsOpsrampAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 [HoneycombLogger] - 
LoggerHoneycombAPI="http://jirs5" + LoggerOpsrampAPI="http://jirs5" LoggerAPIKey="1234" LoggerDataset="loggerDataset" `) diff --git a/config/config_test_reload_error_test.go b/config/config_test_reload_error_test.go index ecddf17932..d443bbbb4f 100644 --- a/config/config_test_reload_error_test.go +++ b/config/config_test_reload_error_test.go @@ -29,7 +29,7 @@ func TestErrorReloading(t *testing.T) { CacheCapacity=1000 [HoneycombMetrics] - MetricsHoneycombAPI="http://jirs5" + MetricsOpsrampAPI="http://jirs5" MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 diff --git a/config/file_config.go b/config/file_config.go index 47cce52111..e679b3c50d 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -31,7 +31,7 @@ type configContents struct { GRPCListenAddr string GRPCPeerListenAddr string APIKeys []string `validate:"required"` - HoneycombAPI string `validate:"required,url"` + OpsrampAPI string `validate:"required,url"` LoggingLevel string `validate:"required"` Collector string `validate:"required,oneof= InMemCollector"` Sampler string `validate:"required,oneof= DeterministicSampler DynamicSampler EMADynamicSampler RulesBasedSampler TotalThroughputSampler"` @@ -112,7 +112,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("PeerManagement.UseTLS", false) c.SetDefault("PeerManagement.UseTLSInsecure", false) c.SetDefault("PeerManagement.UseIPV6Identifier", false) - c.SetDefault("HoneycombAPI", "https://api.jirs5") + c.SetDefault("OpsrampAPI", "https://api.jirs5") c.SetDefault("LoggingLevel", "debug") c.SetDefault("Collector", "InMemCollector") c.SetDefault("SendDelay", 2*time.Second) @@ -447,11 +447,11 @@ func (f *fileConfig) GetRedisIdentifier() (string, error) { return f.config.GetString("PeerManagement.RedisIdentifier"), nil } -func (f *fileConfig) GetHoneycombAPI() (string, error) { +func (f *fileConfig) GetOpsrampAPI() (string, error) { f.mux.RLock() defer f.mux.RUnlock() - 
return f.conf.HoneycombAPI, nil + return f.conf.OpsrampAPI, nil } func (f *fileConfig) GetLoggingLevel() (string, error) { diff --git a/config/mock.go b/config/mock.go index 55219e355e..41fd85fb47 100644 --- a/config/mock.go +++ b/config/mock.go @@ -16,8 +16,8 @@ type MockConfig struct { GetCollectorTypeVal string GetInMemoryCollectorCacheCapacityErr error GetInMemoryCollectorCacheCapacityVal InMemoryCollectorCacheCapacity - GetHoneycombAPIErr error - GetHoneycombAPIVal string + GetOpsrampAPIErr error + GetOpsrampAPIVal string GetListenAddrErr error GetListenAddrVal string GetPeerListenAddrErr error @@ -99,11 +99,11 @@ func (m *MockConfig) GetInMemCollectorCacheCapacity() (InMemoryCollectorCacheCap return m.GetInMemoryCollectorCacheCapacityVal, m.GetInMemoryCollectorCacheCapacityErr } -func (m *MockConfig) GetHoneycombAPI() (string, error) { +func (m *MockConfig) GetOpsrampAPI() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() - return m.GetHoneycombAPIVal, m.GetHoneycombAPIErr + return m.GetOpsrampAPIVal, m.GetOpsrampAPIErr } func (m *MockConfig) GetListenAddr() (string, error) { m.Mux.RLock() diff --git a/config_complete.toml b/config_complete.toml index 646d97d0fc..a239d52a1a 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -33,8 +33,8 @@ GRPCPeerListenAddr = "0.0.0.0:8084" # utilization over data transfer costs. CompressPeerCommunication = true -# APIKeys is a list of Honeycomb API keys that the proxy will accept. This list -# only applies to events - other Honeycomb API actions will fall through to the +# APIKeys is a list of Opsramp API keys that the proxy will accept. This list +# only applies to events - other Opsramp API actions will fall through to the # upstream API directly. 
# Adding keys here causes events arriving with API keys not in this list to be # rejected with an HTTP 401 error If an API key that is a literal '*' is in the @@ -46,10 +46,10 @@ APIKeys = [ "*", # wildcard accept all keys ] -# HoneycombAPI is the URL for the upstream Honeycomb API. +# OpsrampAPI is the URL for the upstream Opsramp API. # Eligible for live reload. -#HoneycombAPI = "localhost:50052" -HoneycombAPI = "https://asura.opsramp.net" +#OpsrampAPI = "localhost:50052" +OpsrampAPI = "https://asura.opsramp.net" #Tls Options UseTls = true @@ -220,7 +220,7 @@ MaxAlloc = 0 # LogrusLogger is a section of the config only used if you are using the # LogrusLogger to send all logs to STDOUT using the logrus package. If you are -# using a different logger (eg honeycomb logger) you can leave all this +# using a different logger (eg Opsramp logger) you can leave all this # commented out. [LogrusLogger] diff --git a/deployment/kubernetes/k8s-rules-cm.yaml b/deployment/kubernetes/k8s-rules-cm.yaml index 4f4276e02b..463cc6bc6f 100644 --- a/deployment/kubernetes/k8s-rules-cm.yaml +++ b/deployment/kubernetes/k8s-rules-cm.yaml @@ -47,7 +47,7 @@ data: # trace and uses them to form a key. This key is handed to the standard dynamic # sampler algorithm which generates a sample rate based on the frequency with # which that key has appeared in the previous ClearFrequencySec seconds. See - # https://github.com/honeycombio/dynsampler-go for more detail on the mechanics + # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from # that package. Sampler = "DynamicSampler" @@ -80,7 +80,7 @@ data: # sampler as part of the key. The number of spans is exact, so if there are # normally small variations in trace length you may want to leave this off. 
If # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Honeycomb, set this to true. + # indicator of traces you'd like to see in Opsramp, set this to true. # Eligible for live reload. UseTraceLength = true @@ -154,7 +154,7 @@ data: # sampler as part of the key. The number of spans is exact, so if there are # normally small variations in trace length you may want to leave this off. If # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Honeycomb, set this to true. + # indicator of traces you'd like to see in Opsramp, set this to true. # Eligible for live reload. UseTraceLength = true diff --git a/go.mod b/go.mod index fc1003dd32..2a198d8cb4 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220209113356-39ae92fc19f4 //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220302232703-acf6fcd2a9de -replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v1.15.9-0.20220505064121-8635e7c10a91 +replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v1.15.9-0.20220616111622-198db7b29be0 //replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220302161820-fe16f58d3996 -replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220412060429-354fd2b490b4 +replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df diff --git a/go.sum b/go.sum index 112d4b5444..f970e070a2 100644 --- a/go.sum +++ b/go.sum @@ -272,10 +272,10 @@ 
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jirs5/husky v0.9.1-0.20220412060429-354fd2b490b4 h1:YmBfU5uzwUZdYTvI60Vn+5JNaIaw6611td3Ltu8A0sQ= -github.com/jirs5/husky v0.9.1-0.20220412060429-354fd2b490b4/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= -github.com/jirs5/libtrace-go v1.15.9-0.20220505064121-8635e7c10a91 h1:iJj3Jcbt854dE5EC6iRO78dMLcG/jLiVonhM/tV9zXo= -github.com/jirs5/libtrace-go v1.15.9-0.20220505064121-8635e7c10a91/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= +github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df h1:vN66WfIFppi2IVEIp00wnmgBbvM6Jd6oT+WN5ChdUnQ= +github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= +github.com/jirs5/libtrace-go v1.15.9-0.20220616111622-198db7b29be0 h1:gA2frU8jEl4y1LngSFyy4GQrUPLgad4pI6uU8r4RBnE= +github.com/jirs5/libtrace-go v1.15.9-0.20220616111622-198db7b29be0/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -380,8 +380,6 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= -github.com/saikalyan-bhagavathula/libtrace-go v1.15.9-0.20220425111856-8b2ee16c2883 h1:xMC53EqdEypIeoZJ7tRfLzB9jaSUIQ71RTyadZlsKlM= -github.com/saikalyan-bhagavathula/libtrace-go v1.15.9-0.20220425111856-8b2ee16c2883/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -449,7 +447,6 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 8407bb2715..239e08ec6f 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -35,7 +35,7 @@ func (router *Router) postOTLP(w http.ResponseWriter, req 
*http.Request) { token := ri.ApiToken tenantId := ri.ApiTenantId - if err := processTraceRequest(req.Context(), router, result.Batches, ri.ApiKey, ri.Dataset, token, tenantId); err != nil { + if err := processTraceRequest(req.Context(), router, result.Batches, ri.Dataset, token, tenantId); err != nil { router.handlerReturnWithError(w, ErrUpstreamFailed, err) } } @@ -45,8 +45,10 @@ func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTrac /*if err := ri.ValidateHeaders(); err != nil { return nil, huskyotlp.AsGRPCError(err) }*/ + router.Metrics.Increment(router.incomingOrPeer + "_router_batch") fmt.Println("Translating Trace Req ..") result, err := huskyotlp.TranslateTraceReq(req, ri) + //fmt.Println("req",result.Batches[0]) if err != nil { return nil, huskyotlp.AsGRPCError(err) } @@ -55,8 +57,9 @@ func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTrac fmt.Println("Token:", token) fmt.Println("TenantId:", tenantId) + fmt.Println("dataset:", ri.Dataset) - if err := processTraceRequest(ctx, router, result.Batches, ri.ApiKey, ri.Dataset, token, tenantId); err != nil { + if err := processTraceRequest(ctx, router, result.Batches, ri.Dataset, token, tenantId); err != nil { return nil, huskyotlp.AsGRPCError(err) } @@ -67,24 +70,23 @@ func processTraceRequest( ctx context.Context, router *Router, batches []huskyotlp.Batch, - apiKey string, datasetName string, token string, tenantId string) error { var requestID types.RequestIDContextKey - apiHost, err := router.Config.GetHoneycombAPI() + apiHost, err := router.Config.GetOpsrampAPI() if err != nil { router.Logger.Error().Logf("Unable to retrieve APIHost from config while processing OTLP batch") return err } + //fmt.Println("datasetName",datasetName) for _, batch := range batches { for _, ev := range batch.Events { event := &types.Event{ Context: ctx, APIHost: apiHost, - APIKey: apiKey, APIToken: token, APITenantId: tenantId, Dataset: datasetName, @@ -104,9 +106,10 @@ func 
processTraceRequest( func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTraceProxyServiceRequest) (*proxypb.ExportTraceProxyServiceResponse, error) { fmt.Println("Received Trace data from peer \n") + r.Metrics.Increment(r.incomingOrPeer + "_router_batch") var token, tenantId, datasetName string - apiHost, err := r.Config.GetHoneycombAPI() + apiHost, err := r.Config.GetOpsrampAPI() if err != nil { r.Logger.Error().Logf("Unable to retrieve APIHost from config while processing OTLP batch") return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get apihost", Status: "Failed"}, nil @@ -167,10 +170,10 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr data["endTime"] = item.Data.EndTime event := &types.Event{ - Context: ctx, - APIHost: apiHost, - APIToken: token, - APIKey: "token", //Hardcoded for time-being. This need to be cleaned + Context: ctx, + APIHost: apiHost, + APIToken: token, + //APIKey: "token", //Hardcoded for time-being. 
This need to be cleaned APITenantId: tenantId, Dataset: datasetName, Timestamp: timestamp, diff --git a/route/proxy.go b/route/proxy.go index 354acd2571..e6ca01537c 100644 --- a/route/proxy.go +++ b/route/proxy.go @@ -14,7 +14,7 @@ import ( func (r *Router) proxy(w http.ResponseWriter, req *http.Request) { r.Metrics.Increment(r.incomingOrPeer + "_router_proxied") r.Logger.Debug().Logf("proxying request for %s", req.URL.Path) - upstreamTarget, err := r.Config.GetHoneycombAPI() + upstreamTarget, err := r.Config.GetOpsrampAPI() if err != nil { w.WriteHeader(http.StatusServiceUnavailable) io.WriteString(w, `{"error":"upstream target unavailable"}`) diff --git a/route/route.go b/route/route.go index ffba623582..b27fe83e26 100644 --- a/route/route.go +++ b/route/route.go @@ -340,7 +340,7 @@ func (r *Router) requestToEvent(req *http.Request, reqBod []byte) (*types.Event, vars := mux.Vars(req) dataset := vars["datasetName"] - apiHost, err := r.Config.GetHoneycombAPI() + apiHost, err := r.Config.GetOpsrampAPI() if err != nil { return nil, err } @@ -541,7 +541,7 @@ func (r *Router) batchedEventToEvent(req *http.Request, bev batchedEvent) (*type // once for the entire batch instead of in every event. 
vars := mux.Vars(req) dataset := vars["datasetName"] - apiHost, err := r.Config.GetHoneycombAPI() + apiHost, err := r.Config.GetOpsrampAPI() if err != nil { return nil, err } diff --git a/transmit/transmit.go b/transmit/transmit.go index b8ae6cbbba..6f80885736 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -15,7 +15,7 @@ import ( ) type Transmission interface { - // Enqueue accepts a single event and schedules it for transmission to Honeycomb + // Enqueue accepts a single event and schedules it for transmission to Opsramp EnqueueEvent(ev *types.Event) EnqueueSpan(ev *types.Span) // Flush flushes the in-flight queue of all events and spans @@ -50,7 +50,7 @@ func (d *DefaultTransmission) Start() error { // upstreamAPI doesn't get set when the client is initialized, because // it can be reloaded from the config file while live - upstreamAPI, err := d.Config.GetHoneycombAPI() + upstreamAPI, err := d.Config.GetOpsrampAPI() if err != nil { return err } @@ -84,10 +84,10 @@ func (d *DefaultTransmission) Start() error { func (d *DefaultTransmission) reloadTransmissionBuilder() { d.Logger.Debug().Logf("reloading transmission config") - upstreamAPI, err := d.Config.GetHoneycombAPI() + upstreamAPI, err := d.Config.GetOpsrampAPI() if err != nil { // log and skip reload - d.Logger.Error().Logf("Failed to reload Honeycomb API when reloading configs:", err) + d.Logger.Error().Logf("Failed to reload Opsramp API when reloading configs:", err) } builder := d.LibhClient.NewBuilder() builder.APIHost = upstreamAPI @@ -101,7 +101,7 @@ func (d *DefaultTransmission) EnqueueEvent(ev *types.Event) { Logf("transmit sending event") libhEv := d.builder.NewEvent() libhEv.APIHost = ev.APIHost - libhEv.WriteKey = ev.APIKey + //libhEv.WriteKey = ev.APIKey libhEv.Dataset = ev.Dataset libhEv.SampleRate = ev.SampleRate libhEv.Timestamp = ev.Timestamp From 4d071d77f3098b6d8e78c29888580e4377afdcc2 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Tue, 26 Jul 2022 15:02:22 -0400 Subject: 
[PATCH 206/351] Update Husky and other dependencies (#481) ## Which problem is this PR solving? - Bumps Husky to v0.12, which also forces otlp and grpc upgrades - Bumps compress to latest ## Short description of the changes - Upgrade github.com/klauspost/compress from v1.15.6 to v1.15.8 (closes #471) - Upgrade go.opentelemetry.io/proto/otlp from v0.11.0 to v0.18.0 (closes #469) - Upgrade google.golang.org/grpc to from v1.46.0 to v1.48.0 (closes #462) - Upgrade github.com/honeycombio/husky from v0.11.0 to v0.12.0 --- go.mod | 15 ++++++++------- go.sum | 29 ++++++++++++++++++++--------- route/otlp_trace_test.go | 20 ++++++++++---------- route/route.go | 3 ++- 4 files changed, 40 insertions(+), 27 deletions(-) diff --git a/go.mod b/go.mod index b50cdb0998..a8305d9a81 100644 --- a/go.mod +++ b/go.mod @@ -8,16 +8,16 @@ require ( github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d github.com/fsnotify/fsnotify v1.5.4 github.com/go-playground/validator v9.31.0+incompatible - github.com/golang/protobuf v1.5.2 + github.com/golang/protobuf v1.5.2 // indirect github.com/gomodule/redigo v1.8.8 github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.10.6 + github.com/honeycombio/husky v0.12.0 github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.15.6 + github.com/klauspost/compress v1.15.8 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.2 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 @@ -26,12 +26,14 @@ require ( 
github.com/stretchr/testify v1.8.0 github.com/tidwall/gjson v1.14.1 github.com/vmihailenco/msgpack/v4 v4.3.11 - go.opentelemetry.io/proto/otlp v0.11.0 + go.opentelemetry.io/proto/otlp v0.18.0 golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect - google.golang.org/grpc v1.46.2 + google.golang.org/grpc v1.48.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 ) +require google.golang.org/protobuf v1.28.0 + require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect @@ -41,7 +43,7 @@ require ( github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 // indirect github.com/go-playground/locales v0.13.0 // indirect github.com/go-playground/universal-translator v0.17.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/leodido/go-urn v1.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect @@ -69,7 +71,6 @@ require ( golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index f72c697672..ce18d4ca7c 100644 --- a/go.sum +++ b/go.sum @@ -40,6 +40,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= 
github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -51,6 +52,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -62,6 +64,7 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go 
v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -74,6 +77,7 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -117,6 +121,8 @@ github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+Pugk github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog 
v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -181,8 +187,9 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -191,8 +198,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 
github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.10.6 h1:jU/lXqo7Qz6e9eUJErIH3Lst2gjKWSJ4oAXYjFSXkn0= -github.com/honeycombio/husky v0.10.6/go.mod h1:i69+3xApIcsQn9PPeFRndTOOw6edNG66TxTTd+L6oq0= +github.com/honeycombio/husky v0.12.0 h1:LjJB7czXUE5AXlXPj+X7eX2gybDpUrhwAtAGmC4iQmA= +github.com/honeycombio/husky v0.12.0/go.mod h1:Wiu2MyV4WimeYzauaMsuSa4O24Jx/JgED+xVDiiUiQ4= github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -211,8 +218,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY= -github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.8 h1:JahtItbkWjf2jzm/T+qgMxkP9EMHsqEUA6vCMGmXvhA= +github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -282,6 +289,7 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= @@ -331,8 +339,8 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= -go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= +go.opentelemetry.io/proto/otlp v0.18.0 h1:W5hyXNComRa23tGpKwG+FRAc4rfF6ZUg1JReK+QHS80= +go.opentelemetry.io/proto/otlp v0.18.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -421,6 +429,7 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -611,6 +620,7 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -631,10 +641,11 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod 
h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/route/otlp_trace_test.go b/route/otlp_trace_test.go index a3f13eb877..7ab351a023 100644 --- a/route/otlp_trace_test.go +++ b/route/otlp_trace_test.go @@ -11,7 +11,6 @@ import ( "testing" "time" - "github.com/golang/protobuf/proto" huskyotlp "github.com/honeycombio/husky/otlp" "github.com/honeycombio/refinery/config" "github.com/honeycombio/refinery/logger" @@ -24,6 +23,7 @@ import ( resource "go.opentelemetry.io/proto/otlp/resource/v1" trace "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" ) const legacyAPIKey = "***REMOVED***" @@ -67,7 +67,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("span with status", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: 
[]*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: helperOTLPRequestSpansWithStatus(), }}, }}, @@ -83,7 +83,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("span without status", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: helperOTLPRequestSpansWithoutStatus(), }}, }}, @@ -105,7 +105,7 @@ func TestOTLPHandler(t *testing.T) { spanID := []byte{1, 0, 0, 0, 0} req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: []*trace.Span{{ TraceId: traceID, SpanId: spanID, @@ -153,7 +153,7 @@ func TestOTLPHandler(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: []*trace.Span{{ Name: "span_with_link", TraceId: traceID, @@ -191,7 +191,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("can receive OTLP over HTTP/protobuf", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: helperOTLPRequestSpansWithStatus(), }}, }}, @@ -218,7 +218,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("can receive OTLP over HTTP/protobuf with gzip encoding", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: helperOTLPRequestSpansWithStatus(), }}, }}, @@ -254,7 +254,7 @@ func TestOTLPHandler(t 
*testing.T) { t.Run("can receive OTLP over HTTP/protobuf with zstd encoding", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: helperOTLPRequestSpansWithStatus(), }}, }}, @@ -317,7 +317,7 @@ func TestOTLPHandler(t *testing.T) { {Key: "service.name", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "my-service"}}}, }, }, - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: []*trace.Span{{ Name: "my-span", }}, @@ -350,7 +350,7 @@ func TestOTLPHandler(t *testing.T) { {Key: "service.name", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "my-service"}}}, }, }, - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: []*trace.Span{{ Name: "my-span", }}, diff --git a/route/route.go b/route/route.go index 93f74e4155..4f8e59cff9 100644 --- a/route/route.go +++ b/route/route.go @@ -233,7 +233,8 @@ func (r *Router) LnS(incomingOrPeer string) { } func (r *Router) Stop() error { - ctx, _ := context.WithTimeout(context.Background(), time.Minute) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() err := r.server.Shutdown(ctx) if err != nil { return err From fe02075b8325e643b6e26cc68d4c8a5e2af0919b Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Wed, 27 Jul 2022 11:21:55 -0400 Subject: [PATCH 207/351] Revert "Update Husky and other dependencies (#481)" (#482) This reverts commit 4d071d77f3098b6d8e78c29888580e4377afdcc2. Reverts honeycombio/refinery#481. Because AWS is sending data using OTLP 0.7.0, we cannot upgrade refinery to 0.18 yet. We're going to have to fork the proto definition so that we can handle both; that work is going to be done in Husky and imported here. 
--- go.mod | 15 +++++++-------- go.sum | 29 +++++++++-------------------- route/otlp_trace_test.go | 20 ++++++++++---------- route/route.go | 3 +-- 4 files changed, 27 insertions(+), 40 deletions(-) diff --git a/go.mod b/go.mod index a8305d9a81..b50cdb0998 100644 --- a/go.mod +++ b/go.mod @@ -8,16 +8,16 @@ require ( github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d github.com/fsnotify/fsnotify v1.5.4 github.com/go-playground/validator v9.31.0+incompatible - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.2 github.com/gomodule/redigo v1.8.8 github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.12.0 + github.com/honeycombio/husky v0.10.6 github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.15.8 + github.com/klauspost/compress v1.15.6 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.2 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 @@ -26,14 +26,12 @@ require ( github.com/stretchr/testify v1.8.0 github.com/tidwall/gjson v1.14.1 github.com/vmihailenco/msgpack/v4 v4.3.11 - go.opentelemetry.io/proto/otlp v0.18.0 + go.opentelemetry.io/proto/otlp v0.11.0 golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect - google.golang.org/grpc v1.48.0 + google.golang.org/grpc v1.46.2 gopkg.in/alexcesaro/statsd.v2 v2.0.0 ) -require google.golang.org/protobuf v1.28.0 - require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect @@ 
-43,7 +41,7 @@ require ( github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 // indirect github.com/go-playground/locales v0.13.0 // indirect github.com/go-playground/universal-translator v0.17.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/leodido/go-urn v1.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect @@ -71,6 +69,7 @@ require ( golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect + google.golang.org/protobuf v1.28.0 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index ce18d4ca7c..f72c697672 100644 --- a/go.sum +++ b/go.sum @@ -40,7 +40,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ 
-52,7 +51,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -64,7 +62,6 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -77,7 +74,6 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m 
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -121,8 +117,6 @@ github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+Pugk github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -187,9 +181,8 @@ 
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -198,8 +191,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.12.0 h1:LjJB7czXUE5AXlXPj+X7eX2gybDpUrhwAtAGmC4iQmA= -github.com/honeycombio/husky v0.12.0/go.mod h1:Wiu2MyV4WimeYzauaMsuSa4O24Jx/JgED+xVDiiUiQ4= +github.com/honeycombio/husky v0.10.6 h1:jU/lXqo7Qz6e9eUJErIH3Lst2gjKWSJ4oAXYjFSXkn0= +github.com/honeycombio/husky 
v0.10.6/go.mod h1:i69+3xApIcsQn9PPeFRndTOOw6edNG66TxTTd+L6oq0= github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= github.com/honeycombio/libhoney-go v1.15.8/go.mod h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -218,8 +211,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.8 h1:JahtItbkWjf2jzm/T+qgMxkP9EMHsqEUA6vCMGmXvhA= -github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY= +github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -289,7 +282,6 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod 
h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= @@ -339,8 +331,8 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.18.0 h1:W5hyXNComRa23tGpKwG+FRAc4rfF6ZUg1JReK+QHS80= -go.opentelemetry.io/proto/otlp v0.18.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -429,7 +421,6 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -620,7 +611,6 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -641,11 +631,10 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= 
-google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/route/otlp_trace_test.go b/route/otlp_trace_test.go index 7ab351a023..a3f13eb877 100644 --- a/route/otlp_trace_test.go +++ b/route/otlp_trace_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/golang/protobuf/proto" huskyotlp "github.com/honeycombio/husky/otlp" "github.com/honeycombio/refinery/config" "github.com/honeycombio/refinery/logger" @@ -23,7 +24,6 @@ import ( resource "go.opentelemetry.io/proto/otlp/resource/v1" trace "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/grpc/metadata" - "google.golang.org/protobuf/proto" ) const legacyAPIKey = "***REMOVED***" @@ -67,7 +67,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("span with status", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - ScopeSpans: []*trace.ScopeSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ Spans: helperOTLPRequestSpansWithStatus(), }}, }}, @@ -83,7 +83,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("span without status", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - ScopeSpans: []*trace.ScopeSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ Spans: helperOTLPRequestSpansWithoutStatus(), }}, }}, @@ -105,7 +105,7 @@ 
func TestOTLPHandler(t *testing.T) { spanID := []byte{1, 0, 0, 0, 0} req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - ScopeSpans: []*trace.ScopeSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ Spans: []*trace.Span{{ TraceId: traceID, SpanId: spanID, @@ -153,7 +153,7 @@ func TestOTLPHandler(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - ScopeSpans: []*trace.ScopeSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ Spans: []*trace.Span{{ Name: "span_with_link", TraceId: traceID, @@ -191,7 +191,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("can receive OTLP over HTTP/protobuf", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - ScopeSpans: []*trace.ScopeSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ Spans: helperOTLPRequestSpansWithStatus(), }}, }}, @@ -218,7 +218,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("can receive OTLP over HTTP/protobuf with gzip encoding", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - ScopeSpans: []*trace.ScopeSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ Spans: helperOTLPRequestSpansWithStatus(), }}, }}, @@ -254,7 +254,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("can receive OTLP over HTTP/protobuf with zstd encoding", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - ScopeSpans: []*trace.ScopeSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ Spans: helperOTLPRequestSpansWithStatus(), }}, }}, @@ -317,7 +317,7 @@ func TestOTLPHandler(t *testing.T) { {Key: "service.name", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "my-service"}}}, }, }, - ScopeSpans: []*trace.ScopeSpans{{ 
+ InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ Spans: []*trace.Span{{ Name: "my-span", }}, @@ -350,7 +350,7 @@ func TestOTLPHandler(t *testing.T) { {Key: "service.name", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "my-service"}}}, }, }, - ScopeSpans: []*trace.ScopeSpans{{ + InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ Spans: []*trace.Span{{ Name: "my-span", }}, diff --git a/route/route.go b/route/route.go index 4f8e59cff9..93f74e4155 100644 --- a/route/route.go +++ b/route/route.go @@ -233,8 +233,7 @@ func (r *Router) LnS(incomingOrPeer string) { } func (r *Router) Stop() error { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() + ctx, _ := context.WithTimeout(context.Background(), time.Minute) err := r.server.Shutdown(ctx) if err != nil { return err From 3b32b9345a20cd4b31bca440005f0fcae1915945 Mon Sep 17 00:00:00 2001 From: sujitha-pallapothu Date: Thu, 4 Aug 2022 22:40:04 +0530 Subject: [PATCH 208/351] rules_complete.toml --- rules_complete.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/rules_complete.toml b/rules_complete.toml index ee27ae4558..be30359fb0 100644 --- a/rules_complete.toml +++ b/rules_complete.toml @@ -32,7 +32,7 @@ SampleRate = 1 # trace and uses them to form a key. This key is handed to the standard dynamic # sampler algorithm which generates a sample rate based on the frequency with # which that key has appeared in the previous ClearFrequencySec seconds. See - # https://github.com/honeycombio/dynsampler-go for more detail on the mechanics + # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from # that package. Sampler = "DynamicSampler" @@ -65,7 +65,7 @@ SampleRate = 1 # sampler as part of the key. 
The number of spans is exact, so if there are # normally small variations in trace length you may want to leave this off. If # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Honeycomb, set this to true. + # indicator of traces you'd like to see in Opsramp, set this to true. # Eligible for live reload. UseTraceLength = true @@ -139,7 +139,7 @@ SampleRate = 1 # sampler as part of the key. The number of spans is exact, so if there are # normally small variations in trace length you may want to leave this off. If # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Honeycomb, set this to true. + # indicator of traces you'd like to see in Opsramp, set this to true. # Eligible for live reload. UseTraceLength = true From 140faaac21852a5e0975477497dceceecfd28b46 Mon Sep 17 00:00:00 2001 From: sujitha-pallapothu Date: Tue, 23 Aug 2022 16:50:32 +0530 Subject: [PATCH 209/351] proxy support for traces --- config/config.go | 20 ++++++++++++++++++++ config/file_config.go | 37 +++++++++++++++++++++++++++++++++++++ config_complete.toml | 20 ++++++++++++++++++++ transmit/transmit.go | 20 ++++++++++++++++++++ 4 files changed, 97 insertions(+) diff --git a/config/config.go b/config/config.go index 5d60cbd81b..ad453d8627 100644 --- a/config/config.go +++ b/config/config.go @@ -134,4 +134,24 @@ type Config interface { // GetUseTLSInsecureSkip returns false when certificate checks are disabled GetGlobalUseTLSInsecureSkip() bool + + // GetProxyProtocol returns protocol on which to listen for + // proxy traffic + GetProxyProtocol()(string,error ) + + // GetProxyServer returns the address on which to listen for + // proxy traffic + GetProxyServer() (string, error) + + // GetProxyPort returns the port on which to listen for + // proxy traffic + GetProxyPort() (int64) + + // GetProxyUsername returns the username on which to listen for + // proxy traffic + 
GetProxyUsername()(string,error) + + // GetProxyPassword returns the password of proxy user on which to listen for + // proxy traffic + GetProxyPassword()(string,error) } diff --git a/config/file_config.go b/config/file_config.go index e679b3c50d..14cef339cd 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -50,6 +50,11 @@ type configContents struct { SendMetricsToOpsRamp bool UseTls bool UseTlsInSecure bool + ProxyProtocol string + ProxyServer string + ProxyPort int64 + ProxyUsername string + ProxyPassword string } type InMemoryCollectorCacheCapacity struct { @@ -125,6 +130,12 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("AddHostMetadataToTrace", false) c.SetDefault("SendMetricsToOpsRamp", false) + c.SetDefault("ProxyProtocol","") + c.SetDefault("ProxyServer","" ) + c.SetDefault("ProxyPort",int64(0)) + c.SetDefault("ProxyUsername","") + c.SetDefault("ProxyPassword","") + c.SetConfigFile(config) err := c.ReadInConfig() @@ -412,6 +423,32 @@ func (f *fileConfig) GetRedisPassword() (string, error) { return f.config.GetString("PeerManagement.RedisPassword"), nil } +func (f *fileConfig)GetProxyProtocol()(string,error){ + f.mux.RLock() + defer f.mux.RUnlock() + return f.conf.ProxyProtocol,nil +} +func (f *fileConfig)GetProxyServer()(string,error){ + f.mux.RLock() + defer f.mux.RUnlock() + return f.conf.ProxyServer,nil +} +func (f *fileConfig)GetProxyPort()(int64){ + f.mux.RLock() + defer f.mux.RUnlock() + return f.conf.ProxyPort +} +func (f *fileConfig)GetProxyUsername()(string,error){ + f.mux.RLock() + defer f.mux.RUnlock() + return f.conf.ProxyUsername,nil +} +func (f *fileConfig)GetProxyPassword()(string,error){ + f.mux.RLock() + defer f.mux.RUnlock() + return f.conf.ProxyPassword,nil +} + func (f *fileConfig) GetUseTLS() (bool, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index a239d52a1a..4573bee10a 100644 --- a/config_complete.toml +++ 
b/config_complete.toml @@ -25,6 +25,26 @@ GRPCListenAddr = "0.0.0.0:9090" PeerListenAddr = "0.0.0.0:8083" GRPCPeerListenAddr = "0.0.0.0:8084" +# ProxyProtocol accepts http and https +# Not Eligible for live reload. +ProxyProtocol = "" + +# ProxyServer takes the proxy server address +# Not Eligible for live reload. +ProxyServer= "" + +# ProxyPort takes the proxy server port +# Not Eligible for live reload. +ProxyPort=0 + +# ProxyUserName takes the proxy username +# Not Eligible for live reload. +ProxyUserName = "" + +# ProxyPassword takes the proxy password +# Not Eligible for live reload. +ProxyPassword = "" + # CompressPeerCommunication determines whether refinery will compress span data # it forwards to peers. If it costs money to transmit data between refinery # instances (e.g. they're spread across AWS availability zones), then you diff --git a/transmit/transmit.go b/transmit/transmit.go index 6f80885736..0cd4405f6b 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -2,6 +2,7 @@ package transmit import ( "context" + "fmt" "os" "sync" @@ -77,6 +78,25 @@ func (d *DefaultTransmission) Start() error { d.responseCanceler = canceler go d.processResponses(processCtx, d.LibhClient.TxResponses()) + //proxy support for traces + proto,_:= d.Config.GetProxyProtocol() + server,_ := d.Config.GetProxyServer() + port:= d.Config.GetProxyPort() + username,_ := d.Config.GetProxyUsername() + password,_ := d.Config.GetProxyPassword() + + proxyUrl := "" + if server != "" && proto != "" { + proxyUrl = fmt.Sprintf("%s://%s:%d/", proto, server, port) + if username != "" && password != "" { + proxyUrl = fmt.Sprintf("%s://%s:%s@%s:%d", proto, username, password, server, port) + d.Logger.Debug().Logf("Using Authentication for Proxy Communication for Traces") + } + os.Setenv("HTTPS_PROXY", proxyUrl) + os.Setenv("HTTP_PROXY", proxyUrl) + } + + // listen for config reloads d.Config.RegisterReloadCallback(d.reloadTransmissionBuilder) return nil From 
00aac3bc994a0a580861e82d6515f219533b798f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Aug 2022 14:09:26 -0400 Subject: [PATCH 210/351] Bump github.com/sirupsen/logrus from 1.8.1 to 1.9.0 (#484) Bumps [github.com/sirupsen/logrus](https://github.com/sirupsen/logrus) from 1.8.1 to 1.9.0. - [Release notes](https://github.com/sirupsen/logrus/releases) - [Changelog](https://github.com/sirupsen/logrus/blob/master/CHANGELOG.md) - [Commits](https://github.com/sirupsen/logrus/compare/v1.8.1...v1.9.0) --- updated-dependencies: - dependency-name: github.com/sirupsen/logrus dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index b50cdb0998..98b9e92bd9 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.2 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 - github.com/sirupsen/logrus v1.8.1 + github.com/sirupsen/logrus v1.9.0 github.com/spf13/viper v1.12.0 github.com/stretchr/testify v1.8.0 github.com/tidwall/gjson v1.14.1 @@ -65,7 +65,7 @@ require ( github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect + golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect 
golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect diff --git a/go.sum b/go.sum index f72c697672..ff6083d97b 100644 --- a/go.sum +++ b/go.sum @@ -280,8 +280,8 @@ github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBO github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= @@ -444,7 +444,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -480,8 +479,8 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 5dd5a24a03518da87553c3bb3a768ea036eafa57 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Aug 2022 14:10:08 -0400 Subject: [PATCH 211/351] Bump google.golang.org/grpc from 1.46.2 to 1.48.0 (#485) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.46.2 to 1.48.0. 
- [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.46.2...v1.48.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 98b9e92bd9..a9cfce40ad 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.11.0 golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect - google.golang.org/grpc v1.46.2 + google.golang.org/grpc v1.48.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 ) diff --git a/go.sum b/go.sum index ff6083d97b..81c81ce2af 100644 --- a/go.sum +++ b/go.sum @@ -632,8 +632,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 485c7ff205649329954ddec6217d52e5fcbd0c1b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Aug 2022 14:11:13 -0400 Subject: [PATCH 212/351] Bump github.com/honeycombio/libhoney-go from 1.15.8 to 1.16.0 (#487) Bumps [github.com/honeycombio/libhoney-go](https://github.com/honeycombio/libhoney-go) from 1.15.8 to 1.16.0. - [Release notes](https://github.com/honeycombio/libhoney-go/releases) - [Changelog](https://github.com/honeycombio/libhoney-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/honeycombio/libhoney-go/compare/v1.15.8...v1.16.0) --- updated-dependencies: - dependency-name: github.com/honeycombio/libhoney-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 13 ++++++------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index a9cfce40ad..a5b07f7fb8 100644 --- a/go.mod +++ b/go.mod @@ -14,10 +14,10 @@ require ( github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 github.com/honeycombio/husky v0.10.6 - github.com/honeycombio/libhoney-go v1.15.8 + github.com/honeycombio/libhoney-go v1.16.0 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.15.6 + github.com/klauspost/compress v1.15.7 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.2 github.com/rcrowley/go-metrics 
v0.0.0-20200313005456-10cdbea86bc0 diff --git a/go.sum b/go.sum index 81c81ce2af..a29c536e4a 100644 --- a/go.sum +++ b/go.sum @@ -38,8 +38,8 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= -github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -193,8 +193,8 @@ github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+D github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= github.com/honeycombio/husky v0.10.6 h1:jU/lXqo7Qz6e9eUJErIH3Lst2gjKWSJ4oAXYjFSXkn0= github.com/honeycombio/husky v0.10.6/go.mod h1:i69+3xApIcsQn9PPeFRndTOOw6edNG66TxTTd+L6oq0= -github.com/honeycombio/libhoney-go v1.15.8 h1:TECEltZ48K6J4NG1JVYqmi0vCJNnHYooFor83fgKesA= -github.com/honeycombio/libhoney-go v1.15.8/go.mod 
h1:+tnL2etFnJmVx30yqmoUkVyQjp7uRJw0a2QGu48lSyY= +github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= +github.com/honeycombio/libhoney-go v1.16.0/go.mod h1:izP4fbREuZ3vqC4HlCAmPrcPT9gxyxejRjGtCYpmBn0= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= @@ -210,9 +210,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY= -github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok= +github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= From 
03b2e919ac1bcb733e3ecc387cf22d5d1a12087b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Aug 2022 14:11:48 -0400 Subject: [PATCH 213/351] Bump github.com/gomodule/redigo from 1.8.8 to 1.8.9 (#488) Bumps [github.com/gomodule/redigo](https://github.com/gomodule/redigo) from 1.8.8 to 1.8.9. - [Release notes](https://github.com/gomodule/redigo/releases) - [Commits](https://github.com/gomodule/redigo/compare/v1.8.8...v1.8.9) --- updated-dependencies: - dependency-name: github.com/gomodule/redigo dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a5b07f7fb8..143ed8c7c2 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/fsnotify/fsnotify v1.5.4 github.com/go-playground/validator v9.31.0+incompatible github.com/golang/protobuf v1.5.2 - github.com/gomodule/redigo v1.8.8 + github.com/gomodule/redigo v1.8.9 github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 diff --git a/go.sum b/go.sum index a29c536e4a..fb23fa7902 100644 --- a/go.sum +++ b/go.sum @@ -144,8 +144,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/gomodule/redigo v1.8.8 h1:f6cXq6RRfiyrOJEV7p3JhLDlmawGBVBBP1MggY8Mo4E= -github.com/gomodule/redigo v1.8.8/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= +github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws= +github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= From 3f5ae99ec27b35f757917d0881b9d3be8b7645f6 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Tue, 6 Sep 2022 15:31:32 -0400 Subject: [PATCH 214/351] Add endpoints to help debug refinery rules (#500) ## Which problem is this PR solving? There have been multiple reports of people not realizing their configs were not parsing the way they were intended. This adds two endpoints: - `/debug/allrules/$FORMAT` will retrieve the entire rules configuration - `/debug/rules/$FORMAT/$DATASET` will retrieve the rule set that refinery will use for the specified dataset. It comes back as a map of the sampler type to its rule set. 
## Short description of the changes - Add the endpoints - Add a feature to config to return the full rule set - Extend existing config feature to return the sampler name along with its rules struct - Add/expand tests for it all --- README.md | 15 ++++ config/config.go | 7 +- config/config_test.go | 32 ++++--- config/config_test_reload_error_test.go | 10 ++- config/file_config.go | 50 +++++++++-- config/mock.go | 14 ++- go.mod | 10 +-- internal/peer/peers_test.go | 1 + route/otlp_trace_test.go | 2 +- route/proxy.go | 3 +- route/route.go | 69 ++++++++++++++- route/route_test.go | 111 ++++++++++++++++++++++-- sample/sample.go | 2 +- 13 files changed, 281 insertions(+), 45 deletions(-) diff --git a/README.md b/README.md index 0f6cdde79c..2c5112678a 100644 --- a/README.md +++ b/README.md @@ -133,8 +133,23 @@ Refinery emits a number of metrics to give some indication about the health of t ## Troubleshooting +### Logging + The default logging level of `warn` is almost entirely silent. The `debug` level emits too much data to be used in production, but contains excellent information in a pre-production environment. Setting the logging level to `debug` during initial configuration will help understand what's working and what's not, but when traffic volumes increase it should be set to `warn`. +### Configuration + +Because the normal configuration file formats (TOML and YAML) can sometimes be confusing to read and write, it may be valuable to check the loaded configuration by using one of the debug endpoints from the command line: + +`curl --include --get $REFINERY_HOST/debug/allrules/$FORMAT` will retrieve the entire rules configuration. + +`curl --include --get $REFINERY_HOST/debug/rules/$FORMAT/$DATASET` will retrieve the rule set that refinery will use for the specified dataset. It comes back as a map of the sampler type to its rule set. + +- `$REFINERY_HOST` should be the url of your refinery. +- `$FORMAT` can be one of `json`, `yaml`, or `toml`. 
+- `$DATASET` is the name of the dataset you want to check. + + ## Restarts Refinery does not yet buffer traces or sampling decisions to disk. When you restart the process all in-flight traces will be flushed (sent upstream to Honeycomb), but you will lose the record of past trace decisions. When started back up, it will start with a clean slate. diff --git a/config/config.go b/config/config.go index fe0e70696d..72711000a6 100644 --- a/config/config.go +++ b/config/config.go @@ -99,8 +99,11 @@ type Config interface { // GetInMemCollectorCacheCapacity returns the config specific to the InMemCollector GetInMemCollectorCacheCapacity() (InMemoryCollectorCacheCapacity, error) - // GetSamplerConfigForDataset returns the sampler type to use for the given dataset - GetSamplerConfigForDataset(string) (interface{}, error) + // GetSamplerConfigForDataset returns the sampler type and name to use for the given dataset + GetSamplerConfigForDataset(string) (interface{}, string, error) + + // GetAllSamplerRules returns all dataset rules in a map, including the default + GetAllSamplerRules() (map[string]interface{}, error) // GetMetricsType returns the type of metrics to use. 
Valid types are in the // metrics package diff --git a/config/config_test.go b/config/config_test.go index 5e99906091..edf5e51715 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package config @@ -212,9 +213,10 @@ func TestReadDefaults(t *testing.T) { t.Error("received", d, "expected", time.Hour) } - d, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist") + d, name, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist") assert.NoError(t, err) assert.IsType(t, &DeterministicSamplerConfig{}, d) + assert.Equal(t, "DeterministicSampler", name) type imcConfig struct { CacheCapacity int @@ -234,15 +236,17 @@ func TestReadRulesConfig(t *testing.T) { t.Error(err) } - d, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist") + d, name, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist") assert.NoError(t, err) assert.IsType(t, &DeterministicSamplerConfig{}, d) + assert.Equal(t, "DeterministicSampler", name) - d, err = c.GetSamplerConfigForDataset("dataset1") + d, name, err = c.GetSamplerConfigForDataset("dataset1") assert.NoError(t, err) assert.IsType(t, &DynamicSamplerConfig{}, d) + assert.Equal(t, "DynamicSampler", name) - d, err = c.GetSamplerConfigForDataset("dataset4") + d, name, err = c.GetSamplerConfigForDataset("dataset4") assert.NoError(t, err) switch r := d.(type) { case *RulesBasedSamplerConfig: @@ -268,6 +272,8 @@ func TestReadRulesConfig(t *testing.T) { assert.Equal(t, 10, rule.SampleRate) assert.Equal(t, "", rule.Scope) + assert.Equal(t, "RulesBasedSampler", name) + default: assert.Fail(t, "dataset4 should have a rules based sampler", d) } @@ -512,24 +518,29 @@ func TestGetSamplerTypes(t *testing.T) { t.Error(err) } - if d, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist"); assert.Equal(t, nil, err) { + if d, name, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist"); assert.Equal(t, nil, err) { assert.IsType(t, &DeterministicSamplerConfig{}, d) + 
assert.Equal(t, "DeterministicSampler", name) } - if d, err := c.GetSamplerConfigForDataset("dataset 1"); assert.Equal(t, nil, err) { + if d, name, err := c.GetSamplerConfigForDataset("dataset 1"); assert.Equal(t, nil, err) { assert.IsType(t, &DynamicSamplerConfig{}, d) + assert.Equal(t, "DynamicSampler", name) } - if d, err := c.GetSamplerConfigForDataset("dataset2"); assert.Equal(t, nil, err) { + if d, name, err := c.GetSamplerConfigForDataset("dataset2"); assert.Equal(t, nil, err) { assert.IsType(t, &DeterministicSamplerConfig{}, d) + assert.Equal(t, "DeterministicSampler", name) } - if d, err := c.GetSamplerConfigForDataset("dataset3"); assert.Equal(t, nil, err) { + if d, name, err := c.GetSamplerConfigForDataset("dataset3"); assert.Equal(t, nil, err) { assert.IsType(t, &EMADynamicSamplerConfig{}, d) + assert.Equal(t, "EMADynamicSampler", name) } - if d, err := c.GetSamplerConfigForDataset("dataset4"); assert.Equal(t, nil, err) { + if d, name, err := c.GetSamplerConfigForDataset("dataset4"); assert.Equal(t, nil, err) { assert.IsType(t, &TotalThroughputSamplerConfig{}, d) + assert.Equal(t, "TotalThroughputSampler", name) } } @@ -563,9 +574,10 @@ func TestDefaultSampler(t *testing.T) { assert.NoError(t, err) - s, err := c.GetSamplerConfigForDataset("nonexistent") + s, name, err := c.GetSamplerConfigForDataset("nonexistent") assert.NoError(t, err) + assert.Equal(t, "DeterministicSampler", name) assert.IsType(t, &DeterministicSamplerConfig{}, s) } diff --git a/config/config_test_reload_error_test.go b/config/config_test_reload_error_test.go index af7447e12f..0ade1fa871 100644 --- a/config/config_test_reload_error_test.go +++ b/config/config_test_reload_error_test.go @@ -1,3 +1,4 @@ +//go:build all || !race // +build all !race package config @@ -55,9 +56,12 @@ func TestErrorReloading(t *testing.T) { t.Error(err) } - d, _ := c.GetSamplerConfigForDataset("dataset5") + d, name, _ := c.GetSamplerConfigForDataset("dataset5") if _, ok := d.(DeterministicSamplerConfig); ok 
{ - t.Error("received", d, "expected", "DeterministicSampler") + t.Error("type received", d, "expected", "DeterministicSampler") + } + if name != "DeterministicSampler" { + t.Error("name received", d, "expected", "DeterministicSampler") } wg := &sync.WaitGroup{} @@ -82,7 +86,7 @@ func TestErrorReloading(t *testing.T) { wg.Wait() // config should error and not update sampler to invalid type - d, _ = c.GetSamplerConfigForDataset("dataset5") + d, _, _ = c.GetSamplerConfigForDataset("dataset5") if _, ok := d.(DeterministicSamplerConfig); ok { t.Error("received", d, "expected", "DeterministicSampler") } diff --git a/config/file_config.go b/config/file_config.go index e4067cd7ce..d881a48519 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -521,10 +521,48 @@ func (f *fileConfig) GetCollectorType() (string, error) { return f.conf.Collector, nil } -func (f *fileConfig) GetSamplerConfigForDataset(dataset string) (interface{}, error) { +func (f *fileConfig) GetAllSamplerRules() (map[string]interface{}, error) { + samplers := make(map[string]interface{}) + + keys := f.rules.AllKeys() + for _, key := range keys { + parts := strings.Split(key, ".") + + // extract default sampler rules + if parts[0] == "sampler" { + err := f.rules.Unmarshal(&samplers) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal sampler rule: %w", err) + } + t := f.rules.GetString(key) + samplers["sampler"] = t + continue + } + + // extract all dataset sampler rules + if len(parts) > 1 && parts[1] == "sampler" { + t := f.rules.GetString(key) + m := make(map[string]interface{}) + datasetName := parts[0] + if sub := f.rules.Sub(datasetName); sub != nil { + err := sub.Unmarshal(&m) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal sampler rule for dataset %s: %w", datasetName, err) + } + } + m["sampler"] = t + samplers[datasetName] = m + } + } + return samplers, nil +} + +func (f *fileConfig) GetSamplerConfigForDataset(dataset string) (interface{}, string, error) { 
f.mux.RLock() defer f.mux.RUnlock() + const notfound = "not found" + key := fmt.Sprintf("%s.Sampler", dataset) if ok := f.rules.IsSet(key); ok { t := f.rules.GetString(key) @@ -542,11 +580,11 @@ func (f *fileConfig) GetSamplerConfigForDataset(dataset string) (interface{}, er case "TotalThroughputSampler": i = &TotalThroughputSamplerConfig{} default: - return nil, errors.New("No Sampler found") + return nil, notfound, errors.New("No Sampler found") } if sub := f.rules.Sub(dataset); sub != nil { - return i, sub.Unmarshal(i) + return i, t, sub.Unmarshal(i) } } else if ok := f.rules.IsSet("Sampler"); ok { @@ -565,13 +603,13 @@ func (f *fileConfig) GetSamplerConfigForDataset(dataset string) (interface{}, er case "TotalThroughputSampler": i = &TotalThroughputSamplerConfig{} default: - return nil, errors.New("No Sampler found") + return nil, notfound, errors.New("No Sampler found") } - return i, f.rules.Unmarshal(i) + return i, t, f.rules.Unmarshal(i) } - return nil, errors.New("No Sampler found") + return nil, notfound, errors.New("No Sampler found") } func (f *fileConfig) GetInMemCollectorCacheCapacity() (InMemoryCollectorCacheCapacity, error) { diff --git a/config/mock.go b/config/mock.go index 706003c18a..1d0611fcfe 100644 --- a/config/mock.go +++ b/config/mock.go @@ -47,6 +47,7 @@ type MockConfig struct { GetUseTLSInsecureErr error GetUseTLSInsecureVal bool GetSamplerTypeErr error + GetSamplerTypeName string GetSamplerTypeVal interface{} GetMetricsTypeErr error GetMetricsTypeVal string @@ -240,11 +241,20 @@ func (m *MockConfig) GetMaxBatchSize() uint { } // TODO: allow per-dataset mock values -func (m *MockConfig) GetSamplerConfigForDataset(dataset string) (interface{}, error) { +func (m *MockConfig) GetSamplerConfigForDataset(dataset string) (interface{}, string, error) { m.Mux.RLock() defer m.Mux.RUnlock() - return m.GetSamplerTypeVal, m.GetSamplerTypeErr + return m.GetSamplerTypeVal, m.GetSamplerTypeName, m.GetSamplerTypeErr +} + +// GetAllSamplerRules returns all 
dataset rules, including the default +func (m *MockConfig) GetAllSamplerRules() (map[string]interface{}, error) { + m.Mux.RLock() + defer m.Mux.RUnlock() + + v := map[string]interface{}{"dataset1": m.GetSamplerTypeVal} + return v, m.GetSamplerTypeErr } func (m *MockConfig) GetUpstreamBufferSize() int { diff --git a/go.mod b/go.mod index 143ed8c7c2..7ead605200 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d github.com/fsnotify/fsnotify v1.5.4 github.com/go-playground/validator v9.31.0+incompatible - github.com/golang/protobuf v1.5.2 github.com/gomodule/redigo v1.8.9 github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 @@ -18,6 +17,7 @@ require ( github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.7 + github.com/pelletier/go-toml/v2 v2.0.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.2 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 @@ -27,9 +27,10 @@ require ( github.com/tidwall/gjson v1.14.1 github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.11.0 - golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect google.golang.org/grpc v1.48.0 + google.golang.org/protobuf v1.28.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 + gopkg.in/yaml.v2 v2.4.0 ) require ( @@ -41,6 +42,7 @@ require ( github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 // indirect github.com/go-playground/locales v0.13.0 // indirect github.com/go-playground/universal-translator v0.17.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/grpc-ecosystem/grpc-gateway 
v1.16.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/leodido/go-urn v1.2.0 // indirect @@ -50,7 +52,6 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect @@ -65,13 +66,12 @@ require ( github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/internal/peer/peers_test.go b/internal/peer/peers_test.go index 5d11be5085..067ae47957 100644 --- a/internal/peer/peers_test.go +++ b/internal/peer/peers_test.go @@ -1,3 +1,4 @@ +//go:build all || race // +build all race package peer diff --git a/route/otlp_trace_test.go b/route/otlp_trace_test.go index a3f13eb877..17c1ee76ee 100644 --- a/route/otlp_trace_test.go +++ b/route/otlp_trace_test.go @@ -11,7 +11,6 @@ import ( "testing" "time" - "github.com/golang/protobuf/proto" huskyotlp "github.com/honeycombio/husky/otlp" "github.com/honeycombio/refinery/config" 
"github.com/honeycombio/refinery/logger" @@ -24,6 +23,7 @@ import ( resource "go.opentelemetry.io/proto/otlp/resource/v1" trace "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" ) const legacyAPIKey = "***REMOVED***" diff --git a/route/proxy.go b/route/proxy.go index 354acd2571..faefceaef0 100644 --- a/route/proxy.go +++ b/route/proxy.go @@ -3,7 +3,6 @@ package route import ( "bytes" "io" - "io/ioutil" "net/http" "strings" ) @@ -25,7 +24,7 @@ func (r *Router) proxy(w http.ResponseWriter, req *http.Request) { // let's copy the request over to a new one and // dispatch it upstream defer req.Body.Close() - reqBod, _ := ioutil.ReadAll(req.Body) + reqBod, _ := io.ReadAll(req.Body) buf := bytes.NewBuffer(reqBod) upstreamReq, err := http.NewRequest(req.Method, upstreamTarget+req.URL.String(), buf) if err != nil { diff --git a/route/route.go b/route/route.go index 93f74e4155..51275a1dcd 100644 --- a/route/route.go +++ b/route/route.go @@ -8,22 +8,24 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "net" "net/http" "net/url" "strconv" + "strings" "sync" "time" "github.com/gorilla/mux" jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/zstd" + "github.com/pelletier/go-toml/v2" "github.com/vmihailenco/msgpack/v4" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "gopkg.in/yaml.v2" // grpc/gzip compressor, auto registers on import _ "google.golang.org/grpc/encoding/gzip" @@ -154,6 +156,8 @@ func (r *Router) LnS(incomingOrPeer string) { muxxer.HandleFunc("/panic", r.panic).Name("intentional panic") muxxer.HandleFunc("/version", r.version).Name("report version info") muxxer.HandleFunc("/debug/trace/{traceID}", r.debugTrace).Name("get debug information for given trace ID") + muxxer.HandleFunc("/debug/rules/{format}/{dataset}", r.getSamplerRules).Name("get formatted 
sampler rules for given dataset") + muxxer.HandleFunc("/debug/allrules/{format}", r.getAllSamplerRules).Name("get formatted sampler rules for all datasets") // require an auth header for events and batches authedMuxxer := muxxer.PathPrefix("/1/").Methods("POST").Subrouter() @@ -233,7 +237,8 @@ func (r *Router) LnS(incomingOrPeer string) { } func (r *Router) Stop() error { - ctx, _ := context.WithTimeout(context.Background(), time.Minute) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() err := r.server.Shutdown(ctx) if err != nil { return err @@ -264,6 +269,62 @@ func (r *Router) debugTrace(w http.ResponseWriter, req *http.Request) { w.Write([]byte(fmt.Sprintf(`{"traceID":"%s","node":"%s"}`, traceID, shard.GetAddress()))) } +func (r *Router) getSamplerRules(w http.ResponseWriter, req *http.Request) { + format := strings.ToLower(mux.Vars(req)["format"]) + dataset := mux.Vars(req)["dataset"] + cfg, name, err := r.Config.GetSamplerConfigForDataset(dataset) + if err != nil { + w.Write([]byte(fmt.Sprintf("got error %v trying to fetch config for dataset %s\n", err, dataset))) + w.WriteHeader(http.StatusBadRequest) + return + } + r.marshalToFormat(w, map[string]interface{}{name: cfg}, format) +} + +func (r *Router) getAllSamplerRules(w http.ResponseWriter, req *http.Request) { + format := strings.ToLower(mux.Vars(req)["format"]) + cfgs, err := r.Config.GetAllSamplerRules() + if err != nil { + w.Write([]byte(fmt.Sprintf("got error %v trying to fetch configs", err))) + w.WriteHeader(http.StatusBadRequest) + return + } + r.marshalToFormat(w, cfgs, format) +} + +func (r *Router) marshalToFormat(w http.ResponseWriter, obj interface{}, format string) { + var body []byte + var err error + switch format { + case "json": + body, err = json.Marshal(obj) + if err != nil { + w.Write([]byte(fmt.Sprintf("got error %v trying to marshal to json\n", err))) + w.WriteHeader(http.StatusBadRequest) + return + } + case "toml": + body, err = 
toml.Marshal(obj) + if err != nil { + w.Write([]byte(fmt.Sprintf("got error %v trying to marshal to toml\n", err))) + w.WriteHeader(http.StatusBadRequest) + return + } + case "yaml": + body, err = yaml.Marshal(obj) + if err != nil { + w.Write([]byte(fmt.Sprintf("got error %v trying to marshal to toml\n", err))) + w.WriteHeader(http.StatusBadRequest) + return + } + default: + w.Write([]byte(fmt.Sprintf("invalid format '%s' when marshaling\n", format))) + w.WriteHeader(http.StatusBadRequest) + return + } + w.Write(body) +} + // event is handler for /1/event/ func (r *Router) event(w http.ResponseWriter, req *http.Request) { r.Metrics.Increment(r.incomingOrPeer + "_router_event") @@ -275,7 +336,7 @@ func (r *Router) event(w http.ResponseWriter, req *http.Request) { return } - reqBod, err := ioutil.ReadAll(bodyReader) + reqBod, err := io.ReadAll(bodyReader) if err != nil { r.handlerReturnWithError(w, ErrPostBody, err) return @@ -351,7 +412,7 @@ func (r *Router) batch(w http.ResponseWriter, req *http.Request) { return } - reqBod, err := ioutil.ReadAll(bodyReader) + reqBod, err := io.ReadAll(bodyReader) if err != nil { r.handlerReturnWithError(w, ErrPostBody, err) return diff --git a/route/route_test.go b/route/route_test.go index c0932cbc27..1dec93a6a6 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "strings" @@ -19,6 +18,7 @@ import ( "github.com/honeycombio/refinery/logger" "github.com/honeycombio/refinery/metrics" "github.com/honeycombio/refinery/transmit" + "github.com/stretchr/testify/assert" "github.com/gorilla/mux" "github.com/honeycombio/refinery/sharder" @@ -38,7 +38,7 @@ func TestDecompression(t *testing.T) { router := &Router{zstdDecoders: decoders} req := &http.Request{ - Body: ioutil.NopCloser(pReader), + Body: io.NopCloser(pReader), Header: http.Header{}, } reader, err := 
router.getMaybeCompressedBody(req) @@ -46,7 +46,7 @@ func TestDecompression(t *testing.T) { t.Errorf("unexpected err: %s", err.Error()) } - b, err := ioutil.ReadAll(reader) + b, err := io.ReadAll(reader) if err != nil { t.Errorf("unexpected err: %s", err.Error()) } @@ -62,14 +62,14 @@ func TestDecompression(t *testing.T) { } w.Close() - req.Body = ioutil.NopCloser(buf) + req.Body = io.NopCloser(buf) req.Header.Set("Content-Encoding", "gzip") reader, err = router.getMaybeCompressedBody(req) if err != nil { t.Errorf("unexpected err: %s", err.Error()) } - b, err = ioutil.ReadAll(reader) + b, err = io.ReadAll(reader) if err != nil { t.Errorf("unexpected err: %s", err.Error()) } @@ -88,14 +88,14 @@ func TestDecompression(t *testing.T) { } zstdW.Close() - req.Body = ioutil.NopCloser(buf) + req.Body = io.NopCloser(buf) req.Header.Set("Content-Encoding", "zstd") reader, err = router.getMaybeCompressedBody(req) if err != nil { t.Errorf("unexpected err: %s", err.Error()) } - b, err = ioutil.ReadAll(reader) + b, err = io.ReadAll(reader) if err != nil { t.Errorf("unexpected err: %s", err.Error()) } @@ -123,7 +123,7 @@ func unmarshalRequest(w *httptest.ResponseRecorder, content string, body io.Read w.Write([]byte(traceID)) }).ServeHTTP(w, &http.Request{ - Body: ioutil.NopCloser(body), + Body: io.NopCloser(body), Header: http.Header{ "Content-Type": []string{content}, }, @@ -143,7 +143,7 @@ func unmarshalBatchRequest(w *httptest.ResponseRecorder, content string, body io w.Write([]byte(e.getEventTime().Format(time.RFC3339Nano))) }).ServeHTTP(w, &http.Request{ - Body: ioutil.NopCloser(body), + Body: io.NopCloser(body), Header: http.Header{ "Content-Type": []string{content}, }, @@ -319,6 +319,99 @@ func TestDebugTrace(t *testing.T) { } } +func TestDebugAllRules(t *testing.T) { + tests := []struct { + format string + expect string + }{ + { + format: "json", + expect: `{"dataset1":"FakeSamplerType"}`, + }, + { + format: "toml", + expect: "dataset1 = 'FakeSamplerType'\n", + }, + { + 
format: "yaml", + expect: "dataset1: FakeSamplerType\n", + }, + { + format: "bogus", + expect: "invalid format 'bogus' when marshaling\n", + }, + } + + for _, tt := range tests { + t.Run(tt.format, func(t *testing.T) { + + req, _ := http.NewRequest("GET", "/debug/allrules/"+tt.format, nil) + req = mux.SetURLVars(req, map[string]string{"format": tt.format}) + + rr := httptest.NewRecorder() + router := &Router{ + Config: &config.MockConfig{ + GetSamplerTypeVal: "FakeSamplerType", + }, + } + + router.getAllSamplerRules(rr, req) + assert.Equal(t, tt.expect, rr.Body.String()) + }) + } +} + +func TestDebugRules(t *testing.T) { + tests := []struct { + format string + dataset string + expect string + }{ + { + format: "json", + dataset: "dataset1", + expect: `{"FakeSamplerName":"FakeSamplerType"}`, + }, + { + format: "toml", + dataset: "dataset1", + expect: "FakeSamplerName = 'FakeSamplerType'\n", + }, + { + format: "yaml", + dataset: "dataset1", + expect: "FakeSamplerName: FakeSamplerType\n", + }, + { + format: "bogus", + dataset: "dataset1", + expect: "invalid format 'bogus' when marshaling\n", + }, + } + + for _, tt := range tests { + t.Run(tt.format, func(t *testing.T) { + + req, _ := http.NewRequest("GET", "/debug/rules/"+tt.format+"/"+tt.format, nil) + req = mux.SetURLVars(req, map[string]string{ + "format": tt.format, + "dataset": tt.dataset, + }) + + rr := httptest.NewRecorder() + router := &Router{ + Config: &config.MockConfig{ + GetSamplerTypeVal: "FakeSamplerType", + GetSamplerTypeName: "FakeSamplerName", + }, + } + + router.getSamplerRules(rr, req) + assert.Equal(t, tt.expect, rr.Body.String()) + }) + } +} + func TestDependencyInjection(t *testing.T) { var g inject.Graph err := g.Provide( diff --git a/sample/sample.go b/sample/sample.go index 1186005b26..70e1024dd3 100644 --- a/sample/sample.go +++ b/sample/sample.go @@ -31,7 +31,7 @@ func (s *SamplerFactory) GetSamplerImplementationForKey(samplerKey string, isLeg } } - c, err := 
s.Config.GetSamplerConfigForDataset(samplerKey) + c, _, err := s.Config.GetSamplerConfigForDataset(samplerKey) if err != nil { return nil } From 3e149f66df6bb8aeac2e07da758d8ffd273e95fa Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Tue, 6 Sep 2022 15:41:16 -0400 Subject: [PATCH 215/351] Rename /debug endpoints to /query and add auth (#502) ## Which problem is this PR solving? - Adds a local query token to allow the changes in #500 to be protected by a locally-defined token in a header. ## Short description of the changes - Before #500, there was a single /debug/trace endpoint, but there is also an optional /debug endpoint intended for use with a debugger and profiler. So I renamed /debug to /query for the trace and config. - I added to these new /query endpoints a local configuration value `QueryAuthToken` that can be specified in the config file or in the environment. If specified, the `X-Honeycomb-Refinery-Query` header must be specified on a query request. - If the QueryAuthToken is not specified in the configuration, the /query endpoints are not accessible. Note that because the `/debug/trace` request has been renamed and is now protected by a token, this is technically a breaking change (although `/debug/trace` isn't intended for regular operation and wasn't documented outside of the source code). --- README.md | 9 +- config/config.go | 3 + config/config_test.go | 306 ++++++++++++++++-------------------------- config/file_config.go | 9 ++ config/mock.go | 8 ++ config_complete.toml | 8 ++ route/middleware.go | 20 +++ route/route.go | 11 +- types/event.go | 1 + 9 files changed, 179 insertions(+), 196 deletions(-) diff --git a/README.md b/README.md index 2c5112678a..eed8dbbcef 100644 --- a/README.md +++ b/README.md @@ -139,17 +139,18 @@ The default logging level of `warn` is almost entirely silent. 
The `debug` level ### Configuration -Because the normal configuration file formats (TOML and YAML) can sometimes be confusing to read and write, it may be valuable to check the loaded configuration by using one of the debug endpoints from the command line: +Because the normal configuration file formats (TOML and YAML) can sometimes be confusing to read and write, it may be valuable to check the loaded configuration by using one of the `/query` endpoints from the command line on a server that can access a refinery host. -`curl --include --get $REFINERY_HOST/debug/allrules/$FORMAT` will retrieve the entire rules configuration. +The `/query` endpoints are protected and can be enabled by specifying `QueryAuthToken` in the configuration file or specifying `REFINERY_QUERY_AUTH_TOKEN` in the environment. All requests to any `/query` endpoint must include the header `X-Honeycomb-Refinery-Query` set to the value of the specified token. -`curl --include --get $REFINERY_HOST/debug/rules/$FORMAT/$DATASET` will retrieve the rule set that refinery will use for the specified dataset. It comes back as a map of the sampler type to its rule set. +`curl --include --get $REFINERY_HOST/query/allrules/$FORMAT --header "x-honeycomb-refinery-query: my-local-token"` will retrieve the entire rules configuration. + +`curl --include --get $REFINERY_HOST/query/rules/$FORMAT/$DATASET --header "x-honeycomb-refinery-query: my-local-token"` will retrieve the rule set that refinery will use for the specified dataset. It comes back as a map of the sampler type to its rule set. - `$REFINERY_HOST` should be the url of your refinery. - `$FORMAT` can be one of `json`, `yaml`, or `toml`. - `$DATASET` is the name of the dataset you want to check. - ## Restarts Refinery does not yet buffer traces or sampling decisions to disk. When you restart the process all in-flight traces will be flushed (sent upstream to Honeycomb), but you will lose the record of past trace decisions. 
When started back up, it will start with a clean slate. diff --git a/config/config.go b/config/config.go index 72711000a6..0c3c1457a6 100644 --- a/config/config.go +++ b/config/config.go @@ -144,4 +144,7 @@ type Config interface { GetEnvironmentCacheTTL() time.Duration GetDatasetPrefix() string + + // GetQueryAuthToken returns the token that must be used to access the /query endpoints + GetQueryAuthToken() string } diff --git a/config/config_test.go b/config/config_test.go index edf5e51715..b5a1ea4b71 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -81,18 +81,33 @@ func TestRedisPasswordEnvVar(t *testing.T) { } } -func TestReload(t *testing.T) { +func createTempConfigs(t *testing.T, configBody string, rulesBody string) (string, string) { tmpDir, err := ioutil.TempDir("", "") assert.NoError(t, err) - defer os.RemoveAll(tmpDir) - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") + configFile, err := ioutil.TempFile(tmpDir, "*.toml") assert.NoError(t, err) - configFile, err := ioutil.TempFile(tmpDir, "*.toml") + if configBody != "" { + _, err = configFile.WriteString(configBody) + assert.NoError(t, err) + } + configFile.Close() + + rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") assert.NoError(t, err) - dummy := []byte(` + if rulesBody != "" { + _, err = rulesFile.WriteString(rulesBody) + assert.NoError(t, err) + } + rulesFile.Close() + + return configFile.Name(), rulesFile.Name() +} + +func TestReload(t *testing.T) { + config, rules := createTempConfigs(t, ` ListenAddr="0.0.0.0:8080" [InMemCollector] @@ -103,13 +118,12 @@ func TestReload(t *testing.T) { MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 - `) + `, "") + defer os.Remove(rules) + defer os.Remove(config) - _, err = configFile.Write(dummy) + c, err := NewConfig(config, rules, func(err error) {}) assert.NoError(t, err) - configFile.Close() - - c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) if err != nil { t.Error(err) @@ 
-153,7 +167,7 @@ func TestReload(t *testing.T) { } }() - if file, err := os.OpenFile(configFile.Name(), os.O_RDWR, 0644); err == nil { + if file, err := os.OpenFile(config, os.O_RDWR, 0644); err == nil { file.WriteString(`ListenAddr = "0.0.0.0:9000"`) file.Close() } @@ -280,17 +294,7 @@ func TestReadRulesConfig(t *testing.T) { } func TestPeerManagementType(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) - - configFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - _, err = configFile.Write([]byte(` + config, rules := createTempConfigs(t, ` [InMemCollector] CacheCapacity=1000 @@ -303,9 +307,11 @@ func TestPeerManagementType(t *testing.T) { [PeerManagement] Type = "redis" Peers = ["http://refinery-1231:8080"] - `)) + `, "") + defer os.Remove(rules) + defer os.Remove(config) - c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + c, err := NewConfig(config, rules, func(err error) {}) assert.NoError(t, err) if d, _ := c.GetPeerManagementType(); d != "redis" { @@ -314,57 +320,34 @@ func TestPeerManagementType(t *testing.T) { } func TestAbsentTraceKeyField(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) - - configFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - _, err = configFile.Write([]byte(` - [InMemCollector] - CacheCapacity=1000 - - [HoneycombMetrics] - MetricsHoneycombAPI="http://honeycomb.io" - MetricsAPIKey="1234" - MetricsDataset="testDatasetName" - MetricsReportingInterval=3 - `)) - assert.NoError(t, err) - - _, err = rulesFile.Write([]byte(` - [dataset1] - Sampler = "EMADynamicSampler" - GoalSampleRate = 10 - UseTraceLength = true - AddSampleRateKeyToTrace = true - FieldList = "[request.method]" - Weight 
= 0.4 - `)) + config, rules := createTempConfigs(t, ` + [InMemCollector] + CacheCapacity=1000 - rulesFile.Close() + [HoneycombMetrics] + MetricsHoneycombAPI="http://honeycomb.io" + MetricsAPIKey="1234" + MetricsDataset="testDatasetName" + MetricsReportingInterval=3 + `, ` + [dataset1] + Sampler = "EMADynamicSampler" + GoalSampleRate = 10 + UseTraceLength = true + AddSampleRateKeyToTrace = true + FieldList = "[request.method]" + Weight = 0.4 + `) + defer os.Remove(rules) + defer os.Remove(config) - _, err = NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + _, err := NewConfig(config, rules, func(err error) {}) assert.Error(t, err) assert.Contains(t, err.Error(), "Error:Field validation for 'AddSampleRateKeyToTraceField'") } func TestDebugServiceAddr(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) - - configFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - _, err = configFile.Write([]byte(` + config, rules := createTempConfigs(t, ` DebugServiceAddr = "localhost:8085" [InMemCollector] @@ -375,9 +358,11 @@ func TestDebugServiceAddr(t *testing.T) { MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 - `)) + `, "") + defer os.Remove(rules) + defer os.Remove(config) - c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + c, err := NewConfig(config, rules, func(err error) {}) assert.NoError(t, err) if d, _ := c.GetDebugServiceAddr(); d != "localhost:8085" { @@ -386,14 +371,7 @@ func TestDebugServiceAddr(t *testing.T) { } func TestDryRun(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) - - configFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - _, err = configFile.Write([]byte(` + config, rules := createTempConfigs(t, ` [InMemCollector] 
CacheCapacity=1000 @@ -402,16 +380,13 @@ func TestDryRun(t *testing.T) { MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 - `)) - - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - _, err = rulesFile.Write([]byte(` + `, ` DryRun=true - `)) + `) + defer os.Remove(rules) + defer os.Remove(config) - c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + c, err := NewConfig(config, rules, func(err error) {}) assert.NoError(t, err) if d := c.GetIsDryRun(); d != true { @@ -420,17 +395,7 @@ func TestDryRun(t *testing.T) { } func TestMaxAlloc(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) - - configFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - _, err = configFile.Write([]byte(` + config, rules := createTempConfigs(t, ` [InMemCollector] CacheCapacity=1000 MaxAlloc=17179869184 @@ -440,9 +405,11 @@ func TestMaxAlloc(t *testing.T) { MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 - `)) + `, "") + defer os.Remove(rules) + defer os.Remove(config) - c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + c, err := NewConfig(config, rules, func(err error) {}) assert.NoError(t, err) expected := uint64(16 * 1024 * 1024 * 1024) @@ -452,14 +419,7 @@ func TestMaxAlloc(t *testing.T) { } func TestGetSamplerTypes(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) - - configFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - _, err = configFile.Write([]byte(` + config, rules := createTempConfigs(t, ` [InMemCollector] CacheCapacity=1000 @@ -468,12 +428,7 @@ func TestGetSamplerTypes(t *testing.T) { MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 - `)) - - rulesFile, 
err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - dummyConfig := []byte(` + `, ` Sampler = "DeterministicSampler" SampleRate = 2 @@ -506,17 +461,12 @@ func TestGetSamplerTypes(t *testing.T) { Sampler = "TotalThroughputSampler" GoalThroughputPerSec = 100 FieldList = "[request.method]" -`) + `) + defer os.Remove(rules) + defer os.Remove(config) - _, err = rulesFile.Write(dummyConfig) + c, err := NewConfig(config, rules, func(err error) {}) assert.NoError(t, err) - rulesFile.Close() - - c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) - - if err != nil { - t.Error(err) - } if d, name, err := c.GetSamplerConfigForDataset("dataset-doesnt-exist"); assert.Equal(t, nil, err) { assert.IsType(t, &DeterministicSamplerConfig{}, d) @@ -545,17 +495,7 @@ func TestGetSamplerTypes(t *testing.T) { } func TestDefaultSampler(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) - - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - configFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - dummy := []byte(` + config, rules := createTempConfigs(t, ` [InMemCollector] CacheCapacity=1000 @@ -564,13 +504,11 @@ func TestDefaultSampler(t *testing.T) { MetricsAPIKey="1234" MetricsDataset="testDatasetName" MetricsReportingInterval=3 - `) + `, "") + defer os.Remove(rules) + defer os.Remove(config) - _, err = configFile.Write(dummy) - assert.NoError(t, err) - configFile.Close() - - c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + c, err := NewConfig(config, rules, func(err error) {}) assert.NoError(t, err) @@ -583,17 +521,7 @@ func TestDefaultSampler(t *testing.T) { } func TestHoneycombLoggerConfig(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) - - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - configFile, err := 
ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - dummy := []byte(` + config, rules := createTempConfigs(t, ` [InMemCollector] CacheCapacity=1000 @@ -609,14 +537,11 @@ func TestHoneycombLoggerConfig(t *testing.T) { LoggerDataset="loggerDataset" LoggerSamplerEnabled=true LoggerSamplerThroughput=10 - `) - - _, err = configFile.Write(dummy) - assert.NoError(t, err) - configFile.Close() - - c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + `, "") + defer os.Remove(rules) + defer os.Remove(config) + c, err := NewConfig(config, rules, func(err error) {}) assert.NoError(t, err) loggerConfig, err := c.GetHoneycombLoggerConfig() @@ -631,17 +556,7 @@ func TestHoneycombLoggerConfig(t *testing.T) { } func TestHoneycombLoggerConfigDefaults(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) - - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - configFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - dummy := []byte(` + config, rules := createTempConfigs(t, ` [InMemCollector] CacheCapacity=1000 @@ -655,14 +570,11 @@ func TestHoneycombLoggerConfigDefaults(t *testing.T) { LoggerHoneycombAPI="http://honeycomb.io" LoggerAPIKey="1234" LoggerDataset="loggerDataset" - `) - - _, err = configFile.Write(dummy) - assert.NoError(t, err) - configFile.Close() - - c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + `, "") + defer os.Remove(rules) + defer os.Remove(config) + c, err := NewConfig(config, rules, func(err error) {}) assert.NoError(t, err) loggerConfig, err := c.GetHoneycombLoggerConfig() @@ -674,14 +586,7 @@ func TestHoneycombLoggerConfigDefaults(t *testing.T) { } func TestDatasetPrefix(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) - - configFile, err := ioutil.TempFile(tmpDir, "*.toml") - assert.NoError(t, err) - - _, err = 
configFile.Write([]byte(` + config, rules := createTempConfigs(t, ` DatasetPrefix = "dataset" [InMemCollector] @@ -697,15 +602,38 @@ func TestDatasetPrefix(t *testing.T) { LoggerHoneycombAPI="http://honeycomb.io" LoggerAPIKey="1234" LoggerDataset="loggerDataset" - `)) - assert.NoError(t, err) - configFile.Close() + `, "") + defer os.Remove(rules) + defer os.Remove(config) - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") + c, err := NewConfig(config, rules, func(err error) {}) assert.NoError(t, err) - c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + assert.Equal(t, "dataset", c.GetDatasetPrefix()) +} + +func TestQueryAuthToken(t *testing.T) { + config, rules := createTempConfigs(t, ` + QueryAuthToken = "MySeekretToken" + + [InMemCollector] + CacheCapacity=1000 + + [HoneycombMetrics] + MetricsHoneycombAPI="http://honeycomb.io" + MetricsAPIKey="1234" + MetricsDataset="testDatasetName" + MetricsReportingInterval=3 + + [HoneycombLogger] + LoggerHoneycombAPI="http://honeycomb.io" + LoggerAPIKey="1234" + LoggerDataset="loggerDataset" `, "") + defer os.Remove(rules) + defer os.Remove(config) + + c, err := NewConfig(config, rules, func(err error) {}) assert.NoError(t, err) - assert.Equal(t, "dataset", c.GetDatasetPrefix()) + assert.Equal(t, "MySeekretToken", c.GetQueryAuthToken()) } diff --git a/config/file_config.go b/config/file_config.go index d881a48519..8488c8afcb 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -50,6 +50,7 @@ type configContents struct { AddHostMetadataToTrace bool EnvironmentCacheTTL time.Duration DatasetPrefix string + QueryAuthToken string } type InMemoryCollectorCacheCapacity struct { @@ -103,6 +104,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.BindEnv("PeerManagement.RedisPassword", "REFINERY_REDIS_PASSWORD") c.BindEnv("HoneycombLogger.LoggerAPIKey", "REFINERY_HONEYCOMB_API_KEY") c.BindEnv("HoneycombMetrics.MetricsAPIKey", "REFINERY_HONEYCOMB_API_KEY") + 
c.BindEnv("QueryAuthToken", "REFINERY_QUERY_AUTH_TOKEN") c.SetDefault("ListenAddr", "0.0.0.0:8080") c.SetDefault("PeerListenAddr", "0.0.0.0:8081") c.SetDefault("CompressPeerCommunication", true) @@ -782,3 +784,10 @@ func (f *fileConfig) GetDatasetPrefix() string { return f.conf.DatasetPrefix } + +func (f *fileConfig) GetQueryAuthToken() string { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.QueryAuthToken +} diff --git a/config/mock.go b/config/mock.go index 1d0611fcfe..1b53b38552 100644 --- a/config/mock.go +++ b/config/mock.go @@ -73,6 +73,7 @@ type MockConfig struct { AddHostMetadataToTrace bool EnvironmentCacheTTL time.Duration DatasetPrefix string + QueryAuthToken string Mux sync.RWMutex } @@ -346,3 +347,10 @@ func (f *MockConfig) GetDatasetPrefix() string { return f.DatasetPrefix } + +func (f *MockConfig) GetQueryAuthToken() string { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.QueryAuthToken +} diff --git a/config_complete.toml b/config_complete.toml index 0401a4c902..d62d8dd95a 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -105,6 +105,14 @@ AddHostMetadataToTrace = false # Not eligible for live reload. EnvironmentCacheTTL = "1h" +# QueryAuthToken, if specified, provides a token that must be specified with +# the header "X-Honeycomb-Refinery-Query" in order for a /query request to succeed. +# These /query requests are intended for debugging refinery installations and +# are not typically needed in normal operation. +# Can be specified in the environment as REFINERY_QUERY_AUTH_TOKEN. +# If left unspecified, the /query endpoints are inaccessible. 
+# QueryAuthToken = "some-random-value" + ############################ ## Implementation Choices ## ############################ diff --git a/route/middleware.go b/route/middleware.go index 7836433853..262908dfce 100644 --- a/route/middleware.go +++ b/route/middleware.go @@ -17,6 +17,26 @@ func init() { rand.Seed(time.Now().UnixNano()) } +func (r *Router) queryTokenChecker(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + requiredToken := r.Config.GetQueryAuthToken() + if requiredToken == "" { + err := fmt.Errorf("/query endpoint is not authorized for use (specify QueryAuthToken in config)") + r.handlerReturnWithError(w, ErrAuthNeeded, err) + } + + token := req.Header.Get(types.QueryTokenHeader) + if token == requiredToken { + // if they're equal (including both blank) we're good + next.ServeHTTP(w, req) + return + } + + err := fmt.Errorf("token %s found in %s not authorized for query", token, types.QueryTokenHeader) + r.handlerReturnWithError(w, ErrAuthNeeded, err) + }) +} + func (r *Router) apiKeyChecker(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { apiKey := req.Header.Get(types.APIKeyHeader) diff --git a/route/route.go b/route/route.go index 51275a1dcd..8c5730ae55 100644 --- a/route/route.go +++ b/route/route.go @@ -155,9 +155,14 @@ func (r *Router) LnS(incomingOrPeer string) { muxxer.HandleFunc("/alive", r.alive).Name("local health") muxxer.HandleFunc("/panic", r.panic).Name("intentional panic") muxxer.HandleFunc("/version", r.version).Name("report version info") - muxxer.HandleFunc("/debug/trace/{traceID}", r.debugTrace).Name("get debug information for given trace ID") - muxxer.HandleFunc("/debug/rules/{format}/{dataset}", r.getSamplerRules).Name("get formatted sampler rules for given dataset") - muxxer.HandleFunc("/debug/allrules/{format}", r.getAllSamplerRules).Name("get formatted sampler rules for all datasets") + + // require a local 
auth for query usage + queryMuxxer := muxxer.PathPrefix("/query/").Methods("GET").Subrouter() + queryMuxxer.Use(r.queryTokenChecker) + + queryMuxxer.HandleFunc("/trace/{traceID}", r.debugTrace).Name("get debug information for given trace ID") + queryMuxxer.HandleFunc("/rules/{format}/{dataset}", r.getSamplerRules).Name("get formatted sampler rules for given dataset") + queryMuxxer.HandleFunc("/allrules/{format}", r.getAllSamplerRules).Name("get formatted sampler rules for all datasets") // require an auth header for events and batches authedMuxxer := muxxer.PathPrefix("/1/").Methods("POST").Subrouter() diff --git a/types/event.go b/types/event.go index 5eea293c1b..43ca9a2d48 100644 --- a/types/event.go +++ b/types/event.go @@ -12,6 +12,7 @@ const ( DatasetHeader = "X-Honeycomb-Dataset" SampleRateHeader = "X-Honeycomb-Samplerate" TimestampHeader = "X-Honeycomb-Event-Time" + QueryTokenHeader = "X-Honeycomb-Refinery-Query" ) // used to put a request ID into the request context for logging From ffaf5a249ed934d696efd7baea3e2b70e0929cb8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Sep 2022 21:10:10 +0000 Subject: [PATCH 216/351] Bump google.golang.org/grpc from 1.48.0 to 1.49.0 (#494) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7ead605200..7bab3ab9fb 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/tidwall/gjson v1.14.1 github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.11.0 - google.golang.org/grpc v1.48.0 + google.golang.org/grpc v1.49.0 google.golang.org/protobuf v1.28.0 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index fb23fa7902..71acd07473 100644 --- a/go.sum +++ b/go.sum @@ -631,8 +631,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG 
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 8fb4c869279d7c5e8ef551b4624d0d845dd2899b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Sep 2022 21:11:04 +0000 Subject: [PATCH 217/351] Bump github.com/klauspost/compress from 1.15.7 to 1.15.9 (#495) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 7bab3ab9fb..2f27c1df77 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/honeycombio/libhoney-go v1.16.0 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.15.7 + github.com/klauspost/compress v1.15.9 github.com/pelletier/go-toml/v2 v2.0.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.2 diff --git a/go.sum b/go.sum index 71acd07473..473733fa79 100644 --- a/go.sum +++ b/go.sum @@ -210,8 +210,9 @@ 
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok= github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= From de050e5a201382309928b22dd3d60f8355c55134 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Sep 2022 21:16:55 +0000 Subject: [PATCH 218/351] Bump github.com/tidwall/gjson from 1.14.1 to 1.14.3 (#497) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2f27c1df77..ba5b03c5bd 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/sirupsen/logrus v1.9.0 github.com/spf13/viper v1.12.0 github.com/stretchr/testify v1.8.0 - github.com/tidwall/gjson v1.14.1 + github.com/tidwall/gjson v1.14.3 github.com/vmihailenco/msgpack/v4 v4.3.11 
go.opentelemetry.io/proto/otlp v0.11.0 google.golang.org/grpc v1.49.0 diff --git a/go.sum b/go.sum index 473733fa79..e6297e8132 100644 --- a/go.sum +++ b/go.sum @@ -306,8 +306,8 @@ github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PK github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= -github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo= -github.com/tidwall/gjson v1.14.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw= +github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= From f510e49d5d043ebbfb960d21816dab0c4feabc54 Mon Sep 17 00:00:00 2001 From: Aaron Batilo Date: Wed, 7 Sep 2022 03:48:14 -0600 Subject: [PATCH 219/351] Implement grpc-health-probe (#498) --- go.mod | 8 ++++---- go.sum | 23 ++++++++--------------- route/mock.go | 17 +++++++++++++++++ route/route.go | 16 ++++++++++++++++ route/route_test.go | 39 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 84 insertions(+), 19 deletions(-) create mode 100644 route/mock.go diff --git a/go.mod b/go.mod index ba5b03c5bd..82d01e15f5 100644 --- a/go.mod +++ b/go.mod @@ -27,8 +27,9 @@ require ( github.com/tidwall/gjson v1.14.3 github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.11.0 + 
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b // indirect google.golang.org/grpc v1.49.0 - google.golang.org/protobuf v1.28.0 + google.golang.org/protobuf v1.28.1 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/yaml.v2 v2.4.0 ) @@ -66,11 +67,10 @@ require ( github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect - golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect + golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect + google.golang.org/genproto v0.0.0-20220902135211-223410557253 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index e6297e8132..ef04f2a52b 100644 --- a/go.sum +++ b/go.sum @@ -64,7 +64,6 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -75,7 +74,6 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= @@ -158,7 +156,6 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -407,10 +404,9 @@ 
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y= -golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b h1:ZmngSVLe/wycRns9MKikG9OWIEjGcGAkacif7oYQaUY= +golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -471,16 +467,15 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -488,7 +483,6 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text 
v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -610,8 +604,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= -google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220902135211-223410557253 h1:vXJMM8Shg7TGaYxZsQ++A/FOSlbDmDtWhS/o+3w/hj4= +google.golang.org/genproto v0.0.0-20220902135211-223410557253/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -631,7 +625,6 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -647,8 +640,8 @@ 
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= diff --git a/route/mock.go b/route/mock.go new file mode 100644 index 0000000000..a2c8abd9fa --- /dev/null +++ b/route/mock.go @@ -0,0 +1,17 @@ +package route + +import "google.golang.org/grpc/health/grpc_health_v1" + +type MockGRPCHealthWatchServer struct { + grpc_health_v1.Health_WatchServer + sentMessages []*grpc_health_v1.HealthCheckResponse +} + +func (m *MockGRPCHealthWatchServer) Send(msg *grpc_health_v1.HealthCheckResponse) error { + m.sentMessages = append(m.sentMessages, msg) + return nil +} + +func (m *MockGRPCHealthWatchServer) GetSentMessages() []*grpc_health_v1.HealthCheckResponse { + return m.sentMessages +} diff --git a/route/route.go b/route/route.go index 8c5730ae55..5cfa633bf1 100644 --- a/route/route.go +++ b/route/route.go @@ -23,6 +23,7 @@ import ( "github.com/pelletier/go-toml/v2" "github.com/vmihailenco/msgpack/v4" "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" 
"gopkg.in/yaml.v2" @@ -227,6 +228,7 @@ func (r *Router) LnS(incomingOrPeer string) { } r.grpcServer = grpc.NewServer(serverOpts...) collectortrace.RegisterTraceServiceServer(r.grpcServer, r) + grpc_health_v1.RegisterHealthServer(r.grpcServer, r) go r.grpcServer.Serve(l) } @@ -843,3 +845,17 @@ func (r *Router) lookupEnvironment(apiKey string) (string, error) { r.Logger.Debug().WithString("environment", authinfo.Environment.Name).Logf("Got environment") return authinfo.Environment.Name, nil } + +func (r *Router) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { + r.iopLogger.Debug().Logf("answered grpc_health_v1 check") + return &grpc_health_v1.HealthCheckResponse{ + Status: grpc_health_v1.HealthCheckResponse_SERVING, + }, nil +} + +func (r *Router) Watch(req *grpc_health_v1.HealthCheckRequest, server grpc_health_v1.Health_WatchServer) error { + r.iopLogger.Debug().Logf("serving grpc_health_v1 watch") + return server.Send(&grpc_health_v1.HealthCheckResponse{ + Status: grpc_health_v1.HealthCheckResponse_SERVING, + }) +} diff --git a/route/route_test.go b/route/route_test.go index 1dec93a6a6..cbfc6d9260 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -3,6 +3,7 @@ package route import ( "bytes" "compress/gzip" + "context" "errors" "fmt" "io" @@ -24,6 +25,7 @@ import ( "github.com/honeycombio/refinery/sharder" "github.com/klauspost/compress/zstd" "github.com/vmihailenco/msgpack/v4" + "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/metadata" ) @@ -518,3 +520,40 @@ func TestEnvironmentCache(t *testing.T) { } }) } + +func TestGRPCHealthProbeCheck(t *testing.T) { + router := &Router{ + Config: &config.MockConfig{}, + iopLogger: iopLogger{ + Logger: &logger.MockLogger{}, + incomingOrPeer: "incoming", + }, + } + + req := &grpc_health_v1.HealthCheckRequest{} + resp, err := router.Check(context.Background(), req) + if err != nil { + 
t.Errorf(`Unexpected error: %s`, err) + } + assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, resp.Status) +} + +func TestGRPCHealthProbeWatch(t *testing.T) { + router := &Router{ + Config: &config.MockConfig{}, + iopLogger: iopLogger{ + Logger: &logger.MockLogger{}, + incomingOrPeer: "incoming", + }, + } + + mockServer := &MockGRPCHealthWatchServer{} + err := router.Watch(&grpc_health_v1.HealthCheckRequest{}, mockServer) + if err != nil { + t.Errorf(`Unexpected error: %s`, err) + } + assert.Equal(t, 1, len(mockServer.GetSentMessages())) + + sentMessage := mockServer.GetSentMessages()[0] + assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, sentMessage.Status) +} From e9da6eaa54a642288fc24f09e26b7d5dafcb774f Mon Sep 17 00:00:00 2001 From: Aaron Batilo Date: Wed, 7 Sep 2022 06:35:56 -0600 Subject: [PATCH 220/351] Make gRPC ServerParameters configurable (#499) --- config/config.go | 10 +++++++ config/config_test.go | 46 ++++++++++++++++++++++++++++++ config/file_config.go | 52 ++++++++++++++++++++++++++++++++++ config/mock.go | 65 +++++++++++++++++++++++++++++++++++++++++++ config_complete.toml | 49 ++++++++++++++++++++++++++++++++ route/route.go | 8 ++++-- 6 files changed, 227 insertions(+), 3 deletions(-) diff --git a/config/config.go b/config/config.go index 0c3c1457a6..4c35458424 100644 --- a/config/config.go +++ b/config/config.go @@ -147,4 +147,14 @@ type Config interface { // GetQueryAuthToken returns the token that must be used to access the /query endpoints GetQueryAuthToken() string + + GetGRPCMaxConnectionIdle() time.Duration + + GetGRPCMaxConnectionAge() time.Duration + + GetGRPCMaxConnectionAgeGrace() time.Duration + + GetGRPCTime() time.Duration + + GetGRPCTimeout() time.Duration } diff --git a/config/config_test.go b/config/config_test.go index b5a1ea4b71..2baf79f408 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -637,3 +637,49 @@ func TestQueryAuthToken(t *testing.T) { assert.Equal(t, "MySeekretToken", 
c.GetQueryAuthToken()) } + +func TestGRPCServerParameters(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "") + assert.NoError(t, err) + defer os.RemoveAll(tmpDir) + + configFile, err := ioutil.TempFile(tmpDir, "*.toml") + assert.NoError(t, err) + + _, err = configFile.Write([]byte(` + [GRPCServerParameters] + MaxConnectionIdle = "1m" + MaxConnectionAge = "2m" + MaxConnectionAgeGrace = "3m" + Time = "4m" + Timeout = "5m" + + [InMemCollector] + CacheCapacity=1000 + + [HoneycombMetrics] + MetricsHoneycombAPI="http://honeycomb.io" + MetricsAPIKey="1234" + MetricsDataset="testDatasetName" + MetricsReportingInterval=3 + + [HoneycombLogger] + LoggerHoneycombAPI="http://honeycomb.io" + LoggerAPIKey="1234" + LoggerDataset="loggerDataset" + `)) + assert.NoError(t, err) + configFile.Close() + + rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") + assert.NoError(t, err) + + c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) + assert.NoError(t, err) + + assert.Equal(t, 1*time.Minute, c.GetGRPCMaxConnectionIdle()) + assert.Equal(t, 2*time.Minute, c.GetGRPCMaxConnectionAge()) + assert.Equal(t, 3*time.Minute, c.GetGRPCMaxConnectionAgeGrace()) + assert.Equal(t, 4*time.Minute, c.GetGRPCTime()) + assert.Equal(t, 5*time.Minute, c.GetGRPCTimeout()) +} diff --git a/config/file_config.go b/config/file_config.go index 8488c8afcb..dc59da8c9c 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -51,6 +51,7 @@ type configContents struct { EnvironmentCacheTTL time.Duration DatasetPrefix string QueryAuthToken string + GRPCServerParameters GRPCServerParameters } type InMemoryCollectorCacheCapacity struct { @@ -94,6 +95,17 @@ type PeerManagementConfig struct { RedisIdentifier string } +// GRPCServerParameters allow you to configure the GRPC ServerParameters used +// by refinery's own GRPC server: +// https://pkg.go.dev/google.golang.org/grpc/keepalive#ServerParameters +type GRPCServerParameters struct { + MaxConnectionIdle time.Duration + 
MaxConnectionAge time.Duration + MaxConnectionAgeGrace time.Duration + Time time.Duration + Timeout time.Duration +} + // NewConfig creates a new config struct func NewConfig(config, rules string, errorCallback func(error)) (Config, error) { c := viper.New() @@ -130,6 +142,11 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("HoneycombLogger.LoggerSamplerThroughput", 5) c.SetDefault("AddHostMetadataToTrace", false) c.SetDefault("EnvironmentCacheTTL", time.Hour) + c.SetDefault("GRPCServerParameters.MaxConnectionIdle", 1*time.Minute) + c.SetDefault("GRPCServerParameters.MaxConnectionAge", time.Duration(0)) + c.SetDefault("GRPCServerParameters.MaxConnectionAgeGrace", time.Duration(0)) + c.SetDefault("GRPCServerParameters.Time", 10*time.Second) + c.SetDefault("GRPCServerParameters.Timeout", 2*time.Second) c.SetConfigFile(config) err := c.ReadInConfig() @@ -791,3 +808,38 @@ func (f *fileConfig) GetQueryAuthToken() string { return f.conf.QueryAuthToken } + +func (f *fileConfig) GetGRPCMaxConnectionIdle() time.Duration { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.GRPCServerParameters.MaxConnectionIdle +} + +func (f *fileConfig) GetGRPCMaxConnectionAge() time.Duration { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.GRPCServerParameters.MaxConnectionAge +} + +func (f *fileConfig) GetGRPCMaxConnectionAgeGrace() time.Duration { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.GRPCServerParameters.MaxConnectionAgeGrace +} + +func (f *fileConfig) GetGRPCTime() time.Duration { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.GRPCServerParameters.Time +} + +func (f *fileConfig) GetGRPCTimeout() time.Duration { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.GRPCServerParameters.Timeout +} diff --git a/config/mock.go b/config/mock.go index 1b53b38552..f6a621f1fb 100644 --- a/config/mock.go +++ b/config/mock.go @@ -74,6 +74,11 @@ type MockConfig struct { EnvironmentCacheTTL 
time.Duration DatasetPrefix string QueryAuthToken string + GRPCMaxConnectionIdle time.Duration + GRPCMaxConnectionAge time.Duration + GRPCMaxConnectionAgeGrace time.Duration + GRPCTime time.Duration + GRPCTimeout time.Duration Mux sync.RWMutex } @@ -86,77 +91,90 @@ func (m *MockConfig) ReloadConfig() { callback() } } + func (m *MockConfig) RegisterReloadCallback(callback func()) { m.Mux.Lock() m.Callbacks = append(m.Callbacks, callback) m.Mux.Unlock() } + func (m *MockConfig) GetAPIKeys() ([]string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetAPIKeysVal, m.GetAPIKeysErr } + func (m *MockConfig) GetCollectorType() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetCollectorTypeVal, m.GetCollectorTypeErr } + func (m *MockConfig) GetInMemCollectorCacheCapacity() (InMemoryCollectorCacheCapacity, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetInMemoryCollectorCacheCapacityVal, m.GetInMemoryCollectorCacheCapacityErr } + func (m *MockConfig) GetHoneycombAPI() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetHoneycombAPIVal, m.GetHoneycombAPIErr } + func (m *MockConfig) GetListenAddr() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetListenAddrVal, m.GetListenAddrErr } + func (m *MockConfig) GetPeerListenAddr() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetPeerListenAddrVal, m.GetPeerListenAddrErr } + func (m *MockConfig) GetCompressPeerCommunication() bool { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetCompressPeerCommunicationsVal } + func (m *MockConfig) GetGRPCListenAddr() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetGRPCListenAddrVal, m.GetGRPCListenAddrErr } + func (m *MockConfig) GetLoggerType() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetLoggerTypeVal, m.GetLoggerTypeErr } + func (m *MockConfig) GetHoneycombLoggerConfig() (HoneycombLoggerConfig, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetHoneycombLoggerConfigVal, 
m.GetHoneycombLoggerConfigErr } + func (m *MockConfig) GetLoggingLevel() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetLoggingLevelVal, m.GetLoggingLevelErr } + func (m *MockConfig) GetOtherConfig(name string, iface interface{}) error { m.Mux.RLock() defer m.Mux.RUnlock() @@ -167,66 +185,77 @@ func (m *MockConfig) GetOtherConfig(name string, iface interface{}) error { } return m.GetOtherConfigErr } + func (m *MockConfig) GetPeers() ([]string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetPeersVal, m.GetPeersErr } + func (m *MockConfig) GetRedisHost() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetRedisHostVal, m.GetRedisHostErr } + func (m *MockConfig) GetRedisUsername() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetRedisUsernameVal, m.GetRedisUsernameErr } + func (m *MockConfig) GetRedisPassword() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetRedisPasswordVal, m.GetRedisPasswordErr } + func (m *MockConfig) GetUseTLS() (bool, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetUseTLSVal, m.GetUseTLSErr } + func (m *MockConfig) GetUseTLSInsecure() (bool, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetUseTLSInsecureVal, m.GetUseTLSInsecureErr } + func (m *MockConfig) GetMetricsType() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetMetricsTypeVal, m.GetMetricsTypeErr } + func (m *MockConfig) GetHoneycombMetricsConfig() (HoneycombMetricsConfig, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetHoneycombMetricsConfigVal, m.GetHoneycombMetricsConfigErr } + func (m *MockConfig) GetPrometheusMetricsConfig() (PrometheusMetricsConfig, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetPrometheusMetricsConfigVal, m.GetPrometheusMetricsConfigErr } + func (m *MockConfig) GetSendDelay() (time.Duration, error) { m.Mux.RLock() defer m.Mux.RUnlock() return m.GetSendDelayVal, m.GetSendDelayErr } + func (m *MockConfig) GetTraceTimeout() 
(time.Duration, error) { m.Mux.RLock() defer m.Mux.RUnlock() @@ -264,6 +293,7 @@ func (m *MockConfig) GetUpstreamBufferSize() int { return m.GetUpstreamBufferSizeVal } + func (m *MockConfig) GetPeerBufferSize() int { m.Mux.RLock() defer m.Mux.RUnlock() @@ -354,3 +384,38 @@ func (f *MockConfig) GetQueryAuthToken() string { return f.QueryAuthToken } + +func (f *MockConfig) GetGRPCMaxConnectionIdle() time.Duration { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.GRPCMaxConnectionIdle +} + +func (f *MockConfig) GetGRPCMaxConnectionAge() time.Duration { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.GRPCMaxConnectionAge +} + +func (f *MockConfig) GetGRPCMaxConnectionAgeGrace() time.Duration { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.GRPCMaxConnectionAgeGrace +} + +func (f *MockConfig) GetGRPCTime() time.Duration { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.GRPCTime +} + +func (f *MockConfig) GetGRPCTimeout() time.Duration { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.GRPCTimeout +} diff --git a/config_complete.toml b/config_complete.toml index d62d8dd95a..a4f08fa870 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -323,3 +323,52 @@ MetricsReportingInterval = 3 # listener. # Not eligible for live reload. # MetricsListenAddr = "localhost:2112" + +########################### +## gRPC ServerParameters ## +########################### + +# Reflects: https://pkg.go.dev/google.golang.org/grpc/keepalive#ServerParameters + +[GRPCServerParameters] + +# MaxConnectionIdle is a duration for the amount of time after which an +# idle connection would be closed by sending a GoAway. Idleness duration is +# defined since the most recent time the number of outstanding RPCs became +# zero or the connection establishment. 
+# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219 +# Not eligible for live reload. +# MaxConnectionIdle = "1m" + +# MaxConnectionAge is a duration for the maximum amount of time a +# connection may exist before it will be closed by sending a GoAway. A +# random jitter of +/-10% will be added to MaxConnectionAge to spread out +# connection storms. +# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 +# Not eligible for live reload. +# MaxConnectionAge = "0s" + +# MaxConnectionAgeGrace is an additive period after MaxConnectionAge after +# which the connection will be forcibly closed. +# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 +# Not eligible for live reload. +# MaxConnectionAgeGrace = "0s" + +# After a duration of this time if the server doesn't see any activity it +# pings the client to see if the transport is still alive. +# If set below 1s, a minimum value of 1s will be used instead. +# 0s sets duration to 2 hours which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 +# Not eligible for live reload. +# Time = "10s" + +# After having pinged for keepalive check, the server waits for a duration +# of Timeout and if no activity is seen even after that the connection is +# closed. +# 0s sets duration to 20 seconds which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 +# Not eligible for live reload. 
+# Timeout = "2s" diff --git a/route/route.go b/route/route.go index 5cfa633bf1..18af0b03a8 100644 --- a/route/route.go +++ b/route/route.go @@ -221,9 +221,11 @@ func (r *Router) LnS(incomingOrPeer string) { grpc.MaxSendMsgSize(GRPCMessageSizeMax), // default is math.MaxInt32 grpc.MaxRecvMsgSize(GRPCMessageSizeMax), // default is 4MB grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: 10 * time.Second, - Timeout: 2 * time.Second, - MaxConnectionIdle: time.Minute, + MaxConnectionIdle: r.Config.GetGRPCMaxConnectionIdle(), + MaxConnectionAge: r.Config.GetGRPCMaxConnectionAge(), + MaxConnectionAgeGrace: r.Config.GetGRPCMaxConnectionAgeGrace(), + Time: r.Config.GetGRPCTime(), + Timeout: r.Config.GetGRPCTimeout(), }), } r.grpcServer = grpc.NewServer(serverOpts...) From 1e52617b2a9ed55d9251975986d25ce23b0a26ca Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Wed, 7 Sep 2022 08:47:29 -0400 Subject: [PATCH 221/351] Fix sample rate for late spans (#504) --- collect/collect.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/collect/collect.go b/collect/collect.go index e6d23f3c0c..7fdff1b474 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -358,7 +358,7 @@ func (i *InMemCollector) processSpan(sp *types.Span) { } // if the trace we got back from the cache has already been sent, deal with the // span. 
- if trace.Sent == true { + if trace.Sent { i.dealWithSentTrace(trace.KeepSample, trace.SampleRate, sp) } @@ -394,6 +394,9 @@ func (i *InMemCollector) dealWithSentTrace(keep bool, sampleRate uint, sp *types } if keep { i.Logger.Debug().WithField("trace_id", sp.TraceID).Logf("Sending span because of previous decision to send trace") + if sp.SampleRate < 1 { + sp.SampleRate = 1 + } sp.SampleRate *= sampleRate i.Transmission.EnqueueSpan(sp) return @@ -414,7 +417,7 @@ func isRootSpan(sp *types.Span) bool { } func (i *InMemCollector) send(trace *types.Trace) { - if trace.Sent == true { + if trace.Sent { // someone else already sent this so we shouldn't also send it. This happens // when two timers race and two signals for the same trace are sent down the // toSend channel @@ -426,7 +429,7 @@ func (i *InMemCollector) send(trace *types.Trace) { } trace.Sent = true - traceDur := time.Now().Sub(trace.StartTime) + traceDur := time.Since(trace.StartTime) i.Metrics.Histogram("trace_duration_ms", float64(traceDur.Milliseconds())) i.Metrics.Histogram("trace_span_count", float64(len(trace.GetSpans()))) if trace.HasRootSpan { From 24402b7c5f517073d88694e44e7ca9c6acd8f5c7 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Wed, 7 Sep 2022 09:52:52 -0400 Subject: [PATCH 222/351] Optionally record why a sample decision was made (#503) ## Which problem is this PR solving? - Fixes #473 ## Short description of the changes - Adds a config flag, `AddRuleReasonToTrace` that adds a new field to traces called `meta.refinery.reason`, which is a string field that contains information about the sampler(s) that made a trace decision. If it was a rule-based sampler, it includes the rule name. - Also removes unnecessary build flags that were causing tests not to run. 
- Updates README and config file --- README.md | 4 +++ app/app_test.go | 10 +++----- collect/cache/cache_test.go | 2 -- collect/collect.go | 19 +++++++++----- collect/collect_benchmark_test.go | 2 -- collect/collect_test.go | 8 +++--- config/config.go | 2 ++ config/config_test.go | 3 --- config/config_test_reload_error_test.go | 1 - config/file_config.go | 9 +++++++ config/mock.go | 8 ++++++ config_complete.toml | 4 +++ internal/peer/file_test.go | 2 -- internal/peer/peers_test.go | 3 --- logger/logger_test.go | 2 -- metrics/metrics_test.go | 2 -- metrics/prometheus_test.go | 2 -- route/errors_test.go | 2 -- sample/deterministic.go | 6 ++--- sample/deterministic_test.go | 5 ++-- sample/dynamic.go | 4 +-- sample/dynamic_ema.go | 4 +-- sample/dynamic_ema_test.go | 2 -- sample/dynamic_test.go | 2 -- sample/rules.go | 17 +++++++++---- sample/rules_test.go | 34 +++++++++++++++++++++---- sample/sample.go | 2 +- sample/sample_test.go | 2 -- sample/totalthroughput.go | 6 ++--- sample/totalthroughput_test.go | 2 -- sample/trace_key_test.go | 2 -- sharder/deterministic_test.go | 3 --- sharder/sharder_test.go | 2 -- transmit/transmit_test.go | 2 -- 34 files changed, 100 insertions(+), 80 deletions(-) diff --git a/README.md b/README.md index eed8dbbcef..e7c8f4b7d6 100644 --- a/README.md +++ b/README.md @@ -151,6 +151,10 @@ The `/query` endpoints are protected and can be enabled by specifying `QueryAuth - `$FORMAT` can be one of `json`, `yaml`, or `toml`. - `$DATASET` is the name of the dataset you want to check. +### Sampling + +Refinery can send telemetry that includes information that can help debug the sampling decisions that are made. To enable it, in the config file, set `AddRuleReasonToTrace` to `true`. This will cause traces that are sent to Honeycomb to include a field `meta.refinery.reason`, which will contain text indicating which rule was evaluated that caused the trace to be included. + ## Restarts Refinery does not yet buffer traces or sampling decisions to disk. 
When you restart the process all in-flight traces will be flushed (sent upstream to Honeycomb), but you will lose the record of past trace decisions. When started back up, it will start with a clean slate. diff --git a/app/app_test.go b/app/app_test.go index 5c464ca5f6..b2ff8b2379 100644 --- a/app/app_test.go +++ b/app/app_test.go @@ -1,5 +1,3 @@ -// +build all race - package app import ( @@ -262,8 +260,8 @@ func TestAppIntegrationWithNonLegacyKey(t *testing.T) { var out bytes.Buffer a, graph := newStartedApp(t, &transmission.WriterSender{W: &out}, 10500, nil, false) - a.IncomingRouter.SetEnvironmentCache(time.Second, func(s string) (string, error) {return "test", nil}) - a.PeerRouter.SetEnvironmentCache(time.Second, func(s string) (string, error) {return "test", nil}) + a.IncomingRouter.SetEnvironmentCache(time.Second, func(s string) (string, error) { return "test", nil }) + a.PeerRouter.SetEnvironmentCache(time.Second, func(s string) (string, error) { return "test", nil }) // Send a root span, it should be sent in short order. 
req := httptest.NewRequest( @@ -561,8 +559,8 @@ func TestEventsEndpointWithNonLegacyKey(t *testing.T) { basePort := 15000 + (i * 2) senders[i] = &transmission.MockSender{} app, graph := newStartedApp(t, senders[i], basePort, peers, false) - app.IncomingRouter.SetEnvironmentCache(time.Second, func(s string) (string, error) { return "test", nil}) - app.PeerRouter.SetEnvironmentCache(time.Second, func(s string) (string, error) { return "test", nil}) + app.IncomingRouter.SetEnvironmentCache(time.Second, func(s string) (string, error) { return "test", nil }) + app.PeerRouter.SetEnvironmentCache(time.Second, func(s string) (string, error) { return "test", nil }) apps[i] = app defer startstop.Stop(graph.Objects(), nil) diff --git a/collect/cache/cache_test.go b/collect/cache/cache_test.go index bc14b14d6f..d385aa0259 100644 --- a/collect/cache/cache_test.go +++ b/collect/cache/cache_test.go @@ -1,5 +1,3 @@ -// +build all race - package cache import ( diff --git a/collect/collect.go b/collect/collect.go index 7fdff1b474..fbdece7540 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -343,12 +343,13 @@ func (i *InMemCollector) processSpan(sp *types.Span) { } trace = &types.Trace{ - APIHost: sp.APIHost, - APIKey: sp.APIKey, - Dataset: sp.Dataset, - TraceID: sp.TraceID, - StartTime: time.Now(), - SendBy: time.Now().Add(timeout), + APIHost: sp.APIHost, + APIKey: sp.APIKey, + Dataset: sp.Dataset, + TraceID: sp.TraceID, + StartTime: time.Now(), + SendBy: time.Now().Add(timeout), + SampleRate: sp.SampleRate, // if it had a sample rate, we want to keep it } // push this into the cache and if we eject an unsent trace, send it ASAP ejectedTrace := i.cache.Set(trace) @@ -459,9 +460,10 @@ func (i *InMemCollector) send(trace *types.Trace) { } // make sampling decision and update the trace - rate, shouldSend := sampler.GetSampleRate(trace) + rate, shouldSend, reason := sampler.GetSampleRate(trace) trace.SampleRate = rate trace.KeepSample = shouldSend + logFields["reason"] = 
reason // record this decision in the sent record LRU for future spans sentRecord := traceSentRecord{ @@ -484,6 +486,9 @@ func (i *InMemCollector) send(trace *types.Trace) { } i.Logger.Info().WithFields(logFields).Logf("Sending trace") for _, sp := range trace.GetSpans() { + if i.Config.GetAddRuleReasonToTrace() { + sp.Data["meta.refinery.reason"] = reason + } if sp.SampleRate < 1 { sp.SampleRate = 1 } diff --git a/collect/collect_benchmark_test.go b/collect/collect_benchmark_test.go index 5fbc753fec..fda9ea7014 100644 --- a/collect/collect_benchmark_test.go +++ b/collect/collect_benchmark_test.go @@ -1,5 +1,3 @@ -// +build all race - package collect import ( diff --git a/collect/collect_test.go b/collect/collect_test.go index 968ba002f3..25fdc3d851 100644 --- a/collect/collect_test.go +++ b/collect/collect_test.go @@ -1,5 +1,3 @@ -// +build all race - package collect import ( @@ -210,12 +208,12 @@ func TestDryRunMode(t *testing.T) { var traceID2 = "def456" var traceID3 = "ghi789" // sampling decisions based on trace ID - _, keepTraceID1 := sampler.GetSampleRate(&types.Trace{TraceID: traceID1}) + _, keepTraceID1, _ := sampler.GetSampleRate(&types.Trace{TraceID: traceID1}) // would be dropped if dry run mode was not enabled assert.False(t, keepTraceID1) - _, keepTraceID2 := sampler.GetSampleRate(&types.Trace{TraceID: traceID2}) + _, keepTraceID2, _ := sampler.GetSampleRate(&types.Trace{TraceID: traceID2}) assert.True(t, keepTraceID2) - _, keepTraceID3 := sampler.GetSampleRate(&types.Trace{TraceID: traceID3}) + _, keepTraceID3, _ := sampler.GetSampleRate(&types.Trace{TraceID: traceID3}) // would be dropped if dry run mode was not enabled assert.False(t, keepTraceID3) diff --git a/config/config.go b/config/config.go index 4c35458424..60f8989e79 100644 --- a/config/config.go +++ b/config/config.go @@ -141,6 +141,8 @@ type Config interface { GetAddHostMetadataToTrace() bool + GetAddRuleReasonToTrace() bool + GetEnvironmentCacheTTL() time.Duration GetDatasetPrefix() 
string diff --git a/config/config_test.go b/config/config_test.go index 2baf79f408..59c87c14f5 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,6 +1,3 @@ -//go:build all || race -// +build all race - package config import ( diff --git a/config/config_test_reload_error_test.go b/config/config_test_reload_error_test.go index 0ade1fa871..d86574cdb8 100644 --- a/config/config_test_reload_error_test.go +++ b/config/config_test_reload_error_test.go @@ -1,5 +1,4 @@ //go:build all || !race -// +build all !race package config diff --git a/config/file_config.go b/config/file_config.go index dc59da8c9c..8853830327 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -48,6 +48,7 @@ type configContents struct { PeerManagement PeerManagementConfig `validate:"required"` InMemCollector InMemoryCollectorCacheCapacity `validate:"required"` AddHostMetadataToTrace bool + AddRuleReasonToTrace bool EnvironmentCacheTTL time.Duration DatasetPrefix string QueryAuthToken string @@ -141,6 +142,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("HoneycombLogger.LoggerSamplerEnabled", false) c.SetDefault("HoneycombLogger.LoggerSamplerThroughput", 5) c.SetDefault("AddHostMetadataToTrace", false) + c.SetDefault("AddRuleReasonToTrace", false) c.SetDefault("EnvironmentCacheTTL", time.Hour) c.SetDefault("GRPCServerParameters.MaxConnectionIdle", 1*time.Minute) c.SetDefault("GRPCServerParameters.MaxConnectionAge", time.Duration(0)) @@ -788,6 +790,13 @@ func (f *fileConfig) GetAddHostMetadataToTrace() bool { return f.conf.AddHostMetadataToTrace } +func (f *fileConfig) GetAddRuleReasonToTrace() bool { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.AddRuleReasonToTrace +} + func (f *fileConfig) GetEnvironmentCacheTTL() time.Duration { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config/mock.go b/config/mock.go index f6a621f1fb..aa90acca43 100644 --- a/config/mock.go +++ b/config/mock.go @@ -71,6 +71,7 @@ type 
MockConfig struct { DryRun bool DryRunFieldName string AddHostMetadataToTrace bool + AddRuleReasonToTrace bool EnvironmentCacheTTL time.Duration DatasetPrefix string QueryAuthToken string @@ -364,6 +365,13 @@ func (m *MockConfig) GetAddHostMetadataToTrace() bool { return m.AddHostMetadataToTrace } +func (m *MockConfig) GetAddRuleReasonToTrace() bool { + m.Mux.RLock() + defer m.Mux.RUnlock() + + return m.AddRuleReasonToTrace +} + func (f *MockConfig) GetEnvironmentCacheTTL() time.Duration { f.Mux.RLock() defer f.Mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index a4f08fa870..2fd4c02174 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -113,6 +113,10 @@ EnvironmentCacheTTL = "1h" # If left unspecified, the /query endpoints are inaccessible. # QueryAuthToken = "some-random-value" +# AddRuleReasonToTrace causes traces that are sent to Honeycomb to include the field `meta.refinery.reason`. +# This field contains text indicating which rule was evaluated that caused the trace to be included. 
+# AddRuleReasonToTrace = true + ############################ ## Implementation Choices ## ############################ diff --git a/internal/peer/file_test.go b/internal/peer/file_test.go index b8e453087a..f913cbb385 100644 --- a/internal/peer/file_test.go +++ b/internal/peer/file_test.go @@ -1,5 +1,3 @@ -// +build all race - package peer import ( diff --git a/internal/peer/peers_test.go b/internal/peer/peers_test.go index 067ae47957..b411477981 100644 --- a/internal/peer/peers_test.go +++ b/internal/peer/peers_test.go @@ -1,6 +1,3 @@ -//go:build all || race -// +build all race - package peer import ( diff --git a/logger/logger_test.go b/logger/logger_test.go index b0ed975630..f4241db311 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -1,5 +1,3 @@ -// +build all race - package logger import ( diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go index 0d4b6f0765..1abe097a36 100644 --- a/metrics/metrics_test.go +++ b/metrics/metrics_test.go @@ -1,3 +1 @@ -// +build all race - package metrics diff --git a/metrics/prometheus_test.go b/metrics/prometheus_test.go index 539f926a41..6ff27073e7 100644 --- a/metrics/prometheus_test.go +++ b/metrics/prometheus_test.go @@ -1,5 +1,3 @@ -// +build all race - package metrics import ( diff --git a/route/errors_test.go b/route/errors_test.go index 4351dec469..7ceaba5a69 100644 --- a/route/errors_test.go +++ b/route/errors_test.go @@ -1,5 +1,3 @@ -// +build all race - package route import ( diff --git a/sample/deterministic.go b/sample/deterministic.go index 57f4c761c7..fce06adebe 100644 --- a/sample/deterministic.go +++ b/sample/deterministic.go @@ -35,11 +35,11 @@ func (d *DeterministicSampler) Start() error { return nil } -func (d *DeterministicSampler) GetSampleRate(trace *types.Trace) (rate uint, keep bool) { +func (d *DeterministicSampler) GetSampleRate(trace *types.Trace) (rate uint, keep bool, reason string) { if d.sampleRate <= 1 { - return 1, true + return 1, true, "deterministic/always" } sum 
:= sha1.Sum([]byte(trace.TraceID + shardingSalt)) v := binary.BigEndian.Uint32(sum[:4]) - return uint(d.sampleRate), v <= d.upperBound + return uint(d.sampleRate), v <= d.upperBound, "deterministic/chance" } diff --git a/sample/deterministic_test.go b/sample/deterministic_test.go index 01d1f4af53..cd9e129ace 100644 --- a/sample/deterministic_test.go +++ b/sample/deterministic_test.go @@ -1,5 +1,3 @@ -// +build all race - package sample import ( @@ -52,9 +50,10 @@ func TestGetSampleRate(t *testing.T) { ds.Start() for i, tst := range tsts { - rate, keep := ds.GetSampleRate(tst.trace) + rate, keep, reason := ds.GetSampleRate(tst.trace) assert.Equal(t, uint(10), rate, "sample rate should be fixed") assert.Equal(t, tst.sampled, keep, "%d: trace ID %s should be %v", i, tst.trace.TraceID, tst.sampled) + assert.Equal(t, "deterministic/chance", reason) } } diff --git a/sample/dynamic.go b/sample/dynamic.go index eda9e3d3ec..f543ac36c0 100644 --- a/sample/dynamic.go +++ b/sample/dynamic.go @@ -50,7 +50,7 @@ func (d *DynamicSampler) Start() error { return nil } -func (d *DynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool) { +func (d *DynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool, string) { key := d.key.buildAndAdd(trace) rate := d.dynsampler.GetSampleRate(key) if rate < 1 { // protect against dynsampler being broken even though it shouldn't be @@ -69,5 +69,5 @@ func (d *DynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool) { d.Metrics.Increment("dynsampler_num_dropped") } d.Metrics.Histogram("dynsampler_sample_rate", float64(rate)) - return uint(rate), shouldKeep + return uint(rate), shouldKeep, "dynamic/" + key } diff --git a/sample/dynamic_ema.go b/sample/dynamic_ema.go index 1fd44d8b21..3483e1a94b 100644 --- a/sample/dynamic_ema.go +++ b/sample/dynamic_ema.go @@ -62,7 +62,7 @@ func (d *EMADynamicSampler) Start() error { return nil } -func (d *EMADynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool) { +func (d *EMADynamicSampler) 
GetSampleRate(trace *types.Trace) (uint, bool, string) { key := d.key.buildAndAdd(trace) rate := d.dynsampler.GetSampleRate(key) if rate < 1 { // protect against dynsampler being broken even though it shouldn't be @@ -81,5 +81,5 @@ func (d *EMADynamicSampler) GetSampleRate(trace *types.Trace) (uint, bool) { d.Metrics.Increment("dynsampler_num_dropped") } d.Metrics.Histogram("dynsampler_sample_rate", float64(rate)) - return uint(rate), shouldKeep + return uint(rate), shouldKeep, "emadynamic/" + key } diff --git a/sample/dynamic_ema_test.go b/sample/dynamic_ema_test.go index c295938038..6b36e56a96 100644 --- a/sample/dynamic_ema_test.go +++ b/sample/dynamic_ema_test.go @@ -1,5 +1,3 @@ -// +build all race - package sample import ( diff --git a/sample/dynamic_test.go b/sample/dynamic_test.go index f472d234d3..e2405f4c75 100644 --- a/sample/dynamic_test.go +++ b/sample/dynamic_test.go @@ -1,5 +1,3 @@ -// +build all race - package sample import ( diff --git a/sample/rules.go b/sample/rules.go index 7aa1664e77..e1219a2621 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -59,30 +59,35 @@ func (s *RulesBasedSampler) Start() error { return nil } -func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep bool) { +func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep bool, reason string) { logger := s.Logger.Debug().WithFields(map[string]interface{}{ "trace_id": trace.TraceID, }) for _, rule := range s.Config.Rule { var matched bool + var reason string switch rule.Scope { case "span": matched = ruleMatchesSpanInTrace(trace, rule, s.Config.CheckNestedFields) + reason = "rules/span/" case "trace", "": matched = ruleMatchesTrace(trace, rule, s.Config.CheckNestedFields) + reason = "rules/trace/" default: logger.WithFields(map[string]interface{}{ "rule_name": rule.Name, "scope": rule.Scope, }).Logf("invalid scope %s given for rule: %s", rule.Scope, rule.Name) matched = true + reason = "rules/invalid scope/" } if matched { var rate 
uint var keep bool + var samplerReason string if rule.Sampler != nil { var sampler Sampler @@ -91,12 +96,14 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b logger.WithFields(map[string]interface{}{ "rule_name": rule.Name, }).Logf("could not find downstream sampler for rule: %s", rule.Name) - return 1, true + return 1, true, reason + "bad_rule:" + rule.Name } - rate, keep = sampler.GetSampleRate(trace) + rate, keep, samplerReason = sampler.GetSampleRate(trace) + reason += rule.Name + ":" + samplerReason } else { rate = uint(rule.SampleRate) keep = !rule.Drop && rule.SampleRate > 0 && rand.Intn(rule.SampleRate) == 0 + reason += rule.Name } s.Metrics.Histogram("rulessampler_sample_rate", float64(rule.SampleRate)) @@ -110,11 +117,11 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b "keep": keep, "drop_rule": rule.Drop, }).Logf("got sample rate and decision") - return rate, keep + return rate, keep, reason } } - return 1, true + return 1, true, "no rule matched" } func ruleMatchesTrace(t *types.Trace, rule *config.RulesBasedSamplerRule, checkNestedFields bool) bool { diff --git a/sample/rules_test.go b/sample/rules_test.go index 2f7183f1bb..3957844790 100644 --- a/sample/rules_test.go +++ b/sample/rules_test.go @@ -15,6 +15,7 @@ type TestRulesData struct { Spans []*types.Span ExpectedRate uint ExpectedKeep bool + ExpectedName string } func TestRules(t *testing.T) { @@ -162,6 +163,7 @@ func TestRules(t *testing.T) { }, ExpectedKeep: true, ExpectedRate: 10, + ExpectedName: "fallback", }, { Rules: &config.RulesBasedSamplerConfig{ @@ -298,6 +300,7 @@ func TestRules(t *testing.T) { ExpectedKeep: true, // the trace does not match all the rules so we expect the default sample rate ExpectedRate: 1, + ExpectedName: "no rule matched", }, { Rules: &config.RulesBasedSamplerConfig{ @@ -508,9 +511,14 @@ func TestRules(t *testing.T) { trace.AddSpan(span) } - rate, keep := sampler.GetSampleRate(trace) + rate, keep, 
reason := sampler.GetSampleRate(trace) assert.Equal(t, d.ExpectedRate, rate, d.Rules) + name := d.ExpectedName + if name == "" { + name = d.Rules.Rule[0].Name + } + assert.Contains(t, reason, name) // we can only test when we don't expect to keep the trace if !d.ExpectedKeep { @@ -640,6 +648,7 @@ func TestRulesWithNestedFields(t *testing.T) { }, ExpectedKeep: true, ExpectedRate: 1, + ExpectedName: "no rule matched", }, } @@ -656,9 +665,14 @@ func TestRulesWithNestedFields(t *testing.T) { trace.AddSpan(span) } - rate, keep := sampler.GetSampleRate(trace) + rate, keep, reason := sampler.GetSampleRate(trace) assert.Equal(t, d.ExpectedRate, rate, d.Rules) + name := d.ExpectedName + if name == "" { + name = d.Rules.Rule[0].Name + } + assert.Contains(t, reason, name) // we can only test when we don't expect to keep the trace if !d.ExpectedKeep { @@ -729,9 +743,14 @@ func TestRulesWithDynamicSampler(t *testing.T) { } sampler.Start() - rate, keep := sampler.GetSampleRate(trace) + rate, keep, reason := sampler.GetSampleRate(trace) assert.Equal(t, d.ExpectedRate, rate, d.Rules) + name := d.ExpectedName + if name == "" { + name = d.Rules.Rule[0].Name + } + assert.Contains(t, reason, name) // we can only test when we don't expect to keep the trace if !d.ExpectedKeep { @@ -812,9 +831,14 @@ func TestRulesWithEMADynamicSampler(t *testing.T) { } sampler.Start() - rate, keep := sampler.GetSampleRate(trace) + rate, keep, reason := sampler.GetSampleRate(trace) assert.Equal(t, d.ExpectedRate, rate, d.Rules) + name := d.ExpectedName + if name == "" { + name = d.Rules.Rule[0].Name + } + assert.Contains(t, reason, name) // we can only test when we don't expect to keep the trace if !d.ExpectedKeep { @@ -929,7 +953,7 @@ func TestRuleMatchesSpanMatchingSpan(t *testing.T) { } sampler.Start() - rate, keep := sampler.GetSampleRate(trace) + rate, keep, _ := sampler.GetSampleRate(trace) assert.Equal(t, uint(1), rate, rate) if scope == "span" { diff --git a/sample/sample.go b/sample/sample.go 
index 70e1024dd3..5b0031ab70 100644 --- a/sample/sample.go +++ b/sample/sample.go @@ -11,7 +11,7 @@ import ( ) type Sampler interface { - GetSampleRate(trace *types.Trace) (rate uint, keep bool) + GetSampleRate(trace *types.Trace) (rate uint, keep bool, reason string) Start() error } diff --git a/sample/sample_test.go b/sample/sample_test.go index aa74161837..e06f70aeb7 100644 --- a/sample/sample_test.go +++ b/sample/sample_test.go @@ -1,5 +1,3 @@ -// +build all race - package sample import ( diff --git a/sample/totalthroughput.go b/sample/totalthroughput.go index d31ca68b81..53ac306412 100644 --- a/sample/totalthroughput.go +++ b/sample/totalthroughput.go @@ -45,7 +45,7 @@ func (d *TotalThroughputSampler) Start() error { } d.dynsampler.Start() - // Register stastics this package will produce + // Register statistics this package will produce d.Metrics.Register("dynsampler_num_dropped", "counter") d.Metrics.Register("dynsampler_num_kept", "counter") d.Metrics.Register("dynsampler_sample_rate", "histogram") @@ -53,7 +53,7 @@ func (d *TotalThroughputSampler) Start() error { return nil } -func (d *TotalThroughputSampler) GetSampleRate(trace *types.Trace) (uint, bool) { +func (d *TotalThroughputSampler) GetSampleRate(trace *types.Trace) (uint, bool, string) { key := d.key.buildAndAdd(trace) rate := d.dynsampler.GetSampleRate(key) if rate < 1 { // protect against dynsampler being broken even though it shouldn't be @@ -72,5 +72,5 @@ func (d *TotalThroughputSampler) GetSampleRate(trace *types.Trace) (uint, bool) d.Metrics.Increment("dynsampler_num_dropped") } d.Metrics.Histogram("dynsampler_sample_rate", float64(rate)) - return uint(rate), shouldKeep + return uint(rate), shouldKeep, "totalthroughput/" + key } diff --git a/sample/totalthroughput_test.go b/sample/totalthroughput_test.go index edef7d42a0..2d885bb799 100644 --- a/sample/totalthroughput_test.go +++ b/sample/totalthroughput_test.go @@ -1,5 +1,3 @@ -// +build all race - package sample import ( diff --git 
a/sample/trace_key_test.go b/sample/trace_key_test.go index e074b8fce8..c6ce600004 100644 --- a/sample/trace_key_test.go +++ b/sample/trace_key_test.go @@ -1,5 +1,3 @@ -// +build all race - package sample import ( diff --git a/sharder/deterministic_test.go b/sharder/deterministic_test.go index 9fc85839c7..ab99d1133f 100644 --- a/sharder/deterministic_test.go +++ b/sharder/deterministic_test.go @@ -1,6 +1,3 @@ -//go:build all || race -// +build all race - package sharder import ( diff --git a/sharder/sharder_test.go b/sharder/sharder_test.go index f2a5fde6ee..d286617db2 100644 --- a/sharder/sharder_test.go +++ b/sharder/sharder_test.go @@ -1,3 +1 @@ -// +build all race - package sharder diff --git a/transmit/transmit_test.go b/transmit/transmit_test.go index efb0a954b0..ba18895375 100644 --- a/transmit/transmit_test.go +++ b/transmit/transmit_test.go @@ -1,5 +1,3 @@ -// +build all race - package transmit import ( From dc5cee176ebec87d18c5fee3b5558ff064031867 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Wed, 7 Sep 2022 11:22:23 -0400 Subject: [PATCH 223/351] Update Husky to latest (#505) ## Which problem is this PR solving? 
- Updates Husky to our 0.15.0 - Closes #493 ## Short description of the changes - Update husky - Replace otel package imports with husky versions, eg[go.opentelemetry.io/otel](http://go.opentelemetry.io/otel) => [github.com/honeycombio/husky/otel](http://github.com/honeycombio/husky/otel) - Replace any usage of InstrumentationLibrary* with Scope equivalents, eg InstrumentationLibrary => InstrumentationScope - Fix test that verified that OTLP/HTTP didn't work to one that shows it does (at a basic level) --- go.mod | 3 ++- go.sum | 7 ++++-- route/otlp_trace.go | 2 +- route/otlp_trace_test.go | 49 +++++++++++++++++++++++++--------------- route/route.go | 2 +- 5 files changed, 40 insertions(+), 23 deletions(-) diff --git a/go.mod b/go.mod index 82d01e15f5..312221d127 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.10.6 + github.com/honeycombio/husky v0.15.0 github.com/honeycombio/libhoney-go v1.16.0 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 @@ -45,6 +45,7 @@ require ( github.com/go-playground/universal-translator v0.17.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/leodido/go-urn v1.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect diff --git a/go.sum b/go.sum index ef04f2a52b..89b1b2abd7 100644 --- a/go.sum +++ b/go.sum @@ -115,6 +115,7 @@ github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+Pugk 
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -180,6 +181,8 @@ github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 h1:/sDbPb60SusIXjiJGYLUoS/rAQurQmvGWmwn2bBPM9c= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -188,8 +191,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod 
h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.10.6 h1:jU/lXqo7Qz6e9eUJErIH3Lst2gjKWSJ4oAXYjFSXkn0= -github.com/honeycombio/husky v0.10.6/go.mod h1:i69+3xApIcsQn9PPeFRndTOOw6edNG66TxTTd+L6oq0= +github.com/honeycombio/husky v0.15.0 h1:RuYmKC1jHSB6y3C7pSXf889w8AaSzZ1JkIkLaiXlC5o= +github.com/honeycombio/husky v0.15.0/go.mod h1:ZO2AzJnnJkUoXbIQ5qLCvQuWdQOG/BZEup8pf2y5ua0= github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/honeycombio/libhoney-go v1.16.0/go.mod h1:izP4fbREuZ3vqC4HlCAmPrcPT9gxyxejRjGtCYpmBn0= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 6b75b29acd..f4e86c4d9f 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -8,7 +8,7 @@ import ( huskyotlp "github.com/honeycombio/husky/otlp" "github.com/honeycombio/refinery/types" - collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" + collectortrace "github.com/honeycombio/husky/proto/otlp/collector/trace/v1" ) func (router *Router) postOTLP(w http.ResponseWriter, req *http.Request) { diff --git a/route/otlp_trace_test.go b/route/otlp_trace_test.go index 17c1ee76ee..1cfb6d24f9 100644 --- a/route/otlp_trace_test.go +++ b/route/otlp_trace_test.go @@ -12,17 +12,18 @@ import ( "time" huskyotlp "github.com/honeycombio/husky/otlp" + collectortrace "github.com/honeycombio/husky/proto/otlp/collector/trace/v1" + common "github.com/honeycombio/husky/proto/otlp/common/v1" + resource 
"github.com/honeycombio/husky/proto/otlp/resource/v1" + trace "github.com/honeycombio/husky/proto/otlp/trace/v1" "github.com/honeycombio/refinery/config" "github.com/honeycombio/refinery/logger" "github.com/honeycombio/refinery/metrics" "github.com/honeycombio/refinery/transmit" "github.com/klauspost/compress/zstd" "github.com/stretchr/testify/assert" - collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" - common "go.opentelemetry.io/proto/otlp/common/v1" - resource "go.opentelemetry.io/proto/otlp/resource/v1" - trace "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" ) @@ -67,7 +68,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("span with status", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: helperOTLPRequestSpansWithStatus(), }}, }}, @@ -83,7 +84,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("span without status", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: helperOTLPRequestSpansWithoutStatus(), }}, }}, @@ -105,7 +106,7 @@ func TestOTLPHandler(t *testing.T) { spanID := []byte{1, 0, 0, 0, 0} req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: []*trace.Span{{ TraceId: traceID, SpanId: spanID, @@ -153,7 +154,7 @@ func TestOTLPHandler(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: 
[]*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: []*trace.Span{{ Name: "span_with_link", TraceId: traceID, @@ -191,7 +192,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("can receive OTLP over HTTP/protobuf", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: helperOTLPRequestSpansWithStatus(), }}, }}, @@ -218,7 +219,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("can receive OTLP over HTTP/protobuf with gzip encoding", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: helperOTLPRequestSpansWithStatus(), }}, }}, @@ -254,7 +255,7 @@ func TestOTLPHandler(t *testing.T) { t.Run("can receive OTLP over HTTP/protobuf with zstd encoding", func(t *testing.T) { req := &collectortrace.ExportTraceServiceRequest{ ResourceSpans: []*trace.ResourceSpans{{ - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: helperOTLPRequestSpansWithStatus(), }}, }}, @@ -290,8 +291,20 @@ func TestOTLPHandler(t *testing.T) { mockTransmission.Flush() }) - t.Run("rejects OTLP over HTTP/JSON ", func(t *testing.T) { - request, _ := http.NewRequest("POST", "/v1/traces", strings.NewReader("{}")) + t.Run("accepts OTLP over HTTP/JSON ", func(t *testing.T) { + req := &collectortrace.ExportTraceServiceRequest{ + ResourceSpans: []*trace.ResourceSpans{{ + ScopeSpans: []*trace.ScopeSpans{{ + Spans: helperOTLPRequestSpansWithStatus(), + }}, + }}, + } + body, err := protojson.Marshal(req) + if err != nil { + t.Error(err) + } + + request, _ := http.NewRequest("POST", "/v1/traces", bytes.NewReader(body)) request.Header = 
http.Header{} request.Header.Set("content-type", "application/json") request.Header.Set("x-honeycomb-team", legacyAPIKey) @@ -299,10 +312,10 @@ func TestOTLPHandler(t *testing.T) { w := httptest.NewRecorder() router.postOTLP(w, request) - assert.Equal(t, w.Code, http.StatusNotImplemented) - assert.Equal(t, `{"source":"refinery","error":"invalid content-type - only 'application/protobuf' is supported"}`, string(w.Body.String())) + assert.Equal(t, w.Code, http.StatusOK) + assert.Equal(t, "", w.Body.String()) - assert.Equal(t, 0, len(mockTransmission.Events)) + assert.Equal(t, 2, len(mockTransmission.Events)) mockTransmission.Flush() }) @@ -317,7 +330,7 @@ func TestOTLPHandler(t *testing.T) { {Key: "service.name", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "my-service"}}}, }, }, - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: []*trace.Span{{ Name: "my-span", }}, @@ -350,7 +363,7 @@ func TestOTLPHandler(t *testing.T) { {Key: "service.name", Value: &common.AnyValue{Value: &common.AnyValue_StringValue{StringValue: "my-service"}}}, }, }, - InstrumentationLibrarySpans: []*trace.InstrumentationLibrarySpans{{ + ScopeSpans: []*trace.ScopeSpans{{ Spans: []*trace.Span{{ Name: "my-span", }}, diff --git a/route/route.go b/route/route.go index 18af0b03a8..8fbac4ffaf 100644 --- a/route/route.go +++ b/route/route.go @@ -39,7 +39,7 @@ import ( "github.com/honeycombio/refinery/transmit" "github.com/honeycombio/refinery/types" - collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" + collectortrace "github.com/honeycombio/husky/proto/otlp/collector/trace/v1" ) const ( From f677c20097a2824faf5d6a9231da4a21b586cde9 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Wed, 7 Sep 2022 15:56:10 -0400 Subject: [PATCH 224/351] Go mod tidy (#507) ## Which problem is this PR solving? 
- Fixes failure to clean up go mod in last PR - Closes #496 because we no longer depend on that ## Short description of the changes - go mod tidy --- go.mod | 2 -- go.sum | 19 ------------------- 2 files changed, 21 deletions(-) diff --git a/go.mod b/go.mod index 312221d127..de45166d51 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,6 @@ require ( github.com/stretchr/testify v1.8.0 github.com/tidwall/gjson v1.14.3 github.com/vmihailenco/msgpack/v4 v4.3.11 - go.opentelemetry.io/proto/otlp v0.11.0 golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b // indirect google.golang.org/grpc v1.49.0 google.golang.org/protobuf v1.28.1 @@ -44,7 +43,6 @@ require ( github.com/go-playground/locales v0.13.0 // indirect github.com/go-playground/universal-translator v0.17.0 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/leodido/go-urn v1.2.0 // indirect diff --git a/go.sum b/go.sum index 89b1b2abd7..13ceeb8ee1 100644 --- a/go.sum +++ b/go.sum @@ -45,7 +45,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -61,10 +60,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -73,7 +68,6 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= @@ -96,7 +90,6 @@ github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+ne github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -179,8 +172,6 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/grpc-ecosystem/grpc-gateway 
v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 h1:/sDbPb60SusIXjiJGYLUoS/rAQurQmvGWmwn2bBPM9c= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -274,7 +265,6 @@ github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -330,9 +320,6 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= -go.opentelemetry.io/proto/otlp v0.11.0/go.mod 
h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -593,7 +580,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -622,12 +608,9 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -642,7 +625,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -659,7 +641,6 @@ gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From 363fd56aa091033ea3579b6d322b2812fdba3c4a Mon Sep 17 00:00:00 2001 From: "Derrick J. 
Wippler" Date: Thu, 8 Sep 2022 06:24:41 -0500 Subject: [PATCH 225/351] Added PeerManagement.Timeout config option (#491) Co-authored-by: Shawn Poulson --- .gitignore | 3 +++ app/app_test.go | 5 ++++- cmd/refinery/main.go | 17 ++++++++------- config/config.go | 2 ++ config/file_config.go | 9 ++++++++ config/mock.go | 8 +++++++ config_complete.toml | 4 ++++ internal/peer/peers.go | 8 +++---- internal/peer/peers_test.go | 20 ++++++++++++------ internal/peer/redis.go | 39 +++++++++++++++++++++++++++-------- route/route.go | 1 + sharder/deterministic_test.go | 5 +++-- 12 files changed, 92 insertions(+), 29 deletions(-) diff --git a/.gitignore b/.gitignore index 91efd2d6de..5ad34c0d18 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,6 @@ test_redimem !/cmd/test_redimem dockerize* + +# IDE configs +.idea/ diff --git a/app/app_test.go b/app/app_test.go index b2ff8b2379..f0a38bbb23 100644 --- a/app/app_test.go +++ b/app/app_test.go @@ -1,3 +1,6 @@ +//go:build all || race +// +build all race + package app import ( @@ -122,7 +125,7 @@ func newStartedApp( var err error if peers == nil { - peers, err = peer.NewPeers(c) + peers, err = peer.NewPeers(context.Background(), c) assert.NoError(t, err) } diff --git a/cmd/refinery/main.go b/cmd/refinery/main.go index 532929f970..b2f6da9a2f 100644 --- a/cmd/refinery/main.go +++ b/cmd/refinery/main.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "net" "net/http" @@ -85,13 +86,6 @@ func main() { os.Exit(1) } - peers, err := peer.NewPeers(c) - - if err != nil { - fmt.Printf("unable to load peers: %+v\n", err) - os.Exit(1) - } - // get desired implementation for each dependency to inject lgr := logger.GetLoggerImplementation(c) collector := collect.GetCollectorImplementation(c) @@ -110,6 +104,15 @@ func main() { os.Exit(1) } + ctx, cancel := context.WithTimeout(context.Background(), c.GetPeerTimeout()) + defer cancel() + peers, err := peer.NewPeers(ctx, c) + + if err != nil { + fmt.Printf("unable to load peers: %+v\n", err) + 
os.Exit(1) + } + // upstreamTransport is the http transport used to send things on to Honeycomb upstreamTransport := &http.Transport{ Proxy: http.ProxyFromEnvironment, diff --git a/config/config.go b/config/config.go index 60f8989e79..29e5c0e1e8 100644 --- a/config/config.go +++ b/config/config.go @@ -159,4 +159,6 @@ type Config interface { GetGRPCTime() time.Duration GetGRPCTimeout() time.Duration + + GetPeerTimeout() time.Duration } diff --git a/config/file_config.go b/config/file_config.go index 8853830327..74f210cc05 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -94,6 +94,7 @@ type PeerManagementConfig struct { IdentifierInterfaceName string UseIPV6Identifier bool RedisIdentifier string + Timeout time.Duration } // GRPCServerParameters allow you to configure the GRPC ServerParameters used @@ -127,6 +128,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("PeerManagement.UseTLS", false) c.SetDefault("PeerManagement.UseTLSInsecure", false) c.SetDefault("PeerManagement.UseIPV6Identifier", false) + c.SetDefault("PeerManagement.Timeout", 5*time.Second) c.SetDefault("HoneycombAPI", "https://api.honeycomb.io") c.SetDefault("Logger", "logrus") c.SetDefault("LoggingLevel", "debug") @@ -852,3 +854,10 @@ func (f *fileConfig) GetGRPCTimeout() time.Duration { return f.conf.GRPCServerParameters.Timeout } + +func (f *fileConfig) GetPeerTimeout() time.Duration { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.PeerManagement.Timeout +} diff --git a/config/mock.go b/config/mock.go index aa90acca43..e17c5daa1e 100644 --- a/config/mock.go +++ b/config/mock.go @@ -80,6 +80,7 @@ type MockConfig struct { GRPCMaxConnectionAgeGrace time.Duration GRPCTime time.Duration GRPCTimeout time.Duration + PeerTimeout time.Duration Mux sync.RWMutex } @@ -427,3 +428,10 @@ func (f *MockConfig) GetGRPCTimeout() time.Duration { return f.GRPCTimeout } + +func (f *MockConfig) GetPeerTimeout() time.Duration { + f.Mux.RLock() + 
defer f.Mux.RUnlock() + + return f.PeerTimeout +} diff --git a/config_complete.toml b/config_complete.toml index 2fd4c02174..fe9e240756 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -212,6 +212,10 @@ Metrics = "honeycomb" # Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set. # RedisIdentifier = "192.168.1.1" +# Timeout is optional. By default, when using RedisHost, Refinery will timeout +# after 5s when communicating with Redis. +# Timeout = "5s" + ######################### ## In-Memory Collector ## ######################### diff --git a/internal/peer/peers.go b/internal/peer/peers.go index ff5a2615c2..c17000ef1d 100644 --- a/internal/peer/peers.go +++ b/internal/peer/peers.go @@ -1,8 +1,8 @@ package peer import ( + "context" "errors" - "github.com/honeycombio/refinery/config" ) @@ -13,7 +13,7 @@ type Peers interface { RegisterUpdatedPeersCallback(callback func()) } -func NewPeers(c config.Config) (Peers, error) { +func NewPeers(ctx context.Context, c config.Config) (Peers, error) { t, err := c.GetPeerManagementType() if err != nil { @@ -24,8 +24,8 @@ func NewPeers(c config.Config) (Peers, error) { case "file": return newFilePeers(c), nil case "redis": - return newRedisPeers(c) + return newRedisPeers(ctx, c) default: - return nil, errors.New("Invalid PeerManagement Type") + return nil, errors.New("invalid config option 'PeerManagement.Type'") } } diff --git a/internal/peer/peers_test.go b/internal/peer/peers_test.go index b411477981..5e6917e4ab 100644 --- a/internal/peer/peers_test.go +++ b/internal/peer/peers_test.go @@ -1,20 +1,27 @@ +//go:build all || race +// +build all race + package peer import ( + "context" "testing" + "time" "github.com/honeycombio/refinery/config" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewPeers(t *testing.T) { c := &config.MockConfig{ PeerManagementType: "file", + PeerTimeout: 5 * 
time.Second, } - p, err := NewPeers(c) - - assert.Equal(t, nil, err) + p, err := NewPeers(context.Background(), c) + assert.NoError(t, err) + require.NotNil(t, p) switch i := p.(type) { case *filePeers: @@ -25,11 +32,12 @@ func TestNewPeers(t *testing.T) { c = &config.MockConfig{ GetPeerListenAddrVal: "0.0.0.0:8081", PeerManagementType: "redis", + PeerTimeout: 5 * time.Second, } - p, err = NewPeers(c) - - assert.Equal(t, nil, err) + p, err = NewPeers(context.Background(), c) + assert.NoError(t, err) + require.NotNil(t, p) switch i := p.(type) { case *redisPeers: diff --git a/internal/peer/redis.go b/internal/peer/redis.go index 4507e17843..041433ac72 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -45,7 +45,7 @@ type redisPeers struct { } // NewRedisPeers returns a peers collection backed by redis -func newRedisPeers(c config.Config) (Peers, error) { +func newRedisPeers(ctx context.Context, c config.Config) (Peers, error) { redisHost, _ := c.GetRedisHost() if redisHost == "" { @@ -101,9 +101,9 @@ func newRedisPeers(c config.Config) (Peers, error) { } // register myself once - err = peers.store.Register(context.TODO(), address, peerEntryTimeout) + err = peers.store.Register(ctx, address, peerEntryTimeout) if err != nil { - logrus.WithError(err).Errorf("failed to register self with peer store") + logrus.WithError(err).Errorf("failed to register self with redis peer store") return nil, err } @@ -139,15 +139,26 @@ func (p *redisPeers) RegisterUpdatedPeersCallback(cb func()) { func (p *redisPeers) registerSelf() { tk := time.NewTicker(refreshCacheInterval) for range tk.C { - // every 5 seconds, insert a 30sec timeout record - p.store.Register(context.TODO(), p.publicAddr, peerEntryTimeout) + ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout()) + // every 5 seconds, insert a 30sec timeout record. we ignore the error + // here since Register() logs the error for us. 
+ p.store.Register(ctx, p.publicAddr, peerEntryTimeout) + cancel() } } func (p *redisPeers) updatePeerListOnce() { - currentPeers, err := p.store.GetMembers(context.TODO()) + ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout()) + defer cancel() + + currentPeers, err := p.store.GetMembers(ctx) if err != nil { - // TODO maybe do something better here? + logrus.WithError(err). + WithFields(logrus.Fields{ + "name": p.publicAddr, + "timeout": p.c.GetPeerTimeout().String(), + }). + Error("get members failed") return } sort.Strings(currentPeers) @@ -163,11 +174,21 @@ func (p *redisPeers) watchPeers() { tk := time.NewTicker(refreshCacheInterval) for range tk.C { - currentPeers, err := p.store.GetMembers(context.TODO()) + ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout()) + currentPeers, err := p.store.GetMembers(ctx) + cancel() + if err != nil { - // TODO maybe do something better here? + logrus.WithError(err). + WithFields(logrus.Fields{ + "name": p.publicAddr, + "timeout": p.c.GetPeerTimeout().String(), + "oldPeers": oldPeerList, + }). 
+ Error("get members failed during watch") continue } + sort.Strings(currentPeers) if !equal(oldPeerList, currentPeers) { // update peer list and trigger callbacks saying the peer list has changed diff --git a/route/route.go b/route/route.go index 8fbac4ffaf..8727ac637b 100644 --- a/route/route.go +++ b/route/route.go @@ -248,6 +248,7 @@ func (r *Router) LnS(incomingOrPeer string) { func (r *Router) Stop() error { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() + err := r.server.Shutdown(ctx) if err != nil { return err diff --git a/sharder/deterministic_test.go b/sharder/deterministic_test.go index ab99d1133f..8d32287697 100644 --- a/sharder/deterministic_test.go +++ b/sharder/deterministic_test.go @@ -1,6 +1,7 @@ package sharder import ( + "context" "testing" "github.com/honeycombio/refinery/config" @@ -25,7 +26,7 @@ func TestWhichShard(t *testing.T) { GetPeersVal: peers, PeerManagementType: "file", } - filePeers, err := peer.NewPeers(config) + filePeers, err := peer.NewPeers(context.Background(), config) assert.Equal(t, nil, err) sharder := DeterministicSharder{ Config: config, @@ -66,7 +67,7 @@ func TestWhichShardAtEdge(t *testing.T) { GetPeersVal: peers, PeerManagementType: "file", } - filePeers, err := peer.NewPeers(config) + filePeers, err := peer.NewPeers(context.Background(), config) assert.Equal(t, nil, err) sharder := DeterministicSharder{ Config: config, From 1e1a175631751340d77403d010d8cf233baee9f8 Mon Sep 17 00:00:00 2001 From: epvanhouten Date: Fri, 9 Sep 2022 07:53:23 -0500 Subject: [PATCH 226/351] Add 'meta.refinery.original_sample_rate' (#508) ## Which problem is this PR solving? Refinery re-writing the sample rate was causing some concern. Writing down the sample rate sent to refinery prior to updating the sample rate should allow for more/better debug. This is implementing the suggestion of @robbkidd in #490 to add annotations to the spans of their original sample rate. 
This will allow for better understanding of what is happening when there are multiple tiers of sampling happening in the telemetry processing. ## Short description of the changes At the places where the Span SampleRate was combined with the Trace SampleRate, the original span SampleRate is copied down into a meta.refinery annotation. --- collect/collect.go | 35 ++++++++---- collect/collect_test.go | 122 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 147 insertions(+), 10 deletions(-) diff --git a/collect/collect.go b/collect/collect.go index fbdece7540..59619c3e7a 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -395,16 +395,35 @@ func (i *InMemCollector) dealWithSentTrace(keep bool, sampleRate uint, sp *types } if keep { i.Logger.Debug().WithField("trace_id", sp.TraceID).Logf("Sending span because of previous decision to send trace") - if sp.SampleRate < 1 { - sp.SampleRate = 1 - } - sp.SampleRate *= sampleRate + mergeTraceAndSpanSampleRates(sp, sampleRate) i.Transmission.EnqueueSpan(sp) return } i.Logger.Debug().WithField("trace_id", sp.TraceID).Logf("Dropping span because of previous decision to drop trace") } +func mergeTraceAndSpanSampleRates(sp *types.Span, traceSampleRate uint) { + if traceSampleRate != 1 { + // When the sample rate from the trace is not 1 that means we are + // going to mangle the span sample rate. 
Write down the original sample + // rate so that that information is more easily recovered + sp.Data["meta.refinery.original_sample_rate"] = sp.SampleRate + } + + if sp.SampleRate < 1 { + // See https://docs.honeycomb.io/manage-data-volume/sampling/ + // SampleRate is the denominator of the ratio of sampled spans + // HoneyComb treats a missing or 0 SampleRate the same as 1, but + // behaves better/more consistently if the SampleRate is explicitly + // set instead of inferred + sp.SampleRate = 1 + } + + // if spans are already sampled, take that in to account when computing + // the final rate + sp.SampleRate *= traceSampleRate +} + func isRootSpan(sp *types.Span) bool { parentID := sp.Data["trace.parent_id"] if parentID == nil { @@ -489,9 +508,7 @@ func (i *InMemCollector) send(trace *types.Trace) { if i.Config.GetAddRuleReasonToTrace() { sp.Data["meta.refinery.reason"] = reason } - if sp.SampleRate < 1 { - sp.SampleRate = 1 - } + if i.Config.GetIsDryRun() { field := i.Config.GetDryRunFieldName() sp.Data[field] = shouldSend @@ -499,9 +516,7 @@ func (i *InMemCollector) send(trace *types.Trace) { if i.hostname != "" { sp.Data["meta.refinery.local_hostname"] = i.hostname } - // if spans are already sampled, take that in to account when computing - // the final rate - sp.SampleRate *= trace.SampleRate + mergeTraceAndSpanSampleRates(sp, trace.SampleRate) i.Transmission.EnqueueSpan(sp) } } diff --git a/collect/collect_test.go b/collect/collect_test.go index 25fdc3d851..4eea2bee08 100644 --- a/collect/collect_test.go +++ b/collect/collect_test.go @@ -1,6 +1,7 @@ package collect import ( + "fmt" "runtime" "strconv" "testing" @@ -98,6 +99,127 @@ func TestAddRootSpan(t *testing.T) { transmission.Mux.RUnlock() } +// #490, SampleRate getting stomped could cause confusion if sampling was +// happening upstream of refinery. Writing down what got sent to refinery +// will help people figure out what is going on. 
+func TestOriginalSampleRateIsNotedInMetaField(t *testing.T) { + transmission := &transmit.MockTransmission{} + transmission.Start() + conf := &config.MockConfig{ + GetSendDelayVal: 0, + GetTraceTimeoutVal: 60 * time.Second, + GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 2}, + SendTickerVal: 2 * time.Millisecond, + } + coll := &InMemCollector{ + Config: conf, + Logger: &logger.NullLogger{}, + Transmission: transmission, + Metrics: &metrics.NullMetrics{}, + SamplerFactory: &sample.SamplerFactory{ + Config: conf, + Logger: &logger.NullLogger{}, + }, + } + + c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{}) + coll.cache = c + stc, err := lru.New(15) + assert.NoError(t, err, "lru cache should start") + coll.sentTraceCache = stc + + coll.incoming = make(chan *types.Span, 5) + coll.fromPeer = make(chan *types.Span, 5) + coll.datasetSamplers = make(map[string]sample.Sampler) + go coll.collect() + defer coll.Stop() + + // Spin until a sample gets triggered + sendAttemptCount := 0 + for getEventsLength(transmission) < 1 || sendAttemptCount > 10 { + sendAttemptCount++ + span := &types.Span{ + TraceID: fmt.Sprintf("trace-%v", sendAttemptCount), + Event: types.Event{ + Dataset: "aoeu", + APIKey: legacyAPIKey, + SampleRate: 50, + Data: make(map[string]interface{}), + }, + } + coll.AddSpan(span) + time.Sleep(conf.SendTickerVal * 2) + } + + transmission.Mux.RLock() + assert.Greater(t, len(transmission.Events), 0, "should be some events transmitted") + assert.Equal(t, uint(50), transmission.Events[0].Data["meta.refinery.original_sample_rate"], "metadata should be populated with original sample rate") + transmission.Mux.RUnlock() +} + +// HoneyComb treats a missing or 0 SampleRate the same as 1, but +// behaves better/more consistently if the SampleRate is explicitly +// set instead of inferred +func TestTransmittedSpansShouldHaveASampleRateOfAtLeastOne(t *testing.T) { + transmission := &transmit.MockTransmission{} + transmission.Start() 
+ conf := &config.MockConfig{ + GetSendDelayVal: 0, + GetTraceTimeoutVal: 60 * time.Second, + GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1}, + SendTickerVal: 2 * time.Millisecond, + } + coll := &InMemCollector{ + Config: conf, + Logger: &logger.NullLogger{}, + Transmission: transmission, + Metrics: &metrics.NullMetrics{}, + SamplerFactory: &sample.SamplerFactory{ + Config: conf, + Logger: &logger.NullLogger{}, + }, + } + + c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{}) + coll.cache = c + stc, err := lru.New(15) + assert.NoError(t, err, "lru cache should start") + coll.sentTraceCache = stc + + coll.incoming = make(chan *types.Span, 5) + coll.fromPeer = make(chan *types.Span, 5) + coll.datasetSamplers = make(map[string]sample.Sampler) + go coll.collect() + defer coll.Stop() + + span := &types.Span{ + TraceID: fmt.Sprintf("trace-%v", 1), + Event: types.Event{ + Dataset: "aoeu", + APIKey: legacyAPIKey, + SampleRate: 0, // This should get lifted to 1 + Data: make(map[string]interface{}), + }, + } + + coll.AddSpan(span) + + time.Sleep(conf.SendTickerVal * 2) + + transmission.Mux.RLock() + assert.Equal(t, 1, len(transmission.Events), "should be some events transmitted") + assert.Equal(t, uint(1), transmission.Events[0].SampleRate, + "SampleRate should be reset to one after starting at zero") + transmission.Mux.RUnlock() +} + +func getEventsLength(transmission *transmit.MockTransmission) int { + transmission.Mux.RLock() + defer transmission.Mux.RUnlock() + + return len(transmission.Events) +} + // TestAddSpan tests that adding a span winds up with a trace object in the // cache func TestAddSpan(t *testing.T) { From fcc3e0d364d22aef7ebe6c2ec9bfed0516de36b1 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Mon, 12 Sep 2022 08:49:30 -0400 Subject: [PATCH 227/351] Prepare v1.16.0 release (#510) Prep for release 1.16.0. Includes update of OSS metadata badge to active. 
--- CHANGELOG.md | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++ OSSMETADATA | 2 +- 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d27e57b9f..3b6e3e5cea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,71 @@ # Refinery Changelog +## 1.16.0 2022-09-09 + +This release contains a number of small new features to assist in running refinery more effectively: +- Adds new endpoints to help in debugging refinery rules (see README.md) +- Fixes issues with SampleRate +- Adds some new configuration parameters (see the *_complete.toml files for more) +- Conforms to the GRPC standard for health probes +- Accepts OTLP/JSON traces and conforms to the most recent OTLP trace specification + +### Enhancements + +- Add /query endpoints to help debug refinery rules (#500, #502) | [kentquirk](https://github.com/kentquirk) +- Implement grpc-health-probe (#498) | [abatilo](https://github.com/abatilo) +- Make gRPC ServerParameters configurable (#499) | [abatilo](https://github.com/abatilo) +- Fix sample rate for late spans (#504) | [kentquirk](https://github.com/kentquirk) +- Optionally record why a sample decision was made (#503) | [kentquirk](https://github.com/kentquirk) +- Added PeerManagement.Timeout config option (#491) | [thrawn01](https://github.com/thrawn01) +- Add 'meta.refinery.original_sample_rate' (#508) | [epvanhouten](https://github.com/epvanhouten) + +### Maintenance + +- maint: improvements to GitHub operation (#474, #477, #478) | [JamieDanielson](https://github.com/JamieDanielson), [vreynolds](https://github.com/vreynolds) + +### Dependencies + +- Bump github.com/stretchr/testify from 1.7.2 to 1.8.0 (#472) | [dependabot](https://github.com/dependabot) +- Bump github.com/sirupsen/logrus from 1.8.1 to 1.9.0 (#484) | 
[dependabot](https://github.com/dependabot) +- Bump google.golang.org/grpc from 1.46.2 to 1.49.0 (#485, 494) | [dependabot](https://github.com/dependabot) +- Bump github.com/honeycombio/libhoney-go from 1.15.8 to 1.16.0 (#487) | [dependabot](https://github.com/dependabot) +- Bump github.com/gomodule/redigo from 1.8.8 to 1.8.9 (#488) | [dependabot](https://github.com/dependabot) +- Bump github.com/klauspost/compress from 1.15.7 to 1.15.9 (#495) | [dependabot](https://github.com/dependabot) +- Bump github.com/tidwall/gjson from 1.14.1 to 1.14.3 (#497) | [dependabot](https://github.com/dependabot) +- Update github.com/honeycombio/husky to latest and fix breaking changes (#505) | [kentquirk](https://github.com/kentquirk) +- Go mod tidy (#507) | [kentquirk](https://github.com/kentquirk) + +## New Contributors +- @abatilo made their first contribution in https://github.com/honeycombio/refinery/pull/498 +- @thrawn01 made their first contribution in https://github.com/honeycombio/refinery/pull/491 +- @epvanhouten made their first contribution in https://github.com/honeycombio/refinery/pull/508 + +**Full Changelog**: https://github.com/honeycombio/refinery/compare/v1.15.0...v1.16.0 + +## 1.15.0 2022-07-01 + +### Enhancements + +- Add rule Scope configuration option to rules-based sampler (#440) | [isnotajoke](https://github.com/isnotajoke) +- Replace hand-rolled binary.BigEndian.Uint32 with the real deal (#459) | [toshok](https://github.com/toshok) +- Validate successful span scoped rules test (#465) | [MikeGoldsmith](https://github.com/MikeGoldsmith) +- Create helm-chart issue on release (#458) | [MikeGoldsmith](https://github.com/MikeGoldsmith) +- github_token needs underscore not 
hyphen (#464) | [@JamieDanielson](https://github.com/JamieDanielson) + +### Maintenance + +- Replace legacy with classic in readme (#457) | [MikeGoldsmith](https://github.com/MikeGoldsmith) + +### Dependencies + +- Bump github.com/spf13/viper from 1.10.1 to 1.12.0 (#461) +- Bump github.com/stretchr/testify from 1.7.1 to 1.7.2 (#467) +- Bump github.com/honeycombio/husky from 0.10.5 to 0.10.6 (#460) +- Bump github.com/klauspost/compress from 1.15.4 to 1.15.6 (#466) +- Bump github.com/prometheus/client_golang from 1.12.1 to 1.12.2 (#463) + + + ## 1.15.0 2022-07-01 ### Enhancements diff --git a/OSSMETADATA b/OSSMETADATA index 58d43b81d1..b96d4a4dfa 100755 --- a/OSSMETADATA +++ b/OSSMETADATA @@ -1 +1 @@ -osslifecycle=maintained +osslifecycle=active From 652a78637d30d34237abd614df151dbf6718dd21 Mon Sep 17 00:00:00 2001 From: Mike Goldsmith Date: Wed, 14 Sep 2022 11:12:11 +0100 Subject: [PATCH 228/351] Consolidate honeycomb metrics to use single lock & fix concurrent read/write (#511) Co-authored-by: Vera Reynolds Co-authored-by: Jamie Danielson --- metrics/honeycomb.go | 129 +++++++++++++++++++++++-------------------- 1 file changed, 69 insertions(+), 60 deletions(-) diff --git a/metrics/honeycomb.go b/metrics/honeycomb.go index c89d731168..32f0a462d9 100644 --- a/metrics/honeycomb.go +++ b/metrics/honeycomb.go @@ -23,12 +23,10 @@ type HoneycombMetrics struct { UpstreamTransport *http.Transport `inject:"upstreamTransport"` Version string `inject:"version"` - countersLock sync.Mutex - counters map[string]*counter - gaugesLock sync.Mutex - gauges map[string]*gauge - histogramsLock sync.Mutex - histograms map[string]*histogram + lock sync.RWMutex + counters map[string]*counter + gauges map[string]*gauge + histograms map[string]*histogram libhClient *libhoney.Client @@ -228,16 +226,15 @@ func (h *HoneycombMetrics) reportToHoneycommb(ctx context.Context) { "api_host": ev.APIHost, 
"dataset": ev.Dataset, } - h.countersLock.Lock() + + h.lock.RLock() for _, count := range h.counters { count.lock.Lock() ev.AddField(PrefixMetricName(h.prefix, count.name), count.val) count.val = 0 count.lock.Unlock() } - h.countersLock.Unlock() - h.gaugesLock.Lock() for _, gauge := range h.gauges { gauge.lock.Lock() ev.AddField(PrefixMetricName(h.prefix, gauge.name), gauge.val) @@ -245,9 +242,7 @@ func (h *HoneycombMetrics) reportToHoneycommb(ctx context.Context) { // gauge.val = 0 gauge.lock.Unlock() } - h.gaugesLock.Unlock() - h.histogramsLock.Lock() for _, histogram := range h.histograms { histogram.lock.Lock() if len(histogram.vals) != 0 { @@ -265,7 +260,7 @@ func (h *HoneycombMetrics) reportToHoneycommb(ctx context.Context) { } histogram.lock.Unlock() } - h.histogramsLock.Unlock() + h.lock.RUnlock() ev.Send() } @@ -283,51 +278,69 @@ func average(vals []float64) float64 { func (h *HoneycombMetrics) Register(name string, metricType string) { switch metricType { case "counter": - h.countersLock.Lock() - defer h.countersLock.Unlock() - // inside the lock, let's not race to create the counter - _, ok := h.counters[name] - if !ok { - newCounter := &counter{ - name: name, - } - h.counters[name] = newCounter - } + getOrAdd(&h.lock, name, h.counters, createCounter) case "gauge": - h.gaugesLock.Lock() - defer h.gaugesLock.Unlock() - _, ok := h.gauges[name] - if !ok { - newGauge := &gauge{ - name: name, - } - h.gauges[name] = newGauge - } + getOrAdd(&h.lock, name, h.gauges, createGauge) case "histogram": - h.histogramsLock.Lock() - defer h.histogramsLock.Unlock() - _, ok := h.histograms[name] - if !ok { - newGauge := &histogram{ - name: name, - vals: make([]float64, 0), - } - h.histograms[name] = newGauge - } + getOrAdd(&h.lock, name, h.histograms, createHistogram) default: h.Logger.Debug().Logf("unspported metric type %s", metricType) } } -func (h *HoneycombMetrics) Count(name string, n interface{}) { - count, ok := h.counters[name] +// getOrAdd attempts to retrieve a 
(generic) metric from the provided map by name, wrapping the read operation +// with a read lock (RLock). If the metric is not present in the map, it acquires a write lock and executes +// a create function to add it to the map. +func getOrAdd[T *counter | *gauge | *histogram](lock *sync.RWMutex, name string, metrics map[string]T, createMetric func(name string) T) T { + // attempt to get metric by name using read lock + lock.RLock() + metric, ok := metrics[name] + lock.RUnlock() + + // if found, return existing metric + if ok { + return metric + } + + // acquire write lock + lock.Lock() + // check again to see if it's been added while waiting for write lock + metric, ok = metrics[name] if !ok { - h.Register(name, "counter") - count = h.counters[name] + // create new metric using create function and add to map + metric := createMetric(name) + metrics[name] = metric } - count.lock.Lock() - defer count.lock.Unlock() - count.val = count.val + int(ConvertNumeric(n)) + lock.Unlock() + return metric +} + +func createCounter(name string) *counter { + return &counter{ + name: name, + } +} + +func createGauge(name string) *gauge { + return &gauge{ + name: name, + } +} + +func createHistogram(name string) *histogram { + return &histogram{ + name: name, + vals: make([]float64, 0), + } +} + +func (h *HoneycombMetrics) Count(name string, n interface{}) { + counter := getOrAdd(&h.lock, name, h.counters, createCounter) + + // update value, using counter's lock + counter.lock.Lock() + counter.val = counter.val + int(ConvertNumeric(n)) + counter.lock.Unlock() } func (h *HoneycombMetrics) Increment(name string) { @@ -335,23 +348,19 @@ func (h *HoneycombMetrics) Increment(name string) { } func (h *HoneycombMetrics) Gauge(name string, val interface{}) { - gauge, ok := h.gauges[name] - if !ok { - h.Register(name, "gauge") - gauge = h.gauges[name] - } + gauge := getOrAdd(&h.lock, name, h.gauges, createGauge) + + // update value, using gauge's lock gauge.lock.Lock() - defer 
gauge.lock.Unlock() gauge.val = ConvertNumeric(val) + gauge.lock.Unlock() } func (h *HoneycombMetrics) Histogram(name string, obs interface{}) { - histogram, ok := h.histograms[name] - if !ok { - h.Register(name, "histogram") - histogram = h.histograms[name] - } + histogram := getOrAdd(&h.lock, name, h.histograms, createHistogram) + + // update value, using histogram's lock histogram.lock.Lock() - defer histogram.lock.Unlock() histogram.vals = append(histogram.vals, ConvertNumeric(obs)) + histogram.lock.Unlock() } From 03fc13644495625d0375455a98e7250ccd8aac57 Mon Sep 17 00:00:00 2001 From: Levi Wilson Date: Wed, 14 Sep 2022 15:01:25 -0600 Subject: [PATCH 229/351] allow BatchTimeout to be overriden on the libhoney Transmission (#509) This PR allows for the BatchTimeout to be configured for refinery rather than explicitly using DefaultBatchTimeout from libhoney * allow BatchTimeout to be overridden on the libhoney Transmission * add an example of modifying BatchTimeout in config_complete.toml --- cmd/refinery/main.go | 4 ++-- config/config.go | 3 +++ config/file_config.go | 9 +++++++++ config/mock.go | 8 ++++++++ config_complete.toml | 5 +++++ 5 files changed, 27 insertions(+), 2 deletions(-) diff --git a/cmd/refinery/main.go b/cmd/refinery/main.go index b2f6da9a2f..0690d96509 100644 --- a/cmd/refinery/main.go +++ b/cmd/refinery/main.go @@ -138,7 +138,7 @@ func main() { upstreamClient, err := libhoney.NewClient(libhoney.ClientConfig{ Transmission: &transmission.Honeycomb{ MaxBatchSize: c.GetMaxBatchSize(), - BatchTimeout: libhoney.DefaultBatchTimeout, + BatchTimeout: c.GetBatchTimeout(), MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches, PendingWorkCapacity: uint(c.GetUpstreamBufferSize()), UserAgentAddition: userAgentAddition, @@ -156,7 +156,7 @@ func main() { peerClient, err := libhoney.NewClient(libhoney.ClientConfig{ Transmission: &transmission.Honeycomb{ MaxBatchSize: c.GetMaxBatchSize(), - BatchTimeout: libhoney.DefaultBatchTimeout, + BatchTimeout: 
c.GetBatchTimeout(), MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches, PendingWorkCapacity: uint(c.GetPeerBufferSize()), UserAgentAddition: userAgentAddition, diff --git a/config/config.go b/config/config.go index 29e5c0e1e8..3212c85d3b 100644 --- a/config/config.go +++ b/config/config.go @@ -71,6 +71,9 @@ type Config interface { // complete before sending it, to allow stragglers to arrive GetSendDelay() (time.Duration, error) + // GetBatchTimeout returns how often to send off batches in seconds + GetBatchTimeout() time.Duration + // GetTraceTimeout is how long to wait before sending a trace even if it's // not complete. This should be longer than the longest expected trace // duration. diff --git a/config/file_config.go b/config/file_config.go index 74f210cc05..5e30aae6fb 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -37,6 +37,7 @@ type configContents struct { Sampler string `validate:"required,oneof= DeterministicSampler DynamicSampler EMADynamicSampler RulesBasedSampler TotalThroughputSampler"` Metrics string `validate:"required,oneof= prometheus honeycomb"` SendDelay time.Duration `validate:"required"` + BatchTimeout time.Duration TraceTimeout time.Duration `validate:"required"` MaxBatchSize uint `validate:"required"` SendTicker time.Duration `validate:"required"` @@ -135,6 +136,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("Collector", "InMemCollector") c.SetDefault("Metrics", "honeycomb") c.SetDefault("SendDelay", 2*time.Second) + c.SetDefault("BatchTimeout", libhoney.DefaultBatchTimeout) c.SetDefault("TraceTimeout", 60*time.Second) c.SetDefault("MaxBatchSize", 500) c.SetDefault("SendTicker", 100*time.Millisecond) @@ -710,6 +712,13 @@ func (f *fileConfig) GetSendDelay() (time.Duration, error) { return f.conf.SendDelay, nil } +func (f *fileConfig) GetBatchTimeout() time.Duration { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.BatchTimeout +} + func (f *fileConfig) 
GetTraceTimeout() (time.Duration, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config/mock.go b/config/mock.go index e17c5daa1e..83a4395a2d 100644 --- a/config/mock.go +++ b/config/mock.go @@ -57,6 +57,7 @@ type MockConfig struct { GetPrometheusMetricsConfigVal PrometheusMetricsConfig GetSendDelayErr error GetSendDelayVal time.Duration + GetBatchTimeoutVal time.Duration GetTraceTimeoutErr error GetTraceTimeoutVal time.Duration GetMaxBatchSizeVal uint @@ -258,6 +259,13 @@ func (m *MockConfig) GetSendDelay() (time.Duration, error) { return m.GetSendDelayVal, m.GetSendDelayErr } +func (m *MockConfig) GetBatchTimeout() time.Duration { + m.Mux.RLock() + defer m.Mux.RUnlock() + + return m.GetBatchTimeoutVal +} + func (m *MockConfig) GetTraceTimeout() (time.Duration, error) { m.Mux.RLock() defer m.Mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index fe9e240756..3fdfdd6be5 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -58,6 +58,11 @@ HoneycombAPI = "https://api.honeycomb.io" # Eligible for live reload. SendDelay = "2s" +# BatchTimeout dictates how frequently to send unfulfilled batches. By default +# this will use the DefaultBatchTimeout in libhoney as its value, which is 100ms. +# Eligible for live reload. +BatchTimeout = "1s" + # TraceTimeout is a long timer; it represents the outside boundary of how long # to wait before sending an incomplete trace. Normally traces are sent when the # root span arrives. Sometimes the root span never arrives (due to crashes or From da2fc59f2693e4379a12283b47574c7e4f1a4e22 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Thu, 15 Sep 2022 20:08:25 -0400 Subject: [PATCH 230/351] Allow adding extra fields to error logs (#514) ## Which problem is this PR solving? As noted in #513, errors that occur while Refinery is trying to send data to Honeycomb happen asynchronously. It's hard to relate any errors that show up in the logs with the spans that caused them to occur. 
However, there is a way provided by libhoney ([metadata](https://github.com/honeycombio/libhoney-go/blob/25068939fe8240ccc45e4c86271b6f057c5f833d/transmission/response.go#L11)) that makes it possible to attach data to the error. Before now, Refinery has attached the dataset, api host, and environment to the metadata. This helps but is not enough. ## Short description of the changes This PR attempts to leverage that technique further by adding a new configuration value called `AdditionalErrorFields`. It allows the user to specify a list of field names. In the event of a transmission error, these fields (if they exist) will be copied from the failing span into the metadata, and will therefore show up as identified fields in the logs. The default value is `trace.span_id`. This also removes the remaining instances of the obsolete standard library `ioutil`. Closes #513 --- app/app_test.go | 34 ++++++------ config/config.go | 2 + config/config_test.go | 73 ++++++++++++++++++++++--- config/config_test_reload_error_test.go | 9 ++- config/file_config.go | 9 +++ config/mock.go | 8 +++ config_complete.toml | 13 +++++ internal/peer/peers_test.go | 3 - logger/honeycomb.go | 2 +- metrics/honeycomb.go | 2 +- sample/sample_test.go | 7 +-- transmit/transmit.go | 14 ++++- 12 files changed, 136 insertions(+), 40 deletions(-) diff --git a/app/app_test.go b/app/app_test.go index f0a38bbb23..1eb44d035b 100644 --- a/app/app_test.go +++ b/app/app_test.go @@ -1,5 +1,4 @@ //go:build all || race -// +build all race package app @@ -9,7 +8,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net" "net/http" "net/http/httptest" @@ -215,7 +213,7 @@ func post(t testing.TB, req *http.Request) { resp, err := httpClient.Do(req) assert.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode) - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } @@ -333,7 +331,7 @@ func TestPeerRouting(t *testing.T) { req.Header.Set("Content-Type", 
"application/json") blob := `[` + string(spans[0]) + `]` - req.Body = ioutil.NopCloser(strings.NewReader(blob)) + req.Body = io.NopCloser(strings.NewReader(blob)) post(t, req) assert.Eventually(t, func() bool { return len(senders[0].Events()) == 1 @@ -364,7 +362,7 @@ func TestPeerRouting(t *testing.T) { "long": "this is a test of the emergency broadcast system", "foo": "bar", }, - Metadata: map[string]string{ + Metadata: map[string]any{ "api_host": "http://api.honeycomb.io", "dataset": "dataset", "environment": "", @@ -383,7 +381,7 @@ func TestPeerRouting(t *testing.T) { req.Header.Set("X-Honeycomb-Team", legacyAPIKey) req.Header.Set("Content-Type", "application/json") - req.Body = ioutil.NopCloser(strings.NewReader(blob)) + req.Body = io.NopCloser(strings.NewReader(blob)) post(t, req) assert.Eventually(t, func() bool { return len(senders[1].Events()) == 1 @@ -488,7 +486,7 @@ func TestEventsEndpoint(t *testing.T) { "trace.trace_id": "1", "foo": "bar", }, - Metadata: map[string]string{ + Metadata: map[string]any{ "api_host": "http://api.honeycomb.io", "dataset": "dataset", "environment": "", @@ -535,7 +533,7 @@ func TestEventsEndpoint(t *testing.T) { "trace.trace_id": "1", "foo": "bar", }, - Metadata: map[string]string{ + Metadata: map[string]any{ "api_host": "http://api.honeycomb.io", "dataset": "dataset", "environment": "", @@ -602,7 +600,7 @@ func TestEventsEndpointWithNonLegacyKey(t *testing.T) { "trace.trace_id": "1", "foo": "bar", }, - Metadata: map[string]string{ + Metadata: map[string]any{ "api_host": "http://api.honeycomb.io", "dataset": "dataset", "environment": "test", @@ -649,7 +647,7 @@ func TestEventsEndpointWithNonLegacyKey(t *testing.T) { "trace.trace_id": "1", "foo": "bar", }, - Metadata: map[string]string{ + Metadata: map[string]any{ "api_host": "http://api.honeycomb.io", "dataset": "dataset", "environment": "test", @@ -716,7 +714,7 @@ func BenchmarkTraces(b *testing.B) { sender := &countingWriterSender{ WriterSender: transmission.WriterSender{ - 
W: ioutil.Discard, + W: io.Discard, }, } _, graph := newStartedApp(b, sender, 11000, nil, false) @@ -734,7 +732,7 @@ func BenchmarkTraces(b *testing.B) { sender.resetCount() for n := 0; n < b.N; n++ { blob := `[` + string(spans[n%len(spans)]) + `]` - req.Body = ioutil.NopCloser(strings.NewReader(blob)) + req.Body = io.NopCloser(strings.NewReader(blob)) post(b, req) } sender.waitForCount(b, b.N) @@ -752,7 +750,7 @@ func BenchmarkTraces(b *testing.B) { blob = append(blob, ',') } blob[len(blob)-1] = ']' - req.Body = ioutil.NopCloser(bytes.NewReader(blob)) + req.Body = io.NopCloser(bytes.NewReader(blob)) post(b, req) } @@ -776,13 +774,13 @@ func BenchmarkTraces(b *testing.B) { blob = append(blob, ',') } blob[len(blob)-1] = ']' - req.Body = ioutil.NopCloser(bytes.NewReader(blob)) + req.Body = io.NopCloser(bytes.NewReader(blob)) resp, err := httpClient.Do(req) assert.NoError(b, err) if resp != nil { assert.Equal(b, http.StatusOK, resp.StatusCode) - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } } @@ -799,7 +797,7 @@ func BenchmarkTraces(b *testing.B) { func BenchmarkDistributedTraces(b *testing.B) { sender := &countingWriterSender{ WriterSender: transmission.WriterSender{ - W: ioutil.Discard, + W: io.Discard, }, } @@ -837,7 +835,7 @@ func BenchmarkDistributedTraces(b *testing.B) { sender.resetCount() for n := 0; n < b.N; n++ { blob := `[` + string(spans[n%len(spans)]) + `]` - req.Body = ioutil.NopCloser(strings.NewReader(blob)) + req.Body = io.NopCloser(strings.NewReader(blob)) req.URL.Host = addrs[n%len(addrs)] post(b, req) } @@ -856,7 +854,7 @@ func BenchmarkDistributedTraces(b *testing.B) { blob = append(blob, ',') } blob[len(blob)-1] = ']' - req.Body = ioutil.NopCloser(bytes.NewReader(blob)) + req.Body = io.NopCloser(bytes.NewReader(blob)) req.URL.Host = addrs[n%len(addrs)] post(b, req) diff --git a/config/config.go b/config/config.go index 3212c85d3b..4bfe3924b3 100644 --- a/config/config.go +++ b/config/config.go @@ -164,4 
+164,6 @@ type Config interface { GetGRPCTimeout() time.Duration GetPeerTimeout() time.Duration + + GetAdditionalErrorFields() []string } diff --git a/config/config_test.go b/config/config_test.go index 59c87c14f5..b187fa2019 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,7 +1,6 @@ package config import ( - "io/ioutil" "os" "sync" "testing" @@ -78,11 +77,12 @@ func TestRedisPasswordEnvVar(t *testing.T) { } } +// creates two temporary toml files from the strings passed in and returns their filenames func createTempConfigs(t *testing.T, configBody string, rulesBody string) (string, string) { - tmpDir, err := ioutil.TempDir("", "") + tmpDir, err := os.MkdirTemp("", "") assert.NoError(t, err) - configFile, err := ioutil.TempFile(tmpDir, "*.toml") + configFile, err := os.CreateTemp(tmpDir, "*.toml") assert.NoError(t, err) if configBody != "" { @@ -91,7 +91,7 @@ func createTempConfigs(t *testing.T, configBody string, rulesBody string) (strin } configFile.Close() - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") + rulesFile, err := os.CreateTemp(tmpDir, "*.toml") assert.NoError(t, err) if rulesBody != "" { @@ -636,11 +636,11 @@ func TestQueryAuthToken(t *testing.T) { } func TestGRPCServerParameters(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") + tmpDir, err := os.MkdirTemp("", "") assert.NoError(t, err) defer os.RemoveAll(tmpDir) - configFile, err := ioutil.TempFile(tmpDir, "*.toml") + configFile, err := os.CreateTemp(tmpDir, "*.toml") assert.NoError(t, err) _, err = configFile.Write([]byte(` @@ -668,7 +668,7 @@ func TestGRPCServerParameters(t *testing.T) { assert.NoError(t, err) configFile.Close() - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") + rulesFile, err := os.CreateTemp(tmpDir, "*.toml") assert.NoError(t, err) c, err := NewConfig(configFile.Name(), rulesFile.Name(), func(err error) {}) @@ -680,3 +680,62 @@ func TestGRPCServerParameters(t *testing.T) { assert.Equal(t, 4*time.Minute, c.GetGRPCTime()) assert.Equal(t, 
5*time.Minute, c.GetGRPCTimeout()) } + +func TestHoneycombAdditionalErrorConfig(t *testing.T) { + config, rules := createTempConfigs(t, ` + AdditionalErrorFields = [ + "first", + "second" + ] + + [InMemCollector] + CacheCapacity=1000 + + [HoneycombMetrics] + MetricsHoneycombAPI="http://honeycomb.io" + MetricsAPIKey="1234" + MetricsDataset="testDatasetName" + MetricsReportingInterval=3 + + [HoneycombLogger] + LoggerHoneycombAPI="http://honeycomb.io" + LoggerAPIKey="1234" + LoggerDataset="loggerDataset" + LoggerSamplerEnabled=true + LoggerSamplerThroughput=10 + `, "") + defer os.Remove(rules) + defer os.Remove(config) + + c, err := NewConfig(config, rules, func(err error) {}) + assert.NoError(t, err) + + assert.Equal(t, []string{"first", "second"}, c.GetAdditionalErrorFields()) +} + +func TestHoneycombAdditionalErrorDefaults(t *testing.T) { + config, rules := createTempConfigs(t, ` + [InMemCollector] + CacheCapacity=1000 + + [HoneycombMetrics] + MetricsHoneycombAPI="http://honeycomb.io" + MetricsAPIKey="1234" + MetricsDataset="testDatasetName" + MetricsReportingInterval=3 + + [HoneycombLogger] + LoggerHoneycombAPI="http://honeycomb.io" + LoggerAPIKey="1234" + LoggerDataset="loggerDataset" + LoggerSamplerEnabled=true + LoggerSamplerThroughput=10 + `, "") + defer os.Remove(rules) + defer os.Remove(config) + + c, err := NewConfig(config, rules, func(err error) {}) + assert.NoError(t, err) + + assert.Equal(t, []string{"trace.span_id"}, c.GetAdditionalErrorFields()) +} diff --git a/config/config_test_reload_error_test.go b/config/config_test_reload_error_test.go index d86574cdb8..307166b27a 100644 --- a/config/config_test_reload_error_test.go +++ b/config/config_test_reload_error_test.go @@ -3,7 +3,6 @@ package config import ( - "io/ioutil" "os" "sync" "testing" @@ -13,14 +12,14 @@ import ( ) func TestErrorReloading(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") + tmpDir, err := os.MkdirTemp("", "") assert.NoError(t, err) defer os.RemoveAll(tmpDir) - rulesFile, 
err := ioutil.TempFile(tmpDir, "*.toml") + rulesFile, err := os.CreateTemp(tmpDir, "*.toml") assert.NoError(t, err) - configFile, err := ioutil.TempFile(tmpDir, "*.toml") + configFile, err := os.CreateTemp(tmpDir, "*.toml") assert.NoError(t, err) dummy := []byte(` @@ -76,7 +75,7 @@ func TestErrorReloading(t *testing.T) { } }() - err = ioutil.WriteFile(rulesFile.Name(), []byte(`Sampler="InvalidSampler"`), 0644) + err = os.WriteFile(rulesFile.Name(), []byte(`Sampler="InvalidSampler"`), 0644) if err != nil { t.Error(err) diff --git a/config/file_config.go b/config/file_config.go index 5e30aae6fb..5e84578848 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -54,6 +54,7 @@ type configContents struct { DatasetPrefix string QueryAuthToken string GRPCServerParameters GRPCServerParameters + AdditionalErrorFields []string } type InMemoryCollectorCacheCapacity struct { @@ -153,6 +154,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("GRPCServerParameters.MaxConnectionAgeGrace", time.Duration(0)) c.SetDefault("GRPCServerParameters.Time", 10*time.Second) c.SetDefault("GRPCServerParameters.Timeout", 2*time.Second) + c.SetDefault("AdditionalErrorFields", []string{"trace.span_id"}) c.SetConfigFile(config) err := c.ReadInConfig() @@ -870,3 +872,10 @@ func (f *fileConfig) GetPeerTimeout() time.Duration { return f.conf.PeerManagement.Timeout } + +func (f *fileConfig) GetAdditionalErrorFields() []string { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.AdditionalErrorFields +} diff --git a/config/mock.go b/config/mock.go index 83a4395a2d..3cc5911b2c 100644 --- a/config/mock.go +++ b/config/mock.go @@ -82,6 +82,7 @@ type MockConfig struct { GRPCTime time.Duration GRPCTimeout time.Duration PeerTimeout time.Duration + AdditionalErrorFields []string Mux sync.RWMutex } @@ -443,3 +444,10 @@ func (f *MockConfig) GetPeerTimeout() time.Duration { return f.PeerTimeout } + +func (f *MockConfig) GetAdditionalErrorFields() 
[]string { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.AdditionalErrorFields +} diff --git a/config_complete.toml b/config_complete.toml index 3fdfdd6be5..265e95a719 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -116,12 +116,25 @@ EnvironmentCacheTTL = "1h" # are not typically needed in normal operation. # Can be specified in the environment as REFINERY_QUERY_AUTH_TOKEN. # If left unspecified, the /query endpoints are inaccessible. +# Not eligible for live reload. # QueryAuthToken = "some-random-value" # AddRuleReasonToTrace causes traces that are sent to Honeycomb to include the field `meta.refinery.reason`. # This field contains text indicating which rule was evaluated that caused the trace to be included. +# Eligible for live reload. # AddRuleReasonToTrace = true +# AdditionalErrorFields should be a list of span fields that should be included when logging +# errors that happen during ingestion of events (for example, the span too large error). +# This is primarily useful in trying to track down misbehaving senders in a large installation. +# The fields `dataset`, `apihost`, and `environment` are always included. +# If a field is not present in the span, it will not be present in the error log. +# Default is ["trace.span_id"]. +# Eligible for live reload. +AdditionalErrorFields = [ + "trace.span_id" +] + ############################ ## Implementation Choices ## ############################ diff --git a/internal/peer/peers_test.go b/internal/peer/peers_test.go index 5e6917e4ab..5ec7f8137a 100644 --- a/internal/peer/peers_test.go +++ b/internal/peer/peers_test.go @@ -1,6 +1,3 @@ -//go:build all || race -// +build all race - package peer import ( diff --git a/logger/honeycomb.go b/logger/honeycomb.go index f5f6c76fd9..85ef57c049 100644 --- a/logger/honeycomb.go +++ b/logger/honeycomb.go @@ -240,7 +240,7 @@ func (h *HoneycombEntry) Logf(f string, args ...interface{}) { ev := h.builder.NewEvent() msg := fmt.Sprintf(f, args...) 
ev.AddField("msg", msg) - ev.Metadata = map[string]string{ + ev.Metadata = map[string]any{ "api_host": ev.APIHost, "dataset": ev.Dataset, } diff --git a/metrics/honeycomb.go b/metrics/honeycomb.go index 32f0a462d9..43a2c6d944 100644 --- a/metrics/honeycomb.go +++ b/metrics/honeycomb.go @@ -222,7 +222,7 @@ func (h *HoneycombMetrics) reportToHoneycommb(ctx context.Context) { return case <-tick.C: ev := h.libhClient.NewEvent() - ev.Metadata = map[string]string{ + ev.Metadata = map[string]any{ "api_host": ev.APIHost, "dataset": ev.Dataset, } diff --git a/sample/sample_test.go b/sample/sample_test.go index e06f70aeb7..b1b804b968 100644 --- a/sample/sample_test.go +++ b/sample/sample_test.go @@ -1,7 +1,6 @@ package sample import ( - "io/ioutil" "os" "testing" @@ -30,11 +29,11 @@ func TestDependencyInjection(t *testing.T) { } func TestDatasetPrefix(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "") + tmpDir, err := os.MkdirTemp("", "") assert.NoError(t, err) defer os.RemoveAll(tmpDir) - configFile, err := ioutil.TempFile(tmpDir, "*.toml") + configFile, err := os.CreateTemp(tmpDir, "*.toml") assert.NoError(t, err) _, err = configFile.Write([]byte(` @@ -57,7 +56,7 @@ func TestDatasetPrefix(t *testing.T) { assert.NoError(t, err) configFile.Close() - rulesFile, err := ioutil.TempFile(tmpDir, "*.toml") + rulesFile, err := os.CreateTemp(tmpDir, "*.toml") assert.NoError(t, err) _, err = rulesFile.Write([]byte(` diff --git a/transmit/transmit.go b/transmit/transmit.go index 30309c7a47..64db70f359 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -98,12 +98,19 @@ func (d *DefaultTransmission) EnqueueEvent(ev *types.Event) { libhEv.SampleRate = ev.SampleRate libhEv.Timestamp = ev.Timestamp // metadata is used to make error logs more helpful when processing libhoney responses - libhEv.Metadata = map[string]string{ + metadata := map[string]any{ "api_host": ev.APIHost, "dataset": ev.Dataset, "environment": ev.Environment, } + for _, k := range 
d.Config.GetAdditionalErrorFields() { + if v, ok := ev.Data[k]; ok { + metadata[k] = v + } + } + libhEv.Metadata = metadata + for k, v := range ev.Data { libhEv.AddField(k, v) } @@ -160,6 +167,11 @@ func (d *DefaultTransmission) processResponses( "dataset": dataset, "environment": environment, }) + for _, k := range d.Config.GetAdditionalErrorFields() { + if v, ok := r.Metadata.(map[string]any)[k]; ok { + log = log.WithField(k, v) + } + } if r.Err != nil { log = log.WithField("error", r.Err.Error()) } From 7529d6b847cf4138df24f14f8e4e48de307ac4a2 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Mon, 19 Sep 2022 09:02:38 -0400 Subject: [PATCH 231/351] Fix variable shadowing bug (#519) ## Which problem is this PR solving? The tip of refinery was crashing in metrics with a nil pointer error. It was puzzling why a function that was supposed to never return nil was in fact returning nil. The problem was a subtle variable shadowing issue that I only found by writing a test script and running it in the debugger: ```go metric, ok = metrics[name] if !ok { // create new metric using create function and add to map metric := createMetric(name) metrics[name] = metric } lock.Unlock() return metric ``` The := on the createMetric created a new variable called `metric` that was scoped to the body of the if clause. The next line properly assigned it to the map and then threw it away and returned the nil pointer created on the top line. 
## Short description of the changes - Removed an unneeded `:` - Added some tests for the getOrAdd function - Fixed a couple of spelling errors and a linter nit --- metrics/honeycomb.go | 10 ++--- metrics/honeycomb_test.go | 94 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 99 insertions(+), 5 deletions(-) create mode 100644 metrics/honeycomb_test.go diff --git a/metrics/honeycomb.go b/metrics/honeycomb.go index 43a2c6d944..790117e82d 100644 --- a/metrics/honeycomb.go +++ b/metrics/honeycomb.go @@ -138,9 +138,9 @@ func (h *HoneycombMetrics) initLibhoney(mc config.HoneycombMetricsConfig) error h.libhClient.AddDynamicField("memory_inuse", getAlloc) startTime := time.Now() h.libhClient.AddDynamicField("process_uptime_seconds", func() interface{} { - return time.Now().Sub(startTime) / time.Second + return time.Since(startTime) / time.Second }) - go h.reportToHoneycommb(ctx) + go h.reportToHoneycomb(ctx) return nil } @@ -213,7 +213,7 @@ func (h *HoneycombMetrics) readMemStats(mem *runtime.MemStats) { *mem = h.latestMemStats } -func (h *HoneycombMetrics) reportToHoneycommb(ctx context.Context) { +func (h *HoneycombMetrics) reportToHoneycomb(ctx context.Context) { tick := time.NewTicker(time.Duration(h.reportingFreq) * time.Second) for { select { @@ -284,7 +284,7 @@ func (h *HoneycombMetrics) Register(name string, metricType string) { case "histogram": getOrAdd(&h.lock, name, h.histograms, createHistogram) default: - h.Logger.Debug().Logf("unspported metric type %s", metricType) + h.Logger.Debug().Logf("unsupported metric type %s", metricType) } } @@ -308,7 +308,7 @@ func getOrAdd[T *counter | *gauge | *histogram](lock *sync.RWMutex, name string, metric, ok = metrics[name] if !ok { // create new metric using create function and add to map - metric := createMetric(name) + metric = createMetric(name) metrics[name] = metric } lock.Unlock() diff --git a/metrics/honeycomb_test.go b/metrics/honeycomb_test.go new file mode 100644 index 0000000000..3cd8af604c --- 
/dev/null +++ b/metrics/honeycomb_test.go @@ -0,0 +1,94 @@ +package metrics + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +// These tests do a concurrency check for the getOrAdd lock semantics, and generally verify that getOrAdd +// is functional under load. +func Test_getOrAdd_counter(t *testing.T) { + var lock *sync.RWMutex = &sync.RWMutex{} + var metrics map[string]*counter = make(map[string]*counter) + + const nthreads = 5 + + wg := sync.WaitGroup{} + + for i := 0; i < nthreads; i++ { + wg.Add(1) + go func() { + for j := 0; j < 1000; j++ { + name := "foo" + var ctr *counter = getOrAdd(lock, name, metrics, createCounter) + ctr.lock.Lock() + ctr.val++ + ctr.lock.Unlock() + } + wg.Done() + }() + } + wg.Wait() + + var ctr *counter = getOrAdd(lock, "foo", metrics, createCounter) + assert.Equal(t, nthreads*1000, ctr.val) +} + +func Test_getOrAdd_gauge(t *testing.T) { + var lock *sync.RWMutex = &sync.RWMutex{} + var metrics map[string]*gauge = make(map[string]*gauge) + + const nthreads = 5 + + wg := sync.WaitGroup{} + + for i := 0; i < nthreads; i++ { + wg.Add(1) + go func() { + for j := 0; j < 1000; j++ { + name := "foo" + var g *gauge = getOrAdd(lock, name, metrics, createGauge) + g.lock.Lock() + g.val++ + g.lock.Unlock() + } + wg.Done() + }() + } + wg.Wait() + + var g *gauge = getOrAdd(lock, "foo", metrics, createGauge) + assert.Equal(t, float64(nthreads*1000), g.val) +} + +func Test_getOrAdd_histogram(t *testing.T) { + var lock *sync.RWMutex = &sync.RWMutex{} + var metrics map[string]*histogram = make(map[string]*histogram) + + const nthreads = 5 + + wg := sync.WaitGroup{} + + for i := 0; i < nthreads; i++ { + wg.Add(1) + go func() { + for j := 0; j < 1000; j++ { + name := "foo" + var h *histogram = getOrAdd(lock, name, metrics, createHistogram) + h.lock.Lock() + if len(h.vals) == 0 { + h.vals = append(h.vals, 0) + } + h.vals[0]++ + h.lock.Unlock() + } + wg.Done() + }() + } + wg.Wait() + + var h *histogram = 
getOrAdd(lock, "foo", metrics, createHistogram) + assert.Equal(t, float64(nthreads*1000), h.vals[0]) +} From c4ccf3cbdd484c30a95cf19183fee0459e124558 Mon Sep 17 00:00:00 2001 From: Jamie Danielson Date: Mon, 19 Sep 2022 12:26:46 -0400 Subject: [PATCH 232/351] prepare release v1.17.0 (#517) Co-authored-by: Vera Reynolds --- CHANGELOG.md | 38 ++++++++++++++------------------------ 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b6e3e5cea..8fa03a4dac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,21 @@ # Refinery Changelog +## 1.17.0 2022-09-16 + +### Enhancements + +- Allow adding extra fields to error logs (#514) | [@kentquirk](https://github.com/kentquirk) +- Allow BatchTimeout to be overriden on the libhoney Transmission (#509) | [@leviwilson](https://github.com/leviwilson) + +### Fixes + +- Consolidate honeycomb metrics to use single lock & fix concurrent read/write (#511)| [@MikeGoldsmith](https://github.com/MikeGoldsmith) +- Fix variable shadowing bug (#519)| [@kentquirk](https://github.com/kentquirk) + ## 1.16.0 2022-09-09 This release contains a number of small new features to assist in running refinery more effectively: + - Adds new endpoints to help in debugging refinery rules (see README.md) - Fixes issues with SampleRate - Adds some new configuration parameters (see the *_complete.toml files for more) @@ -36,36 +49,13 @@ This release contains a number of small new features to assist in running refine - Go mod tidy (#507) | [kentquirk](https://github.com/kentquirk) ## New Contributors + - @abatilo made their first contribution in https://github.com/honeycombio/refinery/pull/498 - @thrawn01 made their first contribution in https://github.com/honeycombio/refinery/pull/491 - @epvanhouten made their first contribution in https://github.com/honeycombio/refinery/pull/508 **Full Changelog**: 
https://github.com/honeycombio/refinery/compare/v1.15.0...v1.16.0 -## 1.15.0 2022-07-01 - -### Enhancements - -- Add rule Scope configuration option to rules-based sampler (#440) | [isnotajoke](https://github.com/isnotajoke) -- Replace hand-rolled binary.BigEndian.Uint32 with the real deal (#459) | [toshok](https://github.com/toshok) -- Validate successful span scoped rules test (#465) | [MikeGoldsmith](https://github.com/MikeGoldsmith) -- Create helm-chart issue on release (#458) | [MikeGoldsmith](https://github.com/MikeGoldsmith) -- github_token needs underscore not hyphen (#464) | [@JamieDanielson](https://github.com/JamieDanielson) - -### Maintenance - -- Replace legacy with classic in readme (#457) | [MikeGoldsmith](https://github.com/MikeGoldsmith) - -### Dependencies - -- Bump github.com/spf13/viper from 1.10.1 to 1.12.0 (#461) -- Bump github.com/stretchr/testify from 1.7.1 to 1.7.2 (#467) -- Bump github.com/honeycombio/husky from 0.10.5 to 0.10.6 (#460) -- Bump github.com/klauspost/compress from 1.15.4 to 1.15.6 (#466) -- Bump github.com/prometheus/client_golang from 1.12.1 to 1.12.2 (#463) - - - ## 1.15.0 2022-07-01 ### Enhancements From 998d4a77270a8e013f82d83199df8658baee1a91 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Thu, 29 Sep 2022 14:46:44 -0400 Subject: [PATCH 233/351] Properly set metadata to values that will work. (#523) ## Which problem is this PR solving? - A previous PR (#514) changed the type of the metadata object from `map[string]string` to `map[string]any`, but I must have forgotten to hit save at some point, and this part of it (4 lines) didn't get committed. This bug actually will cause that feature to omit fields it knows about. 
## Short description of the changes - Change the type appropriately in processResponses --- transmit/transmit.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/transmit/transmit.go b/transmit/transmit.go index 64db70f359..b869a940b3 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -156,10 +156,10 @@ func (d *DefaultTransmission) processResponses( case r := <-responses: if r.Err != nil || r.StatusCode > 202 { var apiHost, dataset, environment string - if metadata, ok := r.Metadata.(map[string]string); ok { - apiHost = metadata["api_host"] - dataset = metadata["dataset"] - environment = metadata["environment"] + if metadata, ok := r.Metadata.(map[string]any); ok { + apiHost = metadata["api_host"].(string) + dataset = metadata["dataset"].(string) + environment = metadata["environment"].(string) } log := d.Logger.Error().WithFields(map[string]interface{}{ "status_code": r.StatusCode, From b0c5cd0fd1cba63ea7a5ffb0274d1a58c06fe4d6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Oct 2022 18:41:01 +0000 Subject: [PATCH 234/351] Bump github.com/spf13/viper from 1.12.0 to 1.13.0 (#527) --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index de45166d51..5878df96a9 100644 --- a/go.mod +++ b/go.mod @@ -17,12 +17,12 @@ require ( github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.9 - github.com/pelletier/go-toml/v2 v2.0.1 + github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.12.2 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.9.0 - github.com/spf13/viper v1.12.0 + 
github.com/spf13/viper v1.13.0 github.com/stretchr/testify v1.8.0 github.com/tidwall/gjson v1.14.3 github.com/vmihailenco/msgpack/v4 v4.3.11 @@ -60,7 +60,7 @@ require ( github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/subosito/gotenv v1.3.0 // indirect + github.com/subosito/gotenv v1.4.1 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect @@ -71,6 +71,6 @@ require ( google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220902135211-223410557253 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect - gopkg.in/ini.v1 v1.66.4 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 13ceeb8ee1..b6065498cd 100644 --- a/go.sum +++ b/go.sum @@ -232,8 +232,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= -github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= 
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -280,8 +280,8 @@ github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmq github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= -github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU= +github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -294,8 +294,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= -github.com/subosito/gotenv v1.3.0/go.mod 
h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= +github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw= github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -637,8 +637,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= -gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From f1b4c352d084df5814fe0dce0f610c214ae922f5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Oct 2022 18:45:47 +0000 Subject: [PATCH 235/351] Bump github.com/prometheus/client_golang from 1.12.2 to 1.13.0 (#528) --- go.mod | 6 +++--- go.sum | 18 ++++++++++++++---- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 5878df96a9..67327c2886 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( 
github.com/klauspost/compress v1.15.9 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.12.2 + github.com/prometheus/client_golang v1.13.0 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.9.0 github.com/spf13/viper v1.13.0 @@ -54,8 +54,8 @@ require ( github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect diff --git a/go.sum b/go.sum index b6065498cd..3c0c8896d3 100644 --- a/go.sum +++ b/go.sum @@ -96,9 +96,11 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod 
h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= @@ -245,8 +247,9 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -255,14 +258,16 @@ github.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -395,6 
+400,8 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b h1:ZmngSVLe/wycRns9MKikG9OWIEjGcGAkacif7oYQaUY= golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -407,6 +414,7 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -461,12 +469,14 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 33e5c66c8055a69ab72db80f55771b711533b052 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Oct 2022 18:50:56 +0000 Subject: [PATCH 236/351] Bump github.com/honeycombio/husky from 0.15.0 to 0.16.1 (#529) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 67327c2886..039f3d82c0 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/gorilla/mux v1.8.0 
github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.15.0 + github.com/honeycombio/husky v0.16.1 github.com/honeycombio/libhoney-go v1.16.0 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 @@ -43,7 +43,7 @@ require ( github.com/go-playground/locales v0.13.0 // indirect github.com/go-playground/universal-translator v0.17.0 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/leodido/go-urn v1.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect diff --git a/go.sum b/go.sum index 3c0c8896d3..64df9378fd 100644 --- a/go.sum +++ b/go.sum @@ -174,8 +174,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 h1:/sDbPb60SusIXjiJGYLUoS/rAQurQmvGWmwn2bBPM9c= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= 
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -184,8 +184,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.15.0 h1:RuYmKC1jHSB6y3C7pSXf889w8AaSzZ1JkIkLaiXlC5o= -github.com/honeycombio/husky v0.15.0/go.mod h1:ZO2AzJnnJkUoXbIQ5qLCvQuWdQOG/BZEup8pf2y5ua0= +github.com/honeycombio/husky v0.16.1 h1:kChIUn9Bi1J6PbGQcjHbZH/lihcNrpbso7fpDNobxqA= +github.com/honeycombio/husky v0.16.1/go.mod h1:LHuBxW0qybEhRFqyIR5p7yXlPEPzV8HipwPzMnEIHko= github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/honeycombio/libhoney-go v1.16.0/go.mod h1:izP4fbREuZ3vqC4HlCAmPrcPT9gxyxejRjGtCYpmBn0= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= From 1f160aa31572d5c63cd721ffcaf35a12c748ac09 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Oct 2022 18:52:07 +0000 Subject: [PATCH 237/351] Bump github.com/klauspost/compress from 1.15.9 to 1.15.11 (#531) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 039f3d82c0..550f165cf2 100644 --- a/go.mod +++ b/go.mod @@ 
-16,7 +16,7 @@ require ( github.com/honeycombio/libhoney-go v1.16.0 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.15.9 + github.com/klauspost/compress v1.15.11 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.13.0 diff --git a/go.sum b/go.sum index 64df9378fd..99058175c0 100644 --- a/go.sum +++ b/go.sum @@ -204,8 +204,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= From 719cca4290479ec2ff4480fe434c4502479c7f17 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Tue, 4 Oct 2022 08:56:14 -0600 Subject: [PATCH 238/351] Bump go version to 1.19 (#534) ## Which 
problem is this PR solving? - bumps minimum go version to 1.19 ## Short description of the changes - bumps module to use 1.19 - bump circleci to build targeting 1.19 --- .circleci/config.yml | 8 ++++---- go.mod | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6e49d91489..acd9dfd906 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -40,7 +40,7 @@ commands: jobs: test: docker: - - image: cimg/go:1.18 + - image: cimg/go:1.19 - image: redis:6 steps: - checkout @@ -64,7 +64,7 @@ jobs: build_binaries: docker: - - image: cimg/go:1.18 + - image: cimg/go:1.19 steps: - checkout - go-build: @@ -140,7 +140,7 @@ jobs: build_docker: docker: - - image: cimg/go:1.18 + - image: cimg/go:1.19 steps: - setup_googleko - checkout @@ -151,7 +151,7 @@ jobs: publish_docker: docker: - - image: cimg/go:1.18 + - image: cimg/go:1.19 steps: - setup_googleko - checkout diff --git a/go.mod b/go.mod index 550f165cf2..98a7889864 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/honeycombio/refinery -go 1.18 +go 1.19 require ( github.com/davecgh/go-spew v1.1.1 From 594c6e228343c2bdefa03e7ab029d4623c5c0288 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Tue, 4 Oct 2022 08:56:32 -0600 Subject: [PATCH 239/351] Add support for metrics api key env var (#535) ## Which problem is this PR solving? Allows setting `HoneycombMetrics.MetricsAPIKey` with a specific environment variable. - Fixes #520 ## Short description of the changes - Utilizes viper's BindEnv() function to handle env var precedence for `HoneycombMetrics.MetricsAPIKey`. - If `REFINERY_HONEYCOMB_METRICS_API_KEY` is set then it will be used. Otherwise `REFINERY_HONEYCOMB_API_KEY` will be used. As a result existing customer configs will not be affected. - Added new unit tests for this scenario. 
--- config/config_test.go | 75 +++++++++++++++++++++++++++++++++++++++++++ config/file_config.go | 2 +- 2 files changed, 76 insertions(+), 1 deletion(-) diff --git a/config/config_test.go b/config/config_test.go index b187fa2019..8048e2bcf9 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -77,6 +77,81 @@ func TestRedisPasswordEnvVar(t *testing.T) { } } +func TestMetricsAPIKeyEnvVar(t *testing.T) { + testCases := []struct { + name string + envVar string + key string + }{ + { + name: "Specific env var", + envVar: "REFINERY_HONEYCOMB_METRICS_API_KEY", + key: "abc123", + }, + { + name: "Fallback env var", + envVar: "REFINERY_HONEYCOMB_API_KEY", + key: "321cba", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + os.Setenv(tc.envVar, tc.key) + defer os.Unsetenv(tc.envVar) + + c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {}) + + if err != nil { + t.Error(err) + } + + if d, _ := c.GetHoneycombMetricsConfig(); d.MetricsAPIKey != tc.key { + t.Error("received", d, "expected", tc.key) + } + }) + } +} + +func TestMetricsAPIKeyMultipleEnvVar(t *testing.T) { + const specificKey = "abc123" + const specificEnvVarName = "REFINERY_HONEYCOMB_METRICS_API_KEY" + const fallbackKey = "this should not be set in the config" + const fallbackEnvVarName = "REFINERY_HONEYCOMB_API_KEY" + + os.Setenv(specificEnvVarName, specificKey) + defer os.Unsetenv(specificEnvVarName) + os.Setenv(fallbackEnvVarName, fallbackKey) + defer os.Unsetenv(fallbackEnvVarName) + + c, err := NewConfig("../config.toml", "../rules.toml", func(err error) {}) + + if err != nil { + t.Error(err) + } + + if d, _ := c.GetHoneycombMetricsConfig(); d.MetricsAPIKey != specificKey { + t.Error("received", d, "expected", specificKey) + } +} + +func TestMetricsAPIKeyFallbackEnvVar(t *testing.T) { + const key = "abc1234" + const envVarName = "REFINERY_HONEYCOMB_API_KEY" + os.Setenv(envVarName, key) + defer os.Unsetenv(envVarName) + + c, err := 
NewConfig("../config.toml", "../rules.toml", func(err error) {}) + + if err != nil { + t.Error(err) + } + + if d, _ := c.GetHoneycombMetricsConfig(); d.MetricsAPIKey != key { + t.Error("received", d, "expected", key) + } +} + // creates two temporary toml files from the strings passed in and returns their filenames func createTempConfigs(t *testing.T, configBody string, rulesBody string) (string, string) { tmpDir, err := os.MkdirTemp("", "") diff --git a/config/file_config.go b/config/file_config.go index 5e84578848..960d758aac 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -119,7 +119,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.BindEnv("PeerManagement.RedisUsername", "REFINERY_REDIS_USERNAME") c.BindEnv("PeerManagement.RedisPassword", "REFINERY_REDIS_PASSWORD") c.BindEnv("HoneycombLogger.LoggerAPIKey", "REFINERY_HONEYCOMB_API_KEY") - c.BindEnv("HoneycombMetrics.MetricsAPIKey", "REFINERY_HONEYCOMB_API_KEY") + c.BindEnv("HoneycombMetrics.MetricsAPIKey", "REFINERY_HONEYCOMB_METRICS_API_KEY", "REFINERY_HONEYCOMB_API_KEY") c.BindEnv("QueryAuthToken", "REFINERY_QUERY_AUTH_TOKEN") c.SetDefault("ListenAddr", "0.0.0.0:8080") c.SetDefault("PeerListenAddr", "0.0.0.0:8081") From 9e6740d8cb76b5395666c19f586da44347e44337 Mon Sep 17 00:00:00 2001 From: Shawn Poulson <92753637+Baliedge@users.noreply.github.com> Date: Thu, 6 Oct 2022 09:06:02 -0400 Subject: [PATCH 240/351] Cloud support (#521) ## Which problem is this PR solving? When hosting Refinery in a cloud environment, certain network assumptions are no longer true. Issues arise from these assumptions while hosting in our cloud environment using [Nomad](https://www.nomadproject.io/) as container orchestration. Terms, as used in Nomad: - Task: Single deployed container. - Group: Group of tasks running as a scalable service (e.g. Refinery). 
This assumption is not necessarily true under Nomad: - The IP assigned to a task's `eth0` can be accessed by other tasks in the group. Redis peering expects peers to talk to each other using their `eth0` IP. This behavior can be overridden by specifying `RedisIdentifier` in config. This value will be advertised as the peer's IP in the peer list. However, on startup, peering uses its `eth0` IP to cross-reference in the peers list found in Redis. Since the internal `eth0` IP is not the true public IP (set by `RedisIdentifier`), this check will always fail and the container exits in error. ## Short description of the changes A workaround is to treat `RedisIdentifier` as the true public IP address of the task. Thus, it should be one of the "self" IPs when checking the peers list. This behavior is reinforced in existing code in [`internal/peer/redis.go`](https://github.com/honeycombio/refinery/blob/main/internal/peer/redis.go#L291-L298) where `RedisIdentifier` is used to create a URL with the value being the host. --- internal/peer/redis.go | 32 ++++++++++++++++++--------- sharder/deterministic.go | 48 +++++++++++++++++++++++++++++++--------- 2 files changed, 59 insertions(+), 21 deletions(-) diff --git a/internal/peer/redis.go b/internal/peer/redis.go index 041433ac72..4d7be37715 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -249,6 +249,27 @@ func publicAddr(c config.Config) (string, error) { return "", err } + var myIdentifier string + + // If RedisIdentifier is set, use as identifier. + if redisIdentifier, _ := c.GetRedisIdentifier(); redisIdentifier != "" { + myIdentifier = redisIdentifier + logrus.WithField("identifier", myIdentifier).Info("using specified RedisIdentifier from config") + } else { + // Otherwise, determine idenntifier from network interface. 
+ myIdentifier, err = getIdentifierFromInterfaces(c) + if err != nil { + return "", err + } + } + + publicListenAddr := fmt.Sprintf("http://%s:%s", myIdentifier, port) + + return publicListenAddr, nil +} + +// Scan network interfaces to determine an identifier from either IP or hostname. +func getIdentifierFromInterfaces(c config.Config) (string, error) { myIdentifier, _ := os.Hostname() identifierInterfaceName, _ := c.GetIdentifierInterfaceName() @@ -288,16 +309,7 @@ func publicAddr(c config.Config) (string, error) { logrus.WithField("identifier", myIdentifier).WithField("interface", ifc.Name).Info("using identifier from interface") } - redisIdentifier, _ := c.GetRedisIdentifier() - - if redisIdentifier != "" { - myIdentifier = redisIdentifier - logrus.WithField("identifier", myIdentifier).Info("using specific identifier from config") - } - - publicListenAddr := fmt.Sprintf("http://%s:%s", myIdentifier, port) - - return publicListenAddr, nil + return myIdentifier, nil } // equal tells whether a and b contain the same elements. diff --git a/sharder/deterministic.go b/sharder/deterministic.go index af8138f1f7..89acc73fcb 100644 --- a/sharder/deterministic.go +++ b/sharder/deterministic.go @@ -15,6 +15,7 @@ import ( "github.com/honeycombio/refinery/internal/peer" "github.com/honeycombio/refinery/logger" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // shardingSalt is a random bit to make sure we don't shard the same as any @@ -117,33 +118,58 @@ func (d *DeterministicSharder) Start() error { } d.Logger.Debug().Logf("picked up local peer port of %s", localPort) - // get my local interfaces - localAddrs, err := net.InterfaceAddrs() - if err != nil { - return errors.Wrap(err, "failed to get local interface list to initialize sharder") + var localIPs []string + + // If RedisIdentifier is an IP, use as localIPs value. 
+ if redisIdentifier, err := d.Config.GetRedisIdentifier(); err == nil && redisIdentifier != "" { + if ip := net.ParseIP(redisIdentifier); ip != nil { + d.Logger.Debug().Logf("Using RedisIdentifier as public IP: %s", redisIdentifier) + localIPs = []string{redisIdentifier} + } + } + + // Otherwise, get my local interfaces' IPs. + if len(localIPs) == 0 { + localAddrs, err := net.InterfaceAddrs() + if err != nil { + return errors.Wrap(err, "failed to get local interface list to initialize sharder") + } + localIPs = make([]string, len(localAddrs)) + for i, addr := range localAddrs { + addrStr := addr.String() + ip, _, err := net.ParseCIDR(addrStr) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to parse CIDR for local IP %s", addrStr)) + } + localIPs[i] = ip.String() + } } // go through peer list, resolve each address, see if any of them match any // local interface. Note that this assumes only one instance of Refinery per // host can run. for i, peerShard := range d.peers { - d.Logger.Debug().WithField("peer", peerShard).WithField("self", localAddrs).Logf("Considering peer looking for self") + d.Logger.Debug().WithFields(logrus.Fields{ + "peer": peerShard, + "self": localIPs, + }).Logf("Considering peer looking for self") peerIPList, err := net.LookupHost(peerShard.ipOrHost) if err != nil { // TODO something better than fail to start if peer is missing return errors.Wrap(err, fmt.Sprintf("couldn't resolve peer hostname %s", peerShard.ipOrHost)) } for _, peerIP := range peerIPList { - for _, localIP := range localAddrs { - ipAddr, _, err := net.ParseCIDR(localIP.String()) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to parse CIDR for local IP %s", localIP.String())) - } - if peerIP == ipAddr.String() { + for _, ipAddr := range localIPs { + if peerIP == ipAddr { if peerShard.port == localPort { d.Logger.Debug().WithField("peer", peerShard).Logf("Found myself in peer list") found = true selfIndexIntoPeerList = i + } else { + 
d.Logger.Debug().WithFields(logrus.Fields{ + "peer": peerShard, + "expectedPort": localPort, + }).Logf("Peer port mismatch") } } } From 7b5733a8dfc6b9bf3eb2e16fc734566e88ac5abe Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Thu, 6 Oct 2022 14:50:28 -0400 Subject: [PATCH 241/351] Track span count and optionally add it to root (#532) ## Which problem is this PR solving? Adds the ability to add the trace's span count to the root span of the trace, and does it in a way that allows the rules engine to query it as `meta.span_count`. This would allow rules that depend on span count -- for example, a rule that adjusts sample rate so that larger traces are kept less often. Note that this value is not available if the root span hasn't arrived when the TraceTimeout completes; at this point, the rules will be evaluated without a `span_count` because there is no root span to put it on. If the root span arrives late, it will be updated with the total number of spans that have arrived at that point, so that the value stored in Honeycomb is close to correct. If more spans arrive later, they will not update the value in the root span. Closes #524 Closes #483 --- Makefile | 3 + collect/collect.go | 43 ++++++++---- collect/collect_test.go | 141 ++++++++++++++++++++++++++++++++++++++++ config/config.go | 2 + config/file_config.go | 9 +++ config/mock.go | 8 +++ config_complete.toml | 8 +++ sample/rules.go | 12 ++-- sample/rules_test.go | 42 ++++++++++++ types/event.go | 7 +- 10 files changed, 256 insertions(+), 19 deletions(-) diff --git a/Makefile b/Makefile index 032c4a401a..d5ae8318b2 100644 --- a/Makefile +++ b/Makefile @@ -44,6 +44,9 @@ dockerize.tar.gz: @echo @echo "+++ Retrieving dockerize tool for Redis readiness check." 
@echo +# make sure that file is available + sudo apt-get update + sudo apt-get -y install file curl --location --silent --show-error \ --output dockerize.tar.gz \ https://github.com/jwilder/dockerize/releases/download/${DOCKERIZE_VERSION}/${DOCKERIZE_RELEASE_ASSET} \ diff --git a/collect/collect.go b/collect/collect.go index 59619c3e7a..7863d1c252 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -78,8 +78,9 @@ type InMemCollector struct { // our decision for the future, so any delinquent spans that show up later can // be dropped or passed along. type traceSentRecord struct { - keep bool // true if the trace was kept, false if it was dropped - rate uint // sample rate used when sending the trace + keep bool // true if the trace was kept, false if it was dropped + rate uint // sample rate used when sending the trace + spanCount int64 // number of spans in the trace (we decorate the root span with this) } func (i *InMemCollector) Start() error { @@ -271,7 +272,7 @@ func (i *InMemCollector) collect() { i.Metrics.Histogram("collector_incoming_queue", float64(len(i.incoming))) i.Metrics.Histogram("collector_peer_queue", float64(len(i.fromPeer))) - // Always drain peer channel before doing anyhting else. By processing peer + // Always drain peer channel before doing anything else. By processing peer // traffic preferentially we avoid the situation where the cluster essentially // deadlocks because peers are waiting to get their events handed off to each // other. 
@@ -329,7 +330,11 @@ func (i *InMemCollector) processSpan(sp *types.Span) { if sentRecord, found := i.sentTraceCache.Get(sp.TraceID); found { if sr, ok := sentRecord.(*traceSentRecord); ok { i.Metrics.Increment("trace_sent_cache_hit") - i.dealWithSentTrace(sr.keep, sr.rate, sp) + // bump the count of records on this trace -- if the root span isn't + // the last late span, then it won't be perfect, but it will be better than + // having none at all + sentRecord.(*traceSentRecord).spanCount++ + i.dealWithSentTrace(sr.keep, sr.rate, sentRecord.(*traceSentRecord).spanCount, sp) return } } @@ -360,7 +365,7 @@ func (i *InMemCollector) processSpan(sp *types.Span) { // if the trace we got back from the cache has already been sent, deal with the // span. if trace.Sent { - i.dealWithSentTrace(trace.KeepSample, trace.SampleRate, sp) + i.dealWithSentTrace(trace.KeepSample, trace.SampleRate, trace.SpanCount(), sp) } // great! trace is live. add the span. @@ -375,14 +380,14 @@ func (i *InMemCollector) processSpan(sp *types.Span) { } trace.SendBy = time.Now().Add(timeout) - trace.HasRootSpan = true + trace.RootSpan = sp } } // dealWithSentTrace handles a span that has arrived after the sampling decision // on the trace has already been made, and it obeys that decision by either // sending the span immediately or dropping it. 
-func (i *InMemCollector) dealWithSentTrace(keep bool, sampleRate uint, sp *types.Span) { +func (i *InMemCollector) dealWithSentTrace(keep bool, sampleRate uint, spanCount int64, sp *types.Span) { if i.Config.GetIsDryRun() { field := i.Config.GetDryRunFieldName() // if dry run mode is enabled, we keep all traces and mark the spans with the sampling decision @@ -396,6 +401,10 @@ func (i *InMemCollector) dealWithSentTrace(keep bool, sampleRate uint, sp *types if keep { i.Logger.Debug().WithField("trace_id", sp.TraceID).Logf("Sending span because of previous decision to send trace") mergeTraceAndSpanSampleRates(sp, sampleRate) + // if this span is a late root span, possibly update it with our current span count + if i.Config.GetAddSpanCountToRoot() && isRootSpan(sp) { + sp.Data["meta.span_count"] = spanCount + } i.Transmission.EnqueueSpan(sp) return } @@ -451,8 +460,8 @@ func (i *InMemCollector) send(trace *types.Trace) { traceDur := time.Since(trace.StartTime) i.Metrics.Histogram("trace_duration_ms", float64(traceDur.Milliseconds())) - i.Metrics.Histogram("trace_span_count", float64(len(trace.GetSpans()))) - if trace.HasRootSpan { + i.Metrics.Histogram("trace_span_count", float64(trace.SpanCount())) + if trace.RootSpan != nil { i.Metrics.Increment("trace_send_has_root") } else { i.Metrics.Increment("trace_send_no_root") @@ -472,6 +481,11 @@ func (i *InMemCollector) send(trace *types.Trace) { logFields["environment"] = samplerKey } + // If we have a root span, update it with the count before determining the SampleRate. 
+ if i.Config.GetAddSpanCountToRoot() && trace.RootSpan != nil { + trace.RootSpan.Data["meta.span_count"] = trace.SpanCount() + } + // use sampler key to find sampler; create and cache if not found if sampler, found = i.datasetSamplers[samplerKey]; !found { sampler = i.SamplerFactory.GetSamplerImplementationForKey(samplerKey, isLegacyKey) @@ -486,8 +500,9 @@ func (i *InMemCollector) send(trace *types.Trace) { // record this decision in the sent record LRU for future spans sentRecord := traceSentRecord{ - keep: shouldSend, - rate: rate, + keep: shouldSend, + rate: rate, + spanCount: trace.SpanCount(), } i.sentTraceCache.Add(trace.TraceID, &sentRecord) @@ -509,6 +524,12 @@ func (i *InMemCollector) send(trace *types.Trace) { sp.Data["meta.refinery.reason"] = reason } + // update the root span (if we have one, which we might not if the trace timed out) + // with the final total as of our send time + if i.Config.GetAddSpanCountToRoot() && isRootSpan(sp) { + sp.Data["meta.span_count"] = sentRecord.spanCount + } + if i.Config.GetIsDryRun() { field := i.Config.GetDryRunFieldName() sp.Data[field] = shouldSend diff --git a/collect/collect_test.go b/collect/collect_test.go index 4eea2bee08..ea4a2d82f3 100644 --- a/collect/collect_test.go +++ b/collect/collect_test.go @@ -723,3 +723,144 @@ func TestDependencyInjection(t *testing.T) { t.Error(err) } } + +// TestAddSpanCount tests that adding a root span winds up with a trace object in +// the cache and that that trace gets span count added to it +func TestAddSpanCount(t *testing.T) { + transmission := &transmit.MockTransmission{} + transmission.Start() + conf := &config.MockConfig{ + GetSendDelayVal: 0, + GetTraceTimeoutVal: 60 * time.Second, + GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1}, + SendTickerVal: 2 * time.Millisecond, + AddSpanCountToRoot: true, + } + coll := &InMemCollector{ + Config: conf, + Logger: &logger.NullLogger{}, + Transmission: transmission, + Metrics: &metrics.NullMetrics{}, + 
SamplerFactory: &sample.SamplerFactory{ + Config: conf, + Logger: &logger.NullLogger{}, + }, + } + c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{}) + coll.cache = c + stc, err := lru.New(15) + assert.NoError(t, err, "lru cache should start") + coll.sentTraceCache = stc + + coll.incoming = make(chan *types.Span, 5) + coll.fromPeer = make(chan *types.Span, 5) + coll.datasetSamplers = make(map[string]sample.Sampler) + go coll.collect() + defer coll.Stop() + + var traceID = "mytrace" + + span := &types.Span{ + TraceID: traceID, + Event: types.Event{ + Dataset: "aoeu", + Data: map[string]interface{}{ + "trace.parent_id": "unused", + }, + APIKey: legacyAPIKey, + }, + } + coll.AddSpanFromPeer(span) + time.Sleep(conf.SendTickerVal * 2) + assert.Equal(t, traceID, coll.getFromCache(traceID).TraceID, "after adding the span, we should have a trace in the cache with the right trace ID") + assert.Equal(t, 0, len(transmission.Events), "adding a non-root span should not yet send the span") + // ok now let's add the root span and verify that both got sent + rootSpan := &types.Span{ + TraceID: traceID, + Event: types.Event{ + Dataset: "aoeu", + Data: map[string]interface{}{}, + APIKey: legacyAPIKey, + }, + } + coll.AddSpan(rootSpan) + time.Sleep(conf.SendTickerVal * 2) + assert.Nil(t, coll.getFromCache(traceID), "after adding a leaf and root span, it should be removed from the cache") + transmission.Mux.RLock() + assert.Equal(t, 2, len(transmission.Events), "adding a root span should send all spans in the trace") + assert.Equal(t, nil, transmission.Events[0].Data["meta.span_count"], "child span metadata should NOT be populated with span count") + assert.Equal(t, int64(2), transmission.Events[1].Data["meta.span_count"], "root span metadata should be populated with span count") + transmission.Mux.RUnlock() +} + +// TestLateRootGetsSpanCount tests that the root span gets decorated with the right span count +// even if the trace had already been sent +func 
TestLateRootGetsSpanCount(t *testing.T) { + transmission := &transmit.MockTransmission{} + transmission.Start() + conf := &config.MockConfig{ + GetSendDelayVal: 0, + GetTraceTimeoutVal: 5 * time.Millisecond, + GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1}, + SendTickerVal: 2 * time.Millisecond, + AddSpanCountToRoot: true, + } + coll := &InMemCollector{ + Config: conf, + Logger: &logger.NullLogger{}, + Transmission: transmission, + Metrics: &metrics.NullMetrics{}, + SamplerFactory: &sample.SamplerFactory{ + Config: conf, + Logger: &logger.NullLogger{}, + }, + } + c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{}) + coll.cache = c + stc, err := lru.New(15) + assert.NoError(t, err, "lru cache should start") + coll.sentTraceCache = stc + + coll.incoming = make(chan *types.Span, 5) + coll.fromPeer = make(chan *types.Span, 5) + coll.datasetSamplers = make(map[string]sample.Sampler) + go coll.collect() + defer coll.Stop() + + var traceID = "mytrace" + + span := &types.Span{ + TraceID: traceID, + Event: types.Event{ + Dataset: "aoeu", + Data: map[string]interface{}{ + "trace.parent_id": "unused", + }, + APIKey: legacyAPIKey, + }, + } + coll.AddSpanFromPeer(span) + time.Sleep(conf.SendTickerVal * 10) + + trace := coll.getFromCache(traceID) + assert.Nil(t, trace, "trace should have been sent although the root span hasn't arrived") + assert.Equal(t, 1, len(transmission.Events), "adding a non-root span and waiting should send the span") + // now we add the root span and verify that both got sent and that the root span had the span count + rootSpan := &types.Span{ + TraceID: traceID, + Event: types.Event{ + Dataset: "aoeu", + Data: map[string]interface{}{}, + APIKey: legacyAPIKey, + }, + } + coll.AddSpan(rootSpan) + time.Sleep(conf.SendTickerVal * 2) + assert.Nil(t, coll.getFromCache(traceID), "after adding a leaf and root span, it should be removed from the cache") + transmission.Mux.RLock() + assert.Equal(t, 2, 
len(transmission.Events), "adding a root span should send all spans in the trace") + assert.Equal(t, nil, transmission.Events[0].Data["meta.span_count"], "child span metadata should NOT be populated with span count") + assert.Equal(t, int64(2), transmission.Events[1].Data["meta.span_count"], "root span metadata should be populated with span count") + transmission.Mux.RUnlock() + +} diff --git a/config/config.go b/config/config.go index 4bfe3924b3..b053f27cf8 100644 --- a/config/config.go +++ b/config/config.go @@ -166,4 +166,6 @@ type Config interface { GetPeerTimeout() time.Duration GetAdditionalErrorFields() []string + + GetAddSpanCountToRoot() bool } diff --git a/config/file_config.go b/config/file_config.go index 960d758aac..1bf2e2f90a 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -55,6 +55,7 @@ type configContents struct { QueryAuthToken string GRPCServerParameters GRPCServerParameters AdditionalErrorFields []string + AddSpanCountToRoot bool } type InMemoryCollectorCacheCapacity struct { @@ -155,6 +156,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("GRPCServerParameters.Time", 10*time.Second) c.SetDefault("GRPCServerParameters.Timeout", 2*time.Second) c.SetDefault("AdditionalErrorFields", []string{"trace.span_id"}) + c.SetDefault("AddSpanCountToRoot", false) c.SetConfigFile(config) err := c.ReadInConfig() @@ -879,3 +881,10 @@ func (f *fileConfig) GetAdditionalErrorFields() []string { return f.conf.AdditionalErrorFields } + +func (f *fileConfig) GetAddSpanCountToRoot() bool { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.AddSpanCountToRoot +} diff --git a/config/mock.go b/config/mock.go index 3cc5911b2c..0c9ef034d3 100644 --- a/config/mock.go +++ b/config/mock.go @@ -83,6 +83,7 @@ type MockConfig struct { GRPCTimeout time.Duration PeerTimeout time.Duration AdditionalErrorFields []string + AddSpanCountToRoot bool Mux sync.RWMutex } @@ -451,3 +452,10 @@ func (f *MockConfig) 
GetAdditionalErrorFields() []string { return f.AdditionalErrorFields } + +func (f *MockConfig) GetAddSpanCountToRoot() bool { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.AddSpanCountToRoot +} diff --git a/config_complete.toml b/config_complete.toml index 265e95a719..8ce344dd5e 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -135,6 +135,14 @@ AdditionalErrorFields = [ "trace.span_id" ] +# AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate +# the number of child spans on the trace at the time the sampling decision was made. +# This value is available to the rules-based sampler, making it possible to write rules that +# are dependent upon the number of spans in the trace. +# Default is false. +# Eligible for live reload. +# AddSpanCountToRoot = true + ############################ ## Implementation Choices ## ############################ diff --git a/sample/rules.go b/sample/rules.go index e1219a2621..a255df25b5 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -154,21 +154,19 @@ func ruleMatchesSpanInTrace(trace *types.Trace, rule *config.RulesBasedSamplerRu } for _, span := range trace.GetSpans() { - // the number of conditions that match this span. - // incremented later on after we match a condition - // since we need to match *all* conditions on a single span, we reset in each iteration of the loop. - matchCount := 0 + ruleMatched := true for _, condition := range rule.Condition { // whether this condition is matched by this span. 
value, exists := extractValueFromSpan(span, condition, checkNestedFields) - if conditionMatchesValue(condition, value, exists) { - matchCount++ + if !conditionMatchesValue(condition, value, exists) { + ruleMatched = false + break // if any condition fails, we can't possibly succeed, so exit inner loop early } } // If this span was matched by every condition, then the rule as a whole // matches (and we can return) - if matchCount == len(rule.Condition) { + if ruleMatched { return true } } diff --git a/sample/rules_test.go b/sample/rules_test.go index 3957844790..61b1aaec6c 100644 --- a/sample/rules_test.go +++ b/sample/rules_test.go @@ -496,6 +496,48 @@ func TestRules(t *testing.T) { ExpectedKeep: true, ExpectedRate: 10, }, + { + Rules: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "Check root span for span count", + SampleRate: 1, + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "meta.span_count", + Operator: "=", + Value: int(2), + }, + }, + }, + }, + }, + Spans: []*types.Span{ + { + Event: types.Event{ + Data: map[string]interface{}{ + "trace.trace_id": "12345", + "trace.span_id": "54321", + "meta.span_count": int64(2), + "test": int64(2), + }, + }, + }, + { + Event: types.Event{ + Data: map[string]interface{}{ + "trace.trace_id": "12345", + "trace.span_id": "654321", + "trace.parent_id": "54321", + "test": int64(2), + }, + }, + }, + }, + ExpectedName: "Check root span for span count", + ExpectedKeep: true, + ExpectedRate: 1, + }, } for _, d := range data { diff --git a/types/event.go b/types/event.go index 43ca9a2d48..84f6f66d8c 100644 --- a/types/event.go +++ b/types/event.go @@ -52,7 +52,7 @@ type Trace struct { // Used to calculate how long traces spend sitting in Refinery StartTime time.Time - HasRootSpan bool + RootSpan *Span // spans is the list of spans in this trace spans []*Span @@ -68,6 +68,11 @@ func (t *Trace) GetSpans() []*Span { return t.spans } +// SpanCount gets the number of spans currently 
in this trace as int64 +func (t *Trace) SpanCount() int64 { + return int64(len(t.spans)) +} + func (t *Trace) GetSamplerKey() (string, bool) { if IsLegacyAPIKey(t.APIKey) { return t.Dataset, true From fc7f1364b8dd04913f3b1e638a1f11db2cb11936 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Tue, 11 Oct 2022 11:14:36 -0400 Subject: [PATCH 242/351] maint: add new project workflow (#537) --- .github/workflows/add-to-project-v2.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .github/workflows/add-to-project-v2.yml diff --git a/.github/workflows/add-to-project-v2.yml b/.github/workflows/add-to-project-v2.yml new file mode 100644 index 0000000000..5d569202b2 --- /dev/null +++ b/.github/workflows/add-to-project-v2.yml @@ -0,0 +1,15 @@ +name: Add to project +on: + issues: + types: [opened] + pull_request_target: + types: [opened] +jobs: + add-to-project: + runs-on: ubuntu-latest + name: Add issues and PRs to project + steps: + - uses: actions/add-to-project@main + with: + project-url: https://github.com/orgs/honeycombio/projects/11 + github-token: ${{ secrets.GHPROJECTS_TOKEN }} From fe64ed788ab7c0897e708569d04f954df5c7d999 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Wed, 12 Oct 2022 16:58:35 -0400 Subject: [PATCH 243/351] Upgrade Husky to v0.17.0 (#538) ## Which problem is this PR solving? - Bump to latest husky for decoration of span events and links ## Short description of the changes - Bump husky, run go mod tidy. 
--- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 98a7889864..5ed279d21d 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.16.1 + github.com/honeycombio/husky v0.17.0 github.com/honeycombio/libhoney-go v1.16.0 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 @@ -27,7 +27,7 @@ require ( github.com/tidwall/gjson v1.14.3 github.com/vmihailenco/msgpack/v4 v4.3.11 golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b // indirect - google.golang.org/grpc v1.49.0 + google.golang.org/grpc v1.50.0 google.golang.org/protobuf v1.28.1 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 99058175c0..89f115abd3 100644 --- a/go.sum +++ b/go.sum @@ -184,8 +184,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.16.1 h1:kChIUn9Bi1J6PbGQcjHbZH/lihcNrpbso7fpDNobxqA= -github.com/honeycombio/husky v0.16.1/go.mod h1:LHuBxW0qybEhRFqyIR5p7yXlPEPzV8HipwPzMnEIHko= +github.com/honeycombio/husky v0.17.0 h1:Or5DuZ+jEoO7SYblGyxnBI7vRNhs2xq83jaLOBcHkIQ= +github.com/honeycombio/husky v0.17.0/go.mod h1:oR3+rfUql/AlYk/RY4ougzXSKvDGtmeIVGac0nK0qOs= github.com/honeycombio/libhoney-go v1.16.0 
h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/honeycombio/libhoney-go v1.16.0/go.mod h1:izP4fbREuZ3vqC4HlCAmPrcPT9gxyxejRjGtCYpmBn0= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -621,8 +621,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0 h1:fPVVDxY9w++VjTZsYvXWqEf9Rqar/e+9zYfxKK+W+YU= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From e71f192376b949a842826d5597539dd537f16c5b Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Wed, 12 Oct 2022 15:06:03 -0600 Subject: [PATCH 244/351] prepare release v1.18.0 (#539) ## Which problem is this PR solving? 
- prepare for 1.18.0 release ## Short description of the changes - updates changelog --- CHANGELOG.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8fa03a4dac..53ef94efd4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,34 @@ # Refinery Changelog +## 1.18.0 2022-10-12 + +### Enhancements + +- Track span count and optionally add it to root (#532) | [@kentquirk](https://github.com/kentquirk) +- Add support for metrics api key env var (#535) | [@TylerHelmuth](https://github.com/TylerHelmuth) + +### Fixes + +- RedisIdentifier now operates properly in more circumstances (#521) | [@Baliedge](https://github.com/Baliedge) +- Properly set metadata to values that will work. (#523) | [@kentquirk](https://github.com/kentquirk) + +### Maintenance + +- maint: add new project workflow (#537) | [@vreynolds](https://github.com/vreynolds) +- Bump go version to 1.19 (#534) | [@TylerHelmuth](https://github.com/TylerHelmuth) +- Bump github.com/klauspost/compress from 1.15.9 to 1.15.11 (#531) +- Bump github.com/honeycombio/husky from 0.15.0 to 0.16.1 (#529) +- Bump github.com/prometheus/client_golang from 1.12.2 to 1.13.0 (#528) +- Bump github.com/spf13/viper from 1.12.0 to 1.13.0 (#527) +- Bump Husky to v0.17.0 (#538) | [@kentquirk](https://github.com/kentquirk) + +### New Contributors + +- @Baliedge made their first contribution in https://github.com/honeycombio/refinery/pull/521 +- @TylerHelmuth made their first contribution in https://github.com/honeycombio/refinery/pull/534 + +**Full Changelog**: https://github.com/honeycombio/refinery/compare/v1.17.0...v1.18.0 + ## 1.17.0 2022-09-16 ### Enhancements From 7edd887683ab0cc532c22f4f88f6a7f4a274c3c0 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Mon, 17 Oct 2022 
10:24:32 -0400 Subject: [PATCH 245/351] maint: add release file (#540) --- .github/release.yml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 .github/release.yml diff --git a/.github/release.yml b/.github/release.yml new file mode 100644 index 0000000000..3d9ee33826 --- /dev/null +++ b/.github/release.yml @@ -0,0 +1,23 @@ +# .github/release.yml + +changelog: + exclude: + labels: + - no-changelog + categories: + - title: 💥 Breaking Changes 💥 + labels: + - "version: bump major" + - breaking-change + - title: 💡 Enhancements + labels: + - "type: enhancement" + - title: 🐛 Fixes + labels: + - "type: bug" + - title: 🛠 Maintenance + labels: + - "type: maintenance" + - title: 🤷 Other Changes + labels: + - "*" \ No newline at end of file From ba1b257643674d764049ff61b35b3b0891e6fef5 Mon Sep 17 00:00:00 2001 From: Vera Reynolds Date: Tue, 25 Oct 2022 18:03:34 -0400 Subject: [PATCH 246/351] maint: delete workflows for old board (#543) --- .github/workflows/add-to-project.yml | 14 -------------- .github/workflows/re-triage.yml | 12 ------------ 2 files changed, 26 deletions(-) delete mode 100644 .github/workflows/add-to-project.yml delete mode 100644 .github/workflows/re-triage.yml diff --git a/.github/workflows/add-to-project.yml b/.github/workflows/add-to-project.yml deleted file mode 100644 index ac02faa17b..0000000000 --- a/.github/workflows/add-to-project.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: Apply project management flow -on: - issues: - types: [opened] - pull_request_target: - types: [opened] -jobs: - project-management: - runs-on: ubuntu-latest - name: Apply project management flow - steps: - - uses: honeycombio/oss-management-actions/projects@v1 - with: - ghprojects-token: ${{ secrets.GHPROJECTS_TOKEN }} diff --git a/.github/workflows/re-triage.yml b/.github/workflows/re-triage.yml deleted file mode 100644 index a98366047e..0000000000 --- a/.github/workflows/re-triage.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: Re-triage issues with 
new comments -on: - issue_comment: - types: [created] -jobs: - re-triage: - runs-on: ubuntu-latest - name: Re-triage issues with new comments - steps: - - uses: honeycombio/oss-management-actions/re-triage@v1 - with: - ghprojects-token: ${{ secrets.GHPROJECTS_TOKEN }} From 93f58ee7f61fa44960d2bfe5bfae365b5ca27f52 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Wed, 9 Nov 2022 09:27:42 -0500 Subject: [PATCH 247/351] New cache management strategy (#547) Implements a new cache management strategy that ejects "large" items from the cache rather than resizing the cache. ## Which problem is this PR solving? Currently, when Refinery is under memory pressure and exceeds the configured memory maximum, it attempts to resize the trace cache to 90% of its previous size, and ejects the oldest traces. But as the trace cache is sized by trace count, when a very large trace arrives, it can cause the cache to shrink repeatedly, discarding the smaller traces, which doesn't help much. The result is that the cache can be resized to a tiny fraction of its original size to very little benefit. Furthermore, it never recovers until configuration is manually reloaded. This PR implements a different strategy: * The memory size of individual spans is calculated when they are placed into the cache * Spans also track their arrival time * The "cacheImpact" of a span is a measure of how long the span has been in the cache, multiplied by the size of the span * Traces keep track of the total impact of all the spans in the trace When a memory overrun occurs, the system sorts the traces by cache impact and ejects (makes a sampling decision and drops or sends) those traces with the largest impact until memory usage falls below the maximum. The cache is not resized. This strategy leads to more stable memory usage and fewer overruns. 
## Short description of the changes * The memory size of individual spans is calculated when they are placed into the cache * Spans also track their arrival time * The "cacheImpact" of a span is a measure of how long the span has been in the cache, multiplied by the size of the span * Traces keep track of the total impact of all the spans in the trace * There's some new telemetry * There's a config value to control switching between the two modes dynamically and it shows up in the sample config * There are tests for some of the algorithmic calculations as well as the strategy modes Note to reviewers -- this is on the large side, but I didn't see a great way to break it up. --- collect/cache/cache.go | 18 +++++ collect/cache/cache_test.go | 47 +++++++++--- collect/collect.go | 141 +++++++++++++++++++++++++++++++----- collect/collect_test.go | 107 ++++++++++++++++++++++++++- config/config.go | 2 + config/config_test.go | 6 +- config/file_config.go | 24 +++++- config/mock.go | 8 ++ config_complete.toml | 10 +++ rules_complete.toml | 16 ++++ types/event.go | 77 +++++++++++++++++++- types/event_test.go | 83 +++++++++++++++++++++ 12 files changed, 498 insertions(+), 41 deletions(-) create mode 100644 types/event_test.go diff --git a/collect/cache/cache.go b/collect/cache/cache.go index 69bf691b12..03b6a39ee2 100644 --- a/collect/cache/cache.go +++ b/collect/cache/cache.go @@ -128,6 +128,8 @@ func (d *DefaultInMemCache) GetAll() []*types.Trace { return tmp } +// TakeExpiredTraces should be called to decide which traces are past their expiration time; +// It removes and returns them. 
func (d *DefaultInMemCache) TakeExpiredTraces(now time.Time) []*types.Trace { d.Metrics.Gauge("collect_cache_capacity", float64(len(d.insertionOrder))) d.Metrics.Histogram("collect_cache_entries", float64(len(d.cache))) @@ -142,3 +144,19 @@ func (d *DefaultInMemCache) TakeExpiredTraces(now time.Time) []*types.Trace { } return res } + +// RemoveTraces accepts a set of trace IDs and removes any matching ones from +// the insertion list. This is used in the case of a cache overrun. +func (d *DefaultInMemCache) RemoveTraces(toDelete map[string]struct{}) { + d.Metrics.Gauge("collect_cache_capacity", float64(len(d.insertionOrder))) + d.Metrics.Histogram("collect_cache_entries", float64(len(d.cache))) + + for i, t := range d.insertionOrder { + if t != nil { + if _, ok := toDelete[t.TraceID]; ok { + d.insertionOrder[i] = nil + delete(d.cache, t.TraceID) + } + } + } +} diff --git a/collect/cache/cache_test.go b/collect/cache/cache_test.go index d385aa0259..02d9574223 100644 --- a/collect/cache/cache_test.go +++ b/collect/cache/cache_test.go @@ -4,11 +4,10 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/honeycombio/refinery/logger" "github.com/honeycombio/refinery/metrics" "github.com/honeycombio/refinery/types" + "github.com/stretchr/testify/assert" ) // TestCacheSetGet sets a value then fetches it back @@ -33,9 +32,9 @@ func TestBufferOverrun(t *testing.T) { c := NewInMemCache(2, s, &logger.NullLogger{}) traces := []*types.Trace{ - &types.Trace{TraceID: "abc123"}, - &types.Trace{TraceID: "def456"}, - &types.Trace{TraceID: "ghi789"}, + {TraceID: "abc123"}, + {TraceID: "def456"}, + {TraceID: "ghi789"}, } c.Set(traces[0]) @@ -52,10 +51,10 @@ func TestTakeExpiredTraces(t *testing.T) { now := time.Now() traces := []*types.Trace{ - &types.Trace{TraceID: "1", SendBy: now.Add(-time.Minute), Sent: true}, - &types.Trace{TraceID: "2", SendBy: now.Add(-time.Minute)}, - 
&types.Trace{TraceID: "3", SendBy: now.Add(time.Minute)}, - &types.Trace{TraceID: "4"}, + {TraceID: "1", SendBy: now.Add(-time.Minute), Sent: true}, + {TraceID: "2", SendBy: now.Add(-time.Minute)}, + {TraceID: "3", SendBy: now.Add(time.Minute)}, + {TraceID: "4"}, } for _, t := range traces { c.Set(t) @@ -75,3 +74,33 @@ func TestTakeExpiredTraces(t *testing.T) { assert.Equal(t, traces[2], all[i]) } } + +func TestRemoveSentTraces(t *testing.T) { + s := &metrics.MockMetrics{} + s.Start() + c := NewInMemCache(10, s, &logger.NullLogger{}) + + now := time.Now() + traces := []*types.Trace{ + {TraceID: "1", SendBy: now.Add(-time.Minute), Sent: true}, + {TraceID: "2", SendBy: now.Add(-time.Minute)}, + {TraceID: "3", SendBy: now.Add(time.Minute)}, + {TraceID: "4"}, + } + for _, t := range traces { + c.Set(t) + } + + deletes := map[string]struct{}{ + "1": {}, + "3": {}, + "4": {}, + "5": {}, // not present + } + + c.RemoveTraces(deletes) + + all := c.GetAll() + assert.Equal(t, 1, len(all)) + assert.Equal(t, traces[1], all[0]) +} diff --git a/collect/collect.go b/collect/collect.go index 7863d1c252..24f136d81b 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -47,7 +47,15 @@ func GetCollectorImplementation(c config.Config) Collector { return collector } -// InMemCollector is a single threaded collector +// These are the names of the metrics we use to track our send decisions. +const ( + TraceSendGotRoot = "trace_send_got_root" + TraceSendExpired = "trace_send_expired" + TraceSendEjectedFull = "trace_send_ejected_full" + TraceSendEjectedMemsize = "trace_send_ejected_memsize" +) + +// InMemCollector is a single threaded collector. 
type InMemCollector struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` @@ -100,12 +108,18 @@ func (i *InMemCollector) Start() error { i.Metrics.Register("collector_tosend_queue", "histogram") i.Metrics.Register("collector_incoming_queue", "histogram") i.Metrics.Register("collector_peer_queue", "histogram") + i.Metrics.Register("collector_cache_size", "gauge") + i.Metrics.Register("memory_heap_allocation", "gauge") i.Metrics.Register("trace_sent_cache_hit", "counter") i.Metrics.Register("trace_accepted", "counter") i.Metrics.Register("trace_send_kept", "counter") i.Metrics.Register("trace_send_dropped", "counter") i.Metrics.Register("trace_send_has_root", "counter") i.Metrics.Register("trace_send_no_root", "counter") + i.Metrics.Register(TraceSendGotRoot, "counter") + i.Metrics.Register(TraceSendExpired, "counter") + i.Metrics.Register(TraceSendEjectedFull, "counter") + i.Metrics.Register(TraceSendEjectedMemsize, "counter") stc, err := lru.New(imcConfig.CacheCapacity * 5) // keep 5x ring buffer size if err != nil { @@ -155,7 +169,7 @@ func (i *InMemCollector) reloadConfigs() { // pull the old cache contents into the new cache for j, trace := range existingCache.GetAll() { if j >= imcConfig.CacheCapacity { - i.send(trace) + i.send(trace, TraceSendEjectedFull) continue } c.Set(trace) @@ -174,17 +188,21 @@ func (i *InMemCollector) reloadConfigs() { // TODO add resizing the LRU sent trace cache on config reload } -func (i *InMemCollector) checkAlloc() { +func (i *InMemCollector) oldCheckAlloc() { inMemConfig, err := i.Config.GetInMemCollectorCacheCapacity() var mem runtime.MemStats runtime.ReadMemStats(&mem) + i.Metrics.Gauge("memory_heap_allocation", int64(mem.Alloc)) if err != nil || inMemConfig.MaxAlloc == 0 || mem.Alloc < inMemConfig.MaxAlloc { return } existingCache, ok := i.cache.(*cache.DefaultInMemCache) - if !ok || existingCache.GetCacheSize() < 100 { + existingSize := existingCache.GetCacheSize() + 
i.Metrics.Gauge("collector_cache_size", existingSize) + + if !ok || existingSize < 100 { i.Logger.Error().WithField("alloc", mem.Alloc).Logf( "total allocation exceeds limit, but unable to shrink cache", ) @@ -194,14 +212,13 @@ func (i *InMemCollector) checkAlloc() { // Reduce cache size by a fixed 10%, successive overages will continue to shrink. // Base this on the total number of actual traces, which may be fewer than // the cache capacity. - oldCap := existingCache.GetCacheSize() oldTraces := existingCache.GetAll() newCap := int(float64(len(oldTraces)) * 0.9) // Treat any MaxAlloc overage as an error. The configured cache capacity // should be reduced to avoid this condition. i.Logger.Error(). - WithField("cache_size.previous", oldCap). + WithField("cache_size.previous", existingSize). WithField("cache_size.new", newCap). WithField("alloc", mem.Alloc). Logf("reducing cache size due to memory overage") @@ -215,7 +232,7 @@ func (i *InMemCollector) checkAlloc() { // Send the traces we can't keep, put the rest into the new cache. for _, trace := range oldTraces[:len(oldTraces)-newCap] { - i.send(trace) + i.send(trace, TraceSendEjectedMemsize) } for _, trace := range oldTraces[len(oldTraces)-newCap:] { c.Set(trace) @@ -228,6 +245,76 @@ func (i *InMemCollector) checkAlloc() { runtime.GC() } +func (i *InMemCollector) newCheckAlloc() { + inMemConfig, err := i.Config.GetInMemCollectorCacheCapacity() + + var mem runtime.MemStats + runtime.ReadMemStats(&mem) + i.Metrics.Gauge("memory_heap_allocation", int64(mem.Alloc)) + if err != nil || inMemConfig.MaxAlloc == 0 || mem.Alloc < inMemConfig.MaxAlloc { + return + } + + // Figure out what fraction of the total cache we should remove. We'd like it to be + // enough to get us below the max capacity, but not TOO much below. + // Because our impact numbers are only the data size, reducing by enough to reach + // max alloc will actually do more than that. 
+ totalToRemove := mem.Alloc - inMemConfig.MaxAlloc + + // The size of the cache exceeds the user's intended allocation, so we're going to + // remove the traces from the cache that have had the most impact on allocation. + // To do this, we sort the traces by their CacheImpact value and then remove traces + // until the total size is less than the amount to which we want to shrink. + existingCache, ok := i.cache.(*cache.DefaultInMemCache) + if !ok { + i.Logger.Error().WithField("alloc", mem.Alloc).Logf( + "total allocation exceeds limit, but unable to control cache", + ) + return + } + allTraces := existingCache.GetAll() + timeout, err := i.Config.GetTraceTimeout() + if err != nil { + timeout = 60 * time.Second + } // Sort traces by CacheImpact, heaviest first + sort.Slice(allTraces, func(i, j int) bool { + return allTraces[i].CacheImpact(timeout) > allTraces[j].CacheImpact(timeout) + }) + + // Now start removing the biggest traces, by summing up DataSize for + // successive traces until we've crossed the totalToRemove threshold + // or just run out of traces to delete. + + cap := existingCache.GetCacheSize() + i.Metrics.Gauge("collector_cache_size", cap) + + totalDataSizeSent := 0 + tracesSent := make(map[string]struct{}) + // Send the traces we can't keep. + for _, trace := range allTraces { + tracesSent[trace.TraceID] = struct{}{} + totalDataSizeSent += trace.DataSize + i.send(trace, TraceSendEjectedMemsize) + if totalDataSizeSent > int(totalToRemove) { + break + } + } + existingCache.RemoveTraces(tracesSent) + + // Treat any MaxAlloc overage as an error so we know it's happening + i.Logger.Error(). + WithField("cache_size", cap). + WithField("alloc", mem.Alloc). + WithField("num_traces_sent", len(tracesSent)). + WithField("datasize_sent", totalDataSizeSent). + WithField("new_trace_count", existingCache.GetCacheSize()). 
+ Logf("evicting large traces early due to memory overage") + + // Manually GC here - without this we can easily end up evicting more than we + // need to, since total alloc won't be updated until after a GC pass. + runtime.GC() +} + // AddSpan accepts the incoming span to a queue and returns immediately func (i *InMemCollector) AddSpan(sp *types.Span) error { return i.add(sp, i.incoming) @@ -287,7 +374,14 @@ func (i *InMemCollector) collect() { select { case <-ticker.C: i.sendTracesInCache(time.Now()) - i.checkAlloc() + switch i.Config.GetCacheOverrunStrategy() { + case "impact": + i.newCheckAlloc() + case "resize": + i.oldCheckAlloc() + default: + i.oldCheckAlloc() + } // Briefly unlock the cache, to allow test access. i.mutex.Unlock() @@ -317,7 +411,11 @@ func (i *InMemCollector) collect() { func (i *InMemCollector) sendTracesInCache(now time.Time) { traces := i.cache.TakeExpiredTraces(now) for _, t := range traces { - i.send(t) + if t.RootSpan != nil { + i.send(t, TraceSendGotRoot) + } else { + i.send(t, TraceSendExpired) + } } } @@ -347,19 +445,20 @@ func (i *InMemCollector) processSpan(sp *types.Span) { timeout = 60 * time.Second } + now := time.Now() trace = &types.Trace{ - APIHost: sp.APIHost, - APIKey: sp.APIKey, - Dataset: sp.Dataset, - TraceID: sp.TraceID, - StartTime: time.Now(), - SendBy: time.Now().Add(timeout), - SampleRate: sp.SampleRate, // if it had a sample rate, we want to keep it + APIHost: sp.APIHost, + APIKey: sp.APIKey, + Dataset: sp.Dataset, + TraceID: sp.TraceID, + ArrivalTime: now, + SendBy: now.Add(timeout), + SampleRate: sp.SampleRate, // if it had a sample rate, we want to keep it } // push this into the cache and if we eject an unsent trace, send it ASAP ejectedTrace := i.cache.Set(trace) if ejectedTrace != nil { - i.send(ejectedTrace) + i.send(ejectedTrace, TraceSendEjectedFull) } } // if the trace we got back from the cache has already been sent, deal with the @@ -445,7 +544,7 @@ func isRootSpan(sp *types.Span) bool { return false } 
-func (i *InMemCollector) send(trace *types.Trace) { +func (i *InMemCollector) send(trace *types.Trace, reason string) { if trace.Sent { // someone else already sent this so we shouldn't also send it. This happens // when two timers race and two signals for the same trace are sent down the @@ -458,7 +557,7 @@ func (i *InMemCollector) send(trace *types.Trace) { } trace.Sent = true - traceDur := time.Since(trace.StartTime) + traceDur := time.Since(trace.ArrivalTime) i.Metrics.Histogram("trace_duration_ms", float64(traceDur.Milliseconds())) i.Metrics.Histogram("trace_span_count", float64(trace.SpanCount())) if trace.RootSpan != nil { @@ -467,6 +566,8 @@ func (i *InMemCollector) send(trace *types.Trace) { i.Metrics.Increment("trace_send_no_root") } + i.Metrics.Increment(reason) + var sampler sample.Sampler var found bool @@ -554,7 +655,7 @@ func (i *InMemCollector) Stop() error { traces := i.cache.GetAll() for _, trace := range traces { if trace != nil { - i.send(trace) + i.send(trace, TraceSendEjectedFull) } } } diff --git a/collect/collect_test.go b/collect/collect_test.go index ea4a2d82f3..64a5a0a9a2 100644 --- a/collect/collect_test.go +++ b/collect/collect_test.go @@ -2,8 +2,10 @@ package collect import ( "fmt" + "math/rand" "runtime" "strconv" + "strings" "testing" "time" @@ -562,7 +564,7 @@ func TestSampleConfigReload(t *testing.T) { }, conf.GetTraceTimeoutVal*2, conf.SendTickerVal) } -func TestMaxAlloc(t *testing.T) { +func TestOldMaxAlloc(t *testing.T) { transmission := &transmit.MockTransmission{} transmission.Start() conf := &config.MockConfig{ @@ -638,7 +640,7 @@ func TestMaxAlloc(t *testing.T) { time.Sleep(conf.SendTickerVal) } - assert.Equal(t, 450, len(traces), "should have shrunk cache to 90% of previous size") + assert.Equal(t, 450, len(traces), "should have shrunk cache to 90%% of previous size") for i, trace := range traces { assert.False(t, trace.Sent) assert.Equal(t, strconv.Itoa(i+50), trace.TraceID) @@ -647,7 +649,7 @@ func TestMaxAlloc(t 
*testing.T) { // We discarded the first 50 spans, and sent them. transmission.Mux.Lock() - assert.Equal(t, 50, len(transmission.Events), "should have sent 10% of traces") + assert.Equal(t, 50, len(transmission.Events), "should have sent 10%% of traces") for i, ev := range transmission.Events { assert.Equal(t, i, ev.Data["id"]) } @@ -655,6 +657,105 @@ func TestMaxAlloc(t *testing.T) { transmission.Mux.Unlock() } +func TestStableMaxAlloc(t *testing.T) { + transmission := &transmit.MockTransmission{} + transmission.Start() + conf := &config.MockConfig{ + GetSendDelayVal: 0, + GetTraceTimeoutVal: 10 * time.Minute, + GetSamplerTypeVal: &config.DeterministicSamplerConfig{SampleRate: 1}, + SendTickerVal: 2 * time.Millisecond, + CacheOverrunStrategy: "impact", + } + coll := &InMemCollector{ + Config: conf, + Logger: &logger.NullLogger{}, + Transmission: transmission, + Metrics: &metrics.NullMetrics{}, + SamplerFactory: &sample.SamplerFactory{ + Config: conf, + Logger: &logger.NullLogger{}, + }, + } + spandata := make([]map[string]interface{}, 500) + for i := 0; i < 500; i++ { + spandata[i] = map[string]interface{}{ + "trace.parent_id": "unused", + "id": i, + "str1": strings.Repeat("abc", rand.Intn(100)+1), + "str2": strings.Repeat("def", rand.Intn(100)+1), + } + } + + c := cache.NewInMemCache(1000, &metrics.NullMetrics{}, &logger.NullLogger{}) + coll.cache = c + stc, err := lru.New(15) + assert.NoError(t, err, "lru cache should start") + coll.sentTraceCache = stc + + coll.incoming = make(chan *types.Span, 1000) + coll.fromPeer = make(chan *types.Span, 5) + coll.datasetSamplers = make(map[string]sample.Sampler) + go coll.collect() + defer coll.Stop() + + for i := 0; i < 500; i++ { + span := &types.Span{ + TraceID: strconv.Itoa(i), + Event: types.Event{ + Dataset: "aoeu", + Data: spandata[i], + APIKey: legacyAPIKey, + }, + } + coll.AddSpan(span) + } + + for len(coll.incoming) > 0 { + time.Sleep(conf.SendTickerVal) + } + + // Now there should be 500 traces in the cache. 
+ coll.mutex.Lock() + assert.Equal(t, 500, len(coll.cache.GetAll())) + + // We want to induce an eviction event, so set MaxAlloc a bit below + // our current post-GC alloc. + runtime.GC() + var mem runtime.MemStats + runtime.ReadMemStats(&mem) + // Set MaxAlloc, which should cause cache evictions. + conf.GetInMemoryCollectorCacheCapacityVal.MaxAlloc = mem.Alloc * 99 / 100 + + coll.mutex.Unlock() + + // wait for the cache to take some action + var traces []*types.Trace + for { + coll.mutex.Lock() + traces = coll.cache.GetAll() + if len(traces) < 500 { + break + } + coll.mutex.Unlock() + + time.Sleep(conf.SendTickerVal) + } + + assert.Equal(t, 1000, coll.cache.(*cache.DefaultInMemCache).GetCacheSize(), "cache size shouldn't change") + + tracesLeft := len(traces) + assert.Less(t, tracesLeft, 480, "should have sent some traces") + assert.Greater(t, tracesLeft, 100, "should have NOT sent some traces") + coll.mutex.Unlock() + + // We discarded the most costly spans, and sent them. + transmission.Mux.Lock() + assert.Equal(t, 500-len(traces), len(transmission.Events), "should have sent traces that weren't kept") + + transmission.Mux.Unlock() +} + func TestAddSpanNoBlock(t *testing.T) { transmission := &transmit.MockTransmission{} transmission.Start() diff --git a/config/config.go b/config/config.go index b053f27cf8..1aeb845521 100644 --- a/config/config.go +++ b/config/config.go @@ -168,4 +168,6 @@ type Config interface { GetAdditionalErrorFields() []string GetAddSpanCountToRoot() bool + + GetCacheOverrunStrategy() string } diff --git a/config/config_test.go b/config/config_test.go index 8048e2bcf9..6f057dc989 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -336,7 +336,7 @@ func TestReadRulesConfig(t *testing.T) { assert.NoError(t, err) switch r := d.(type) { case *RulesBasedSamplerConfig: - assert.Len(t, r.Rule, 5) + assert.Len(t, r.Rule, 6) var rule *RulesBasedSamplerRule @@ -350,11 +350,11 @@ func TestReadRulesConfig(t *testing.T) { assert.Equal(t, 
"keep slow 500 errors", rule.Name) assert.Len(t, rule.Condition, 2) - rule = r.Rule[3] + rule = r.Rule[4] assert.Equal(t, 5, rule.SampleRate) assert.Equal(t, "span", rule.Scope) - rule = r.Rule[4] + rule = r.Rule[5] assert.Equal(t, 10, rule.SampleRate) assert.Equal(t, "", rule.Scope) diff --git a/config/file_config.go b/config/file_config.go index 1bf2e2f90a..9b7c591227 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -56,6 +56,7 @@ type configContents struct { GRPCServerParameters GRPCServerParameters AdditionalErrorFields []string AddSpanCountToRoot bool + CacheOverrunStrategy string } type InMemoryCollectorCacheCapacity struct { @@ -157,6 +158,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("GRPCServerParameters.Timeout", 2*time.Second) c.SetDefault("AdditionalErrorFields", []string{"trace.span_id"}) c.SetDefault("AddSpanCountToRoot", false) + c.SetDefault("CacheOverrunStrategy", "resize") c.SetConfigFile(config) err := c.ReadInConfig() @@ -199,7 +201,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) return nil, err } - err = fc.validateConditionalConfigs() + err = fc.validateGeneralConfigs() if err != nil { return nil, err } @@ -226,7 +228,7 @@ func (f *fileConfig) onChange(in fsnotify.Event) { return } - err = f.validateConditionalConfigs() + err = f.validateGeneralConfigs() if err != nil { f.errorCallback(err) return @@ -263,7 +265,7 @@ func (f *fileConfig) unmarshal() error { return nil } -func (f *fileConfig) validateConditionalConfigs() error { +func (f *fileConfig) validateGeneralConfigs() error { // validate logger config loggerType, err := f.GetLoggerType() if err != nil { @@ -293,6 +295,15 @@ func (f *fileConfig) validateConditionalConfigs() error { return err } } + + // validate cache strategy + st := f.GetCacheOverrunStrategy() + switch st { + case "resize", "impact": + break + default: + return fmt.Errorf("invalid CacheOverrunStrategy: '%s'", st) + 
} return nil } @@ -888,3 +899,10 @@ func (f *fileConfig) GetAddSpanCountToRoot() bool { return f.conf.AddSpanCountToRoot } + +func (f *fileConfig) GetCacheOverrunStrategy() string { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.CacheOverrunStrategy +} diff --git a/config/mock.go b/config/mock.go index 0c9ef034d3..9ed5d8b5bd 100644 --- a/config/mock.go +++ b/config/mock.go @@ -84,6 +84,7 @@ type MockConfig struct { PeerTimeout time.Duration AdditionalErrorFields []string AddSpanCountToRoot bool + CacheOverrunStrategy string Mux sync.RWMutex } @@ -459,3 +460,10 @@ func (f *MockConfig) GetAddSpanCountToRoot() bool { return f.AddSpanCountToRoot } + +func (f *MockConfig) GetCacheOverrunStrategy() string { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.CacheOverrunStrategy +} diff --git a/config_complete.toml b/config_complete.toml index 8ce344dd5e..972883a4f5 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -143,6 +143,16 @@ AdditionalErrorFields = [ # Eligible for live reload. # AddSpanCountToRoot = true +# CacheOverrunStrategy controls the cache management behavior under memory pressure. +# "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, +# which is generally not helpful unless it occurs because of a permanent change in traffic patterns. +# In the "impact" strategy, the items having the most impact on the cache size are +# ejected from the cache earlier than normal but the cache is not resized. +# In all cases, it only applies if MaxAlloc is nonzero. +# Default is "resize" for compatibility but "impact" is recommended for most installations. +# Eligible for live reload. 
+# CacheOverrunStrategy = "impact" + ############################ ## Implementation Choices ## ############################ diff --git a/rules_complete.toml b/rules_complete.toml index f984d9d938..d4da29585f 100644 --- a/rules_complete.toml +++ b/rules_complete.toml @@ -242,6 +242,22 @@ SampleRate = 1 AddSampleRateKeyToTrace = true AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + # Note that Refinery comparisons are type-dependent. If you are operating in an environment where different + # telemetry may send the same field with different types (for example, some systems send status codes as "200" + # instead of 200), you may need to create additional rules to cover these cases. + [[dataset4.rule]] + name = "dynamically sample 200 string responses" + [[dataset4.rule.condition]] + field = "status_code" + operator = "=" + value = "200" + [dataset4.rule.sampler.EMADynamicSampler] + Sampler = "EMADynamicSampler" + GoalSampleRate = 15 + FieldList = ["request.method", "request.route"] + AddSampleRateKeyToTrace = true + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + [[dataset4.rule]] name = "sample traces originating from a service" # if scope is set to "span", a single span in the trace must match diff --git a/types/event.go b/types/event.go index 84f6f66d8c..68eee8a97a 100644 --- a/types/event.go +++ b/types/event.go @@ -48,19 +48,50 @@ type Trace struct { SendBy time.Time - // StartTime is the server time when the first span arrived for this trace. + // ArrivalTime is the server time when the first span arrived for this trace. // Used to calculate how long traces spend sitting in Refinery - StartTime time.Time + ArrivalTime time.Time RootSpan *Span + // DataSize is the sum of the DataSize of spans that are added. + // It's used to help expire the most expensive traces. 
+ DataSize int + // spans is the list of spans in this trace spans []*Span + + // totalImpact is the sum of the trace's cacheImpact; if this value is 0 + // it is recalculated during CacheImpact(), otherwise this value is + // returned. We reset it to 0 when adding spans so it gets recalculated. + // This is used to memoize the impact calculation so that it doesn't get + // calculated over and over during a sort. + totalImpact int } // AddSpan adds a span to this trace func (t *Trace) AddSpan(sp *Span) { + // We've done all the work to know this is a trace we are putting in our cache, so + // now is when we can calculate the size of it so that our cache size management + // code works properly. + sp.ArrivalTime = time.Now() + sp.DataSize = sp.GetDataSize() + t.DataSize += sp.DataSize t.spans = append(t.spans, sp) + t.totalImpact = 0 +} + +// CacheImpact calculates an abstract value for something we're calling cache impact, which is +// the sum of the CacheImpact of all of the spans in a trace. We use it to order traces +// so we can eject the ones that having the most impact on the cache size, but balancing that +// against preferring to keep newer spans. +func (t *Trace) CacheImpact(traceTimeout time.Duration) int { + if t.totalImpact == 0 { + for _, sp := range t.GetSpans() { + t.totalImpact += sp.CacheImpact(traceTimeout) + } + } + return t.totalImpact } // GetSpans returns the list of spans in this trace @@ -92,7 +123,47 @@ func (t *Trace) GetSamplerKey() (string, bool) { // Span is an event that shows up with a trace ID, so will be part of a Trace type Span struct { Event - TraceID string + TraceID string + DataSize int + ArrivalTime time.Time +} + +// GetDataSize computes the size of the Data element of the Span. +// Note that it's not the full size of the span, but we're mainly using this for +// relative ordering, not absolute calculations. 
+func (sp *Span) GetDataSize() int { + total := 0 + // the data types we should be getting from JSON are: + // float64, int64, bool, string + for _, v := range sp.Data { + switch v.(type) { + case bool: + total += 1 + case float64, int64, int: + total += 8 + case string, []byte: + total += len(v.(string)) + default: + total += 8 // catchall + } + } + return total +} + +// cacheImpactFactor controls how much more we weigh older spans compared to newer ones; +// setting this to 1 means they're not weighted by duration +const cacheImpactFactor = 4 + +// CacheImpact calculates an abstract value for something we're calling cache impact, which is +// the product of the size of the span and a factor related to the amount of time the span +// has been stored in the cache, based on the TraceTimeout value. +func (sp *Span) CacheImpact(traceTimeout time.Duration) int { + // multiplier will be a value from 1-cacheImpactFactor, depending on how long the + // span has been in the cache compared to the traceTimeout. It might go higher + // during the brief period between traceTimeout and the time when the span is sent. + multiplier := int(cacheImpactFactor*time.Since(sp.ArrivalTime)/traceTimeout) + 1 + // We can assume DataSize was set when the span was added. 
+ return multiplier * sp.DataSize } func IsLegacyAPIKey(apiKey string) bool { diff --git a/types/event_test.go b/types/event_test.go new file mode 100644 index 0000000000..28dbe8b661 --- /dev/null +++ b/types/event_test.go @@ -0,0 +1,83 @@ +package types + +import ( + "strconv" + "strings" + "testing" +) + +func TestSpan_GetDataSize(t *testing.T) { + tests := []struct { + name string + numInts int + numStrings int + want int + }{ + {"all ints small", 10, 0, 80}, + {"all ints large", 100, 0, 800}, + {"all strings small", 0, 10, 45}, + {"all strings large", 0, 100, 4950}, + {"mixed small", 10, 10, 125}, + {"mixed large", 100, 100, 5750}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sp := &Span{ + TraceID: tt.name, + Event: Event{ + Data: make(map[string]any), + }, + } + for i := 0; i < tt.numInts; i++ { + sp.Data[tt.name+"int"+strconv.Itoa(i)] = i + } + for i := 0; i < tt.numStrings; i++ { + sp.Data[tt.name+"str"+strconv.Itoa(i)] = strings.Repeat("x", i) + } + if got := sp.GetDataSize(); got != tt.want { + t.Errorf("Span.CalculateSize() = %v, want %v", got, tt.want) + } + }) + } +} + +// These benchmarks were just to verify that the size calculation is acceptable +// even on big spans. The P99 for normal (20-field) spans shows that it will take ~1 +// microsecond (on an m1 laptop) but a 1000-field span (extremely rare!) will take +// ~10 microseconds. Since these happen once per span, when adding it to a trace, +// we don't expect this to be a performance issue. 
+func BenchmarkSpan_CalculateSizeSmall(b *testing.B) { + sp := &Span{ + Event: Event{ + Data: make(map[string]any), + }, + } + for i := 0; i < 10; i++ { + sp.Data["int"+strconv.Itoa(i)] = i + } + for i := 0; i < 10; i++ { + sp.Data["str"+strconv.Itoa(i)] = strings.Repeat("x", i) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + sp.GetDataSize() + } +} + +func BenchmarkSpan_CalculateSizeLarge(b *testing.B) { + sp := &Span{ + Event: Event{ + Data: make(map[string]any), + }, + } + for i := 0; i < 500; i++ { + sp.Data["int"+strconv.Itoa(i)] = i + } + for i := 0; i < 500; i++ { + sp.Data["str"+strconv.Itoa(i)] = strings.Repeat("x", i) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + sp.GetDataSize() + } +} From b79101ee2e736f5cda7a92720ad6ec9e7c806748 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Wed, 9 Nov 2022 09:28:08 -0500 Subject: [PATCH 248/351] Set content-type on marshalToFormat (#548) ## Which problem is this PR solving? The /query endpoints aren't setting Content-Type, which they should. ## Short description of the changes - Set content-type based on the type asked for. 
--- route/route.go | 1 + 1 file changed, 1 insertion(+) diff --git a/route/route.go b/route/route.go index 8727ac637b..8c47c7dbdb 100644 --- a/route/route.go +++ b/route/route.go @@ -332,6 +332,7 @@ func (r *Router) marshalToFormat(w http.ResponseWriter, obj interface{}, format w.WriteHeader(http.StatusBadRequest) return } + w.Header().Set("Content-Type", "application/"+format) w.Write(body) } From 6a7327c9ad5192ae70daa20c5c92413bd69cfb79 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Nov 2022 14:28:54 +0000 Subject: [PATCH 249/351] Bump github.com/klauspost/compress from 1.15.11 to 1.15.12 (#549) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5ed279d21d..2f85fd14f2 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/honeycombio/libhoney-go v1.16.0 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.15.11 + github.com/klauspost/compress v1.15.12 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.13.0 diff --git a/go.sum b/go.sum index 89f115abd3..11e6a94dcf 100644 --- a/go.sum +++ b/go.sum @@ -204,8 +204,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= -github.com/klauspost/compress 
v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM= +github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= From 3cd77c914ebdc0d12840cc21b8177286e58f7721 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Nov 2022 14:36:50 +0000 Subject: [PATCH 250/351] Bump github.com/honeycombio/libhoney-go from 1.16.0 to 1.18.0 (#550) --- go.mod | 2 +- go.sum | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 2f85fd14f2..e511feb9f4 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 github.com/honeycombio/husky v0.17.0 - github.com/honeycombio/libhoney-go v1.16.0 + github.com/honeycombio/libhoney-go v1.18.0 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 diff --git a/go.sum b/go.sum index 11e6a94dcf..67723f8744 100644 --- a/go.sum +++ b/go.sum @@ -186,8 +186,8 @@ github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+D github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= github.com/honeycombio/husky v0.17.0 h1:Or5DuZ+jEoO7SYblGyxnBI7vRNhs2xq83jaLOBcHkIQ= 
github.com/honeycombio/husky v0.17.0/go.mod h1:oR3+rfUql/AlYk/RY4ougzXSKvDGtmeIVGac0nK0qOs= -github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= -github.com/honeycombio/libhoney-go v1.16.0/go.mod h1:izP4fbREuZ3vqC4HlCAmPrcPT9gxyxejRjGtCYpmBn0= +github.com/honeycombio/libhoney-go v1.18.0 h1:OYHOP381r3Ea76BhUYeza8PUTMDp8MByoOxDn3qtEq8= +github.com/honeycombio/libhoney-go v1.18.0/go.mod h1:KwbcXkqUbH20x3MpfSt/kdvlog3FFdEnouqYD3XKXLY= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= @@ -203,7 +203,7 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM= github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= From 2c544cf41ed28c4e669fd643a59c693a5c2e5a76 Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Nov 2022 14:37:20 +0000 Subject: [PATCH 251/351] Bump github.com/stretchr/testify from 1.8.0 to 1.8.1 (#551) --- go.mod | 2 +- go.sum | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index e511feb9f4..358d010a81 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/sirupsen/logrus v1.9.0 github.com/spf13/viper v1.13.0 - github.com/stretchr/testify v1.8.0 + github.com/stretchr/testify v1.8.1 github.com/tidwall/gjson v1.14.3 github.com/vmihailenco/msgpack/v4 v4.3.11 golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b // indirect diff --git a/go.sum b/go.sum index 67723f8744..11c12fd24f 100644 --- a/go.sum +++ b/go.sum @@ -290,6 +290,7 @@ github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+z github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -297,8 +298,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw= From c8fcf4fd45f43d1dafefdc2e0f0561d71131251f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Nov 2022 14:37:59 +0000 Subject: [PATCH 252/351] Bump github.com/fsnotify/fsnotify from 1.5.4 to 1.6.0 (#552) --- go.mod | 4 ++-- go.sum | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 358d010a81..a686269016 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/facebookgo/inject v0.0.0-20180706035515-f23751cae28b github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d - github.com/fsnotify/fsnotify v1.5.4 + github.com/fsnotify/fsnotify v1.6.0 github.com/go-playground/validator v9.31.0+incompatible github.com/gomodule/redigo v1.8.9 github.com/gorilla/mux v1.8.0 @@ -66,7 +66,7 @@ require ( 
github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 // indirect + golang.org/x/sys v0.0.0-20220908164124-27713097b956 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220902135211-223410557253 // indirect diff --git a/go.sum b/go.sum index 11c12fd24f..85bda354ab 100644 --- a/go.sum +++ b/go.sum @@ -88,8 +88,8 @@ github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691/go.mod h1:sKL github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -473,10 +473,9 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys 
v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY= -golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956 h1:XeJjHH1KiLpKGb6lvMiksZ9l0fVUh+AmGcm0nOMEBOY= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 17e86d457fc5fbb45753558672c835e5b0532060 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Nov 2022 14:38:29 +0000 Subject: [PATCH 253/351] Bump google.golang.org/grpc from 1.50.0 to 1.50.1 (#553) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a686269016..7d21988296 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/tidwall/gjson v1.14.3 github.com/vmihailenco/msgpack/v4 v4.3.11 golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b // indirect - google.golang.org/grpc v1.50.0 + google.golang.org/grpc v1.50.1 
google.golang.org/protobuf v1.28.1 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 85bda354ab..b9d33b4a72 100644 --- a/go.sum +++ b/go.sum @@ -622,8 +622,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.50.0 h1:fPVVDxY9w++VjTZsYvXWqEf9Rqar/e+9zYfxKK+W+YU= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 5ac09f01e33aba1369540ae99fb53937f3db4c53 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Wed, 9 Nov 2022 09:55:57 -0500 Subject: [PATCH 254/351] Add command to query config metadata (#556) ## Which problem is this PR solving? - Operators would like to know that their refinery is running current configurations, and to be able to verify that their configuration file in use is the one they believe it to be. For security reasons, we can't simply dump the configuration information, so instead we can calculate a hash and indicate the time when the configuration file was last loaded. This should be especially helpful in a cluster to make sure that different instances have the same configuration in use. 
- Closes #542 ## Changes made - Add a `/query/configmetadata` endpoint that has the same query header rule as the other query endpoints - Add a function to calculate the md5 of a file (verified to return the same results as the `md5sum` command) - Use that function to generate the sums of the config files currently in use - Record the time whenever config is loaded - Report all of this in JSON format when requested - Update README.md Will require docs update. Sample of the generated data: ```json [ { "type": "config", "id": "tools/loadtest/config.toml", "hash": "1047bb6140b487ecdb0745f3335b6bc3", "loaded_at": "2022-11-08T22:24:18-05:00" }, { "type": "rules", "id": "tools/loadtest/rules.toml", "hash": "2d88389e1ff6530fba53466973e591e0", "loaded_at": "2022-11-08T22:24:18-05:00" } ] ``` --- README.md | 5 +++++ config/config.go | 9 +++++++++ config/file_config.go | 42 ++++++++++++++++++++++++++++++++++++++++++ config/mock.go | 8 ++++++++ route/route.go | 6 ++++++ 5 files changed, 70 insertions(+) diff --git a/README.md b/README.md index e7c8f4b7d6..6aa25ff70a 100644 --- a/README.md +++ b/README.md @@ -147,6 +147,11 @@ The `/query` endpoints are protected and can be enabled by specifying `QueryAuth `curl --include --get $REFINERY_HOST/query/rules/$FORMAT/$DATASET --header "x-honeycomb-refinery-query: my-local-token"` will retrieve the rule set that refinery will use for the specified dataset. It comes back as a map of the sampler type to its rule set. +`curl --include --get $REFINERY_HOST/query/configmetadata --header "x-honeycomb-refinery-query: my-local-token"` will retrieve information about the configurations currently in use, including the timestamp when the configuration was last loaded. + +For file-based configurations (the only type currently supported), the `hash` value is identical to the value generated by the `md5sum` command for the given config file. + +For all of these commands: - `$REFINERY_HOST` should be the url of your refinery. 
- `$FORMAT` can be one of `json`, `yaml`, or `toml`. - `$DATASET` is the name of the dataset you want to check. diff --git a/config/config.go b/config/config.go index 1aeb845521..54ef588297 100644 --- a/config/config.go +++ b/config/config.go @@ -170,4 +170,13 @@ type Config interface { GetAddSpanCountToRoot() bool GetCacheOverrunStrategy() string + + GetConfigMetadata() []ConfigMetadata +} + +type ConfigMetadata struct { + Type string `json:"type"` + ID string `json:"id"` + Hash string `json:"hash"` + LoadedAt string `json:"loaded_at"` } diff --git a/config/file_config.go b/config/file_config.go index 9b7c591227..a00441cba4 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -1,9 +1,13 @@ package config import ( + "crypto/md5" + "encoding/hex" "errors" "fmt" + "io" "net" + "os" "strings" "sync" "time" @@ -22,6 +26,7 @@ type fileConfig struct { callbacks []func() errorCallback func(error) mux sync.RWMutex + lastLoadTime time.Time } type configContents struct { @@ -266,6 +271,8 @@ func (f *fileConfig) unmarshal() error { } func (f *fileConfig) validateGeneralConfigs() error { + f.lastLoadTime = time.Now() + // validate logger config loggerType, err := f.GetLoggerType() if err != nil { @@ -906,3 +913,38 @@ func (f *fileConfig) GetCacheOverrunStrategy() string { return f.conf.CacheOverrunStrategy } + +// calculates an MD5 sum for a file that returns the same result as the md5sum command +func calcMD5For(filename string) string { + f, err := os.Open(filename) + if err != nil { + return err.Error() + } + defer f.Close() + data, err := io.ReadAll(f) + if err != nil { + return err.Error() + } + h := md5.New() + if _, err := h.Write(data); err != nil { + return err.Error() + } + return hex.EncodeToString(h.Sum(nil)) +} + +func (f *fileConfig) GetConfigMetadata() []ConfigMetadata { + ret := make([]ConfigMetadata, 2) + ret[0] = ConfigMetadata{ + Type: "config", + ID: f.config.ConfigFileUsed(), + Hash: calcMD5For(f.config.ConfigFileUsed()), + LoadedAt: 
f.lastLoadTime.Format(time.RFC3339), + } + ret[1] = ConfigMetadata{ + Type: "rules", + ID: f.rules.ConfigFileUsed(), + Hash: calcMD5For(f.rules.ConfigFileUsed()), + LoadedAt: f.lastLoadTime.Format(time.RFC3339), + } + return ret +} diff --git a/config/mock.go b/config/mock.go index 9ed5d8b5bd..31c3371f93 100644 --- a/config/mock.go +++ b/config/mock.go @@ -85,6 +85,7 @@ type MockConfig struct { AdditionalErrorFields []string AddSpanCountToRoot bool CacheOverrunStrategy string + CfgMetadata []ConfigMetadata Mux sync.RWMutex } @@ -467,3 +468,10 @@ func (f *MockConfig) GetCacheOverrunStrategy() string { return f.CacheOverrunStrategy } + +func (f *MockConfig) GetConfigMetadata() []ConfigMetadata { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.CfgMetadata +} diff --git a/route/route.go b/route/route.go index 8c47c7dbdb..4eeb8e270a 100644 --- a/route/route.go +++ b/route/route.go @@ -164,6 +164,7 @@ func (r *Router) LnS(incomingOrPeer string) { queryMuxxer.HandleFunc("/trace/{traceID}", r.debugTrace).Name("get debug information for given trace ID") queryMuxxer.HandleFunc("/rules/{format}/{dataset}", r.getSamplerRules).Name("get formatted sampler rules for given dataset") queryMuxxer.HandleFunc("/allrules/{format}", r.getAllSamplerRules).Name("get formatted sampler rules for all datasets") + queryMuxxer.HandleFunc("/configmetadata", r.getConfigMetadata).Name("get configuration metadata") // require an auth header for events and batches authedMuxxer := muxxer.PathPrefix("/1/").Methods("POST").Subrouter() @@ -302,6 +303,11 @@ func (r *Router) getAllSamplerRules(w http.ResponseWriter, req *http.Request) { r.marshalToFormat(w, cfgs, format) } +func (r *Router) getConfigMetadata(w http.ResponseWriter, req *http.Request) { + cm := r.Config.GetConfigMetadata() + r.marshalToFormat(w, cm, "json") +} + func (r *Router) marshalToFormat(w http.ResponseWriter, obj interface{}, format string) { var body []byte var err error From 6bee58140851b4487a586f19d3a67f2a025e49a6 Mon Sep 
17 00:00:00 2001 From: Kent Quirk Date: Wed, 9 Nov 2022 15:33:09 -0500 Subject: [PATCH 255/351] Prep v1.19.0 (#557) Prep v1.19.0 --- CHANGELOG.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 53ef94efd4..f19426d7bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Refinery Changelog +## 1.19.0 2022-11-09 + +Adds new query command to retrieve configuration metadata, and also allows for a new (optional) cache management strategy that should be more effective at preventing OOM crashes in situations where memory is under pressure. + +### Enhancements + +- Add command to query config metadata (#556) | [@kentquirk](https://github.com/kentquirk) +- New cache management strategy (#547) | [@kentquirk](https://github.com/kentquirk) + +### Fixes + +- Set content-type on marshalToFormat (#548) | [@kentquirk](https://github.com/kentquirk) + +### Maintenance + +- Bump google.golang.org/grpc from 1.50.0 to 1.50.1 (#553) +- Bump github.com/fsnotify/fsnotify from 1.5.4 to 1.6.0 (#552) +- Bump github.com/stretchr/testify from 1.8.0 to 1.8.1 (#551) +- Bump github.com/honeycombio/libhoney-go from 1.16.0 to 1.18.0 (#550) +- Bump github.com/klauspost/compress from 1.15.11 to 1.15.12 (#549) + ## 1.18.0 2022-10-12 ### Enhancements From 53d3646da413838c298e4199f61f3a7cacc6c94c Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Mon, 14 Nov 2022 09:39:31 -0500 Subject: [PATCH 256/351] Extract Sent Cache to an interface for future expansion (#561) ## Which problem is this PR solving? This is a prep PR for further work on the sent trace cache. For improved scalability, we want to be able to store trace decision records for a longer time. The best way to implement this in a backwards-compatible way is to pull the mechanisms for managing decision records into an interface, and then implement the interface with the legacy logic. 
That's what this does. There are no expected changes in behavior, and all tests still pass. ## Short description of the changes - Define interfaces for a TraceSentCache and a TraceSentRecord - Implement code for those that duplicates the existing legacy logic - Refactor the places the code is used to use the new interfaces - Tweak span count data type so that it's not an int64 any more - Rename SpanCount to DescendantCount because now that we have links and events, they're not all Spans anymore, and future PRs will add additional counting functions. I haven't renamed the corresponding Get and Add functions because that's pretty messy. --- collect/cache/legacySentCache.go | 73 +++++++++++++++++++++++++++++++ collect/cache/traceSentCache.go | 24 ++++++++++ collect/collect.go | 52 +++++++--------------- collect/collect_benchmark_test.go | 3 +- collect/collect_test.go | 21 +++++---- sample/dynamic_ema.go | 2 +- tools/loadtest/.gitignore | 4 ++ types/event.go | 8 ++-- 8 files changed, 133 insertions(+), 54 deletions(-) create mode 100644 collect/cache/legacySentCache.go create mode 100644 collect/cache/traceSentCache.go create mode 100644 tools/loadtest/.gitignore diff --git a/collect/cache/legacySentCache.go b/collect/cache/legacySentCache.go new file mode 100644 index 0000000000..b3a84834c0 --- /dev/null +++ b/collect/cache/legacySentCache.go @@ -0,0 +1,73 @@ +package cache + +import ( + lru "github.com/hashicorp/golang-lru" + "github.com/honeycombio/refinery/types" +) + +// legacySentRecord is Refinery's original traceSent cache. It keeps the same records +// for both kept and dropped traces and the size of the sent cache is set based on the size +// of the live trace cache. + +// legacySentRecord is an internal record we leave behind when sending a trace to remember +// our decision for the future, so any delinquent spans that show up later can +// be dropped or passed along. 
+type legacySentRecord struct { + keep bool // true if the trace was kept, false if it was dropped + rate uint // sample rate used when sending the trace + spanCount uint // number of spans in the trace (we decorate the root span with this) +} + +func (t *legacySentRecord) Kept() bool { + return t.keep +} + +func (t *legacySentRecord) Rate() uint { + return t.rate +} + +func (t *legacySentRecord) DescendantCount() uint { + return uint(t.spanCount) +} + +func (t *legacySentRecord) Count(*types.Span) { + t.spanCount++ +} + +// Make sure it implements TraceSentRecord +var _ TraceSentRecord = (*legacySentRecord)(nil) + +type legacySentCache struct { + sentTraceCache *lru.Cache +} + +// Make sure it implements TraceSentCache +var _ TraceSentCache = (*legacySentCache)(nil) + +func NewLegacySentCache(capacity int) (TraceSentCache, error) { + stc, err := lru.New(capacity) + if err != nil { + return nil, err + } + return &legacySentCache{sentTraceCache: stc}, nil +} + +func (c *legacySentCache) Record(trace *types.Trace, keep bool) { + // record this decision in the sent record LRU for future spans + sentRecord := legacySentRecord{ + keep: keep, + rate: trace.SampleRate, + spanCount: trace.DescendantCount(), + } + c.sentTraceCache.Add(trace.TraceID, &sentRecord) +} + +func (c *legacySentCache) Check(span *types.Span) (TraceSentRecord, bool) { + if sentRecord, found := c.sentTraceCache.Get(span.TraceID); found { + if sr, ok := sentRecord.(*legacySentRecord); ok { + sr.Count(span) + return sr, true + } + } + return nil, false +} diff --git a/collect/cache/traceSentCache.go b/collect/cache/traceSentCache.go new file mode 100644 index 0000000000..e55e3b0def --- /dev/null +++ b/collect/cache/traceSentCache.go @@ -0,0 +1,24 @@ +package cache + +import ( + "github.com/honeycombio/refinery/types" +) + +type TraceSentRecord interface { + // Kept returns whether the trace was kept (sampled and sent to honeycomb) or dropped. 
+ Kept() bool + // Rate() returns the sample rate for the trace + Rate() uint + // DescendantCount returns the count of items associated with the trace, including all types of children like span links and span events. + DescendantCount() uint + // Count records additional spans in the totals + Count(*types.Span) +} + +type TraceSentCache interface { + // Record preserves the record of a trace being sent or not. + Record(trace *types.Trace, keep bool) + // Check tests if a trace corresponding to the span is in the cache; if found, it returns the appropriate TraceSentRecord and true, + // else nil and false. + Check(span *types.Span) (TraceSentRecord, bool) +} diff --git a/collect/collect.go b/collect/collect.go index 24f136d81b..cf664bf9b9 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -9,7 +9,6 @@ import ( "sync" "time" - lru "github.com/hashicorp/golang-lru" "github.com/honeycombio/refinery/collect/cache" "github.com/honeycombio/refinery/config" "github.com/honeycombio/refinery/logger" @@ -73,7 +72,7 @@ type InMemCollector struct { cache cache.Cache datasetSamplers map[string]sample.Sampler - sentTraceCache *lru.Cache + sentTraceCache cache.TraceSentCache incoming chan *types.Span fromPeer chan *types.Span @@ -82,15 +81,6 @@ type InMemCollector struct { hostname string } -// traceSentRecord is the bit we leave behind when sending a trace to remember -// our decision for the future, so any delinquent spans that show up later can -// be dropped or passed along. 
-type traceSentRecord struct { - keep bool // true if the trace was kept, false if it was dropped - rate uint // sample rate used when sending the trace - spanCount int64 // number of spans in the trace (we decorate the root span with this) -} - func (i *InMemCollector) Start() error { i.Logger.Debug().Logf("Starting InMemCollector") defer func() { i.Logger.Debug().Logf("Finished starting InMemCollector") }() @@ -121,11 +111,10 @@ func (i *InMemCollector) Start() error { i.Metrics.Register(TraceSendEjectedFull, "counter") i.Metrics.Register(TraceSendEjectedMemsize, "counter") - stc, err := lru.New(imcConfig.CacheCapacity * 5) // keep 5x ring buffer size + i.sentTraceCache, err = cache.NewLegacySentCache(imcConfig.CacheCapacity * 5) // (keep 5x ring buffer size) if err != nil { return err } - i.sentTraceCache = stc i.incoming = make(chan *types.Span, imcConfig.CacheCapacity*3) i.fromPeer = make(chan *types.Span, imcConfig.CacheCapacity*3) @@ -425,16 +414,13 @@ func (i *InMemCollector) processSpan(sp *types.Span) { trace := i.cache.Get(sp.TraceID) if trace == nil { // if the trace has already been sent, just pass along the span - if sentRecord, found := i.sentTraceCache.Get(sp.TraceID); found { - if sr, ok := sentRecord.(*traceSentRecord); ok { - i.Metrics.Increment("trace_sent_cache_hit") - // bump the count of records on this trace -- if the root span isn't - // the last late span, then it won't be perfect, but it will be better than - // having none at all - sentRecord.(*traceSentRecord).spanCount++ - i.dealWithSentTrace(sr.keep, sr.rate, sentRecord.(*traceSentRecord).spanCount, sp) - return - } + if sr, found := i.sentTraceCache.Check(sp); found { + i.Metrics.Increment("trace_sent_cache_hit") + // bump the count of records on this trace -- if the root span isn't + // the last late span, then it won't be perfect, but it will be better than + // having none at all + i.dealWithSentTrace(sr.Kept(), sr.Rate(), sr.DescendantCount(), sp) + return } // trace hasn't 
already been sent (or this span is really old); let's // create a new trace to hold it @@ -464,7 +450,7 @@ func (i *InMemCollector) processSpan(sp *types.Span) { // if the trace we got back from the cache has already been sent, deal with the // span. if trace.Sent { - i.dealWithSentTrace(trace.KeepSample, trace.SampleRate, trace.SpanCount(), sp) + i.dealWithSentTrace(trace.KeepSample, trace.SampleRate, trace.DescendantCount(), sp) } // great! trace is live. add the span. @@ -486,7 +472,7 @@ func (i *InMemCollector) processSpan(sp *types.Span) { // dealWithSentTrace handles a span that has arrived after the sampling decision // on the trace has already been made, and it obeys that decision by either // sending the span immediately or dropping it. -func (i *InMemCollector) dealWithSentTrace(keep bool, sampleRate uint, spanCount int64, sp *types.Span) { +func (i *InMemCollector) dealWithSentTrace(keep bool, sampleRate uint, spanCount uint, sp *types.Span) { if i.Config.GetIsDryRun() { field := i.Config.GetDryRunFieldName() // if dry run mode is enabled, we keep all traces and mark the spans with the sampling decision @@ -502,7 +488,7 @@ func (i *InMemCollector) dealWithSentTrace(keep bool, sampleRate uint, spanCount mergeTraceAndSpanSampleRates(sp, sampleRate) // if this span is a late root span, possibly update it with our current span count if i.Config.GetAddSpanCountToRoot() && isRootSpan(sp) { - sp.Data["meta.span_count"] = spanCount + sp.Data["meta.span_count"] = int64(spanCount) } i.Transmission.EnqueueSpan(sp) return @@ -559,7 +545,7 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { traceDur := time.Since(trace.ArrivalTime) i.Metrics.Histogram("trace_duration_ms", float64(traceDur.Milliseconds())) - i.Metrics.Histogram("trace_span_count", float64(trace.SpanCount())) + i.Metrics.Histogram("trace_span_count", float64(trace.DescendantCount())) if trace.RootSpan != nil { i.Metrics.Increment("trace_send_has_root") } else { @@ -584,7 +570,7 @@ 
func (i *InMemCollector) send(trace *types.Trace, reason string) { // If we have a root span, update it with the count before determining the SampleRate. if i.Config.GetAddSpanCountToRoot() && trace.RootSpan != nil { - trace.RootSpan.Data["meta.span_count"] = trace.SpanCount() + trace.RootSpan.Data["meta.span_count"] = int64(trace.DescendantCount()) } // use sampler key to find sampler; create and cache if not found @@ -599,13 +585,7 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { trace.KeepSample = shouldSend logFields["reason"] = reason - // record this decision in the sent record LRU for future spans - sentRecord := traceSentRecord{ - keep: shouldSend, - rate: rate, - spanCount: trace.SpanCount(), - } - i.sentTraceCache.Add(trace.TraceID, &sentRecord) + i.sentTraceCache.Record(trace, shouldSend) // if we're supposed to drop this trace, and dry run mode is not enabled, then we're done. if !shouldSend && !i.Config.GetIsDryRun() { @@ -628,7 +608,7 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { // update the root span (if we have one, which we might not if the trace timed out) // with the final total as of our send time if i.Config.GetAddSpanCountToRoot() && isRootSpan(sp) { - sp.Data["meta.span_count"] = sentRecord.spanCount + sp.Data["meta.span_count"] = int64(trace.DescendantCount()) } if i.Config.GetIsDryRun() { diff --git a/collect/collect_benchmark_test.go b/collect/collect_benchmark_test.go index fda9ea7014..6171dd583d 100644 --- a/collect/collect_benchmark_test.go +++ b/collect/collect_benchmark_test.go @@ -6,7 +6,6 @@ import ( "testing" "time" - lru "github.com/hashicorp/golang-lru" "github.com/stretchr/testify/assert" "github.com/honeycombio/refinery/collect/cache" @@ -35,7 +34,7 @@ func BenchmarkCollect(b *testing.B) { metric := &metrics.MockMetrics{} metric.Start() - stc, err := lru.New(15) + stc, err := cache.NewLegacySentCache(15) assert.NoError(b, err, "lru cache 
should start") coll := &InMemCollector{ diff --git a/collect/collect_test.go b/collect/collect_test.go index 64a5a0a9a2..fcfb85a476 100644 --- a/collect/collect_test.go +++ b/collect/collect_test.go @@ -10,7 +10,6 @@ import ( "time" "github.com/facebookgo/inject" - lru "github.com/hashicorp/golang-lru" "github.com/stretchr/testify/assert" "github.com/honeycombio/refinery/collect/cache" @@ -48,7 +47,7 @@ func TestAddRootSpan(t *testing.T) { c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{}) coll.cache = c - stc, err := lru.New(15) + stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") coll.sentTraceCache = stc @@ -126,7 +125,7 @@ func TestOriginalSampleRateIsNotedInMetaField(t *testing.T) { c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{}) coll.cache = c - stc, err := lru.New(15) + stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") coll.sentTraceCache = stc @@ -184,7 +183,7 @@ func TestTransmittedSpansShouldHaveASampleRateOfAtLeastOne(t *testing.T) { c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{}) coll.cache = c - stc, err := lru.New(15) + stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") coll.sentTraceCache = stc @@ -245,7 +244,7 @@ func TestAddSpan(t *testing.T) { } c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{}) coll.cache = c - stc, err := lru.New(15) + stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") coll.sentTraceCache = stc @@ -318,7 +317,7 @@ func TestDryRunMode(t *testing.T) { } c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{}) coll.cache = c - stc, err := lru.New(15) + stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") coll.sentTraceCache = stc @@ -585,7 +584,7 @@ func TestOldMaxAlloc(t 
*testing.T) { } c := cache.NewInMemCache(1000, &metrics.NullMetrics{}, &logger.NullLogger{}) coll.cache = c - stc, err := lru.New(15) + stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") coll.sentTraceCache = stc @@ -689,7 +688,7 @@ func TestStableMaxAlloc(t *testing.T) { c := cache.NewInMemCache(1000, &metrics.NullMetrics{}, &logger.NullLogger{}) coll.cache = c - stc, err := lru.New(15) + stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") coll.sentTraceCache = stc @@ -777,7 +776,7 @@ func TestAddSpanNoBlock(t *testing.T) { } c := cache.NewInMemCache(10, &metrics.NullMetrics{}, &logger.NullLogger{}) coll.cache = c - stc, err := lru.New(15) + stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") coll.sentTraceCache = stc @@ -849,7 +848,7 @@ func TestAddSpanCount(t *testing.T) { } c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{}) coll.cache = c - stc, err := lru.New(15) + stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") coll.sentTraceCache = stc @@ -918,7 +917,7 @@ func TestLateRootGetsSpanCount(t *testing.T) { } c := cache.NewInMemCache(3, &metrics.NullMetrics{}, &logger.NullLogger{}) coll.cache = c - stc, err := lru.New(15) + stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") coll.sentTraceCache = stc diff --git a/sample/dynamic_ema.go b/sample/dynamic_ema.go index 3483e1a94b..34a88b4820 100644 --- a/sample/dynamic_ema.go +++ b/sample/dynamic_ema.go @@ -54,7 +54,7 @@ func (d *EMADynamicSampler) Start() error { } d.dynsampler.Start() - // Register stastics this package will produce + // Register statistics this package will produce d.Metrics.Register("dynsampler_num_dropped", "counter") d.Metrics.Register("dynsampler_num_kept", "counter") d.Metrics.Register("dynsampler_sample_rate", "histogram") diff --git a/tools/loadtest/.gitignore 
b/tools/loadtest/.gitignore new file mode 100644 index 0000000000..af7a074c2b --- /dev/null +++ b/tools/loadtest/.gitignore @@ -0,0 +1,4 @@ +.direnv +.tool-versions +__* +.DS_Store \ No newline at end of file diff --git a/types/event.go b/types/event.go index 68eee8a97a..9e0b4384be 100644 --- a/types/event.go +++ b/types/event.go @@ -94,14 +94,14 @@ func (t *Trace) CacheImpact(traceTimeout time.Duration) int { return t.totalImpact } -// GetSpans returns the list of spans in this trace +// GetSpans returns the list of descendants in this trace func (t *Trace) GetSpans() []*Span { return t.spans } -// SpanCount gets the number of spans currently in this trace as int64 -func (t *Trace) SpanCount() int64 { - return int64(len(t.spans)) +// DescendantCount gets the number of descendants of all kinds currently in this trace +func (t *Trace) DescendantCount() uint { + return uint(len(t.spans)) } func (t *Trace) GetSamplerKey() (string, bool) { From bbb7cd81e1d1014a391e6500d2b9e41e0ce08905 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Tue, 22 Nov 2022 17:28:12 -0500 Subject: [PATCH 257/351] Add cuckoo-based drop cache (#567) ## Which problem is this PR solving? Before this change, Refinery used a circular LRU cache to retain a record for every trace; this cache is hardcoded to 5x the configured cache size, and does not change when the cache is resized. This is a relatively small number, and for long-lived traces, it might mean that late spans look like new traces, and therefore might get a different sampling decision -- which would result in missing spans in Honeycomb. #561 abstracted the SampleCache interface to prepare for other implementations. This uses it to provide a new cache design. ## Short description of the changes This design implements a "cuckoo filter" cache for dropped traces, which can store the dropped trace information much more efficiently (about 4 bytes per trace as compared to about 200 bytes for kept traces). 
- Adds a CuckooTraceChecker type that implements a 2-stage cuckoo filter for tracking recently-used trace IDs over time. - Implements the SampleCache interface with a CuckooSentCache, which uses the existing LRU for kept traces, and a CuckooTraceChecker for dropped traces. - Implements a new configuration block for caches to allow users to opt into the cuckoo cache and control it for their needs, but is still backwards compatible. - Adds documentation to the config_complete file. - Adds additional metrics for tracking the cuckoo cache size --- collect/cache/cuckoo.go | 97 +++++++++++++++ collect/cache/cuckooSentCache.go | 189 ++++++++++++++++++++++++++++++ collect/cache/legacySentCache.go | 11 ++ collect/cache/traceSentCache.go | 5 + collect/collect.go | 33 ++++-- collect/collect_benchmark_test.go | 12 +- collect/collect_test.go | 20 ++-- config/config.go | 2 + config/config_test.go | 55 +++++++++ config/file_config.go | 19 +++ config/mock.go | 8 ++ config_complete.toml | 51 +++++++- go.mod | 2 + go.sum | 4 + 14 files changed, 483 insertions(+), 25 deletions(-) create mode 100644 collect/cache/cuckoo.go create mode 100644 collect/cache/cuckooSentCache.go diff --git a/collect/cache/cuckoo.go b/collect/cache/cuckoo.go new file mode 100644 index 0000000000..4735c7a66b --- /dev/null +++ b/collect/cache/cuckoo.go @@ -0,0 +1,97 @@ +package cache + +import ( + "sync" + + "github.com/honeycombio/refinery/metrics" + cuckoo "github.com/panmari/cuckoofilter" +) + +// These are the names of metrics tracked for the cuckoo filter +const ( + CurrentLoadFactor = "cuckoo_current_load_factor" + FutureLoadFactor = "cuckoo_future_load_factor" + CurrentCapacity = "cuckoo_current_capacity" +) + +// This wraps a cuckoo filter implementation in a way that lets us keep it running forever +// without filling up. +// A cuckoo filter can't be emptied (you can delete individual items if you know what they are, +// but you can't get their names from the filter). 
Consequently, what we do is keep *two* filters, +// current and future. The current one is the one we use to check against, and when we add, we +// add to both. But the future one is started *after* the current one, so that when the current +// gets too full, we can discard it, replace it with future, and then start a new, empty future. +// This is why the future filter is nil until the current filter reaches .5. +// You must call Maintain() periodically, most likely from a goroutine. The call is cheap, +// and the timing isn't very critical. The effect of going above "capacity" is an increased +// false positive rate, but the filter continues to function. +type CuckooTraceChecker struct { + current *cuckoo.Filter + future *cuckoo.Filter + mut sync.RWMutex + capacity uint + met metrics.Metrics +} + +func NewCuckooTraceChecker(capacity uint, m metrics.Metrics) *CuckooTraceChecker { + return &CuckooTraceChecker{ + capacity: capacity, + current: cuckoo.NewFilter(capacity), + future: nil, + met: m, + } +} + +// Add puts a traceID into the filter. +func (c *CuckooTraceChecker) Add(traceID string) { + c.mut.Lock() + defer c.mut.Unlock() + c.current.Insert([]byte(traceID)) + // don't add anything to future if it doesn't exist yet + if c.future != nil { + c.future.Insert([]byte(traceID)) + } +} + +// Check tests if a traceID is (very probably) in the filter. +func (c *CuckooTraceChecker) Check(traceID string) bool { + b := []byte(traceID) + c.mut.RLock() + defer c.mut.RUnlock() + return c.current.Lookup(b) +} + +// Maintain should be called periodically; if the current filter is full, it replaces +// it with the future filter and creates a new future filter. 
+func (c *CuckooTraceChecker) Maintain() { + c.mut.RLock() + currentLoadFactor := c.current.LoadFactor() + c.met.Gauge(CurrentLoadFactor, currentLoadFactor) + if c.future != nil { + c.met.Gauge(FutureLoadFactor, c.future.LoadFactor()) + } + c.met.Gauge(CurrentCapacity, c.capacity) + c.mut.RUnlock() + + // once the current one is half loaded, we can start using the future one too + if c.future == nil && currentLoadFactor > 0.5 { + c.mut.Lock() + c.future = cuckoo.NewFilter(c.capacity) + c.mut.Unlock() + } + + // if the current one is full, cycle the filters + if currentLoadFactor > 0.99 { + c.mut.Lock() + defer c.mut.Unlock() + c.current = c.future + c.future = cuckoo.NewFilter(c.capacity) + } +} + +// SetNextCapacity adjusts the capacity that will be set for the future filter on the next replacement. +func (c *CuckooTraceChecker) SetNextCapacity(capacity uint) { + c.mut.Lock() + defer c.mut.Unlock() + c.capacity = capacity +} diff --git a/collect/cache/cuckooSentCache.go b/collect/cache/cuckooSentCache.go new file mode 100644 index 0000000000..820553b094 --- /dev/null +++ b/collect/cache/cuckooSentCache.go @@ -0,0 +1,189 @@ +package cache + +import ( + "sync" + "time" + + lru "github.com/hashicorp/golang-lru" + "github.com/honeycombio/refinery/config" + "github.com/honeycombio/refinery/metrics" + "github.com/honeycombio/refinery/types" +) + +// cuckooSentCache extends Refinery's legacy cache. It keeps the same records +// for kept traces but adds a pair of cuckoo filters to record dropped traces. +// This allows many more traces to be kept in the cache; now only kept records +// are retained in the cache of sentRecords. +// The size of the sent cache is still set based on the size of the live trace cache, +// and the size of the dropped cache is an independent value. + +// cuckooKeptRecord is an internal record we leave behind when keeping a trace to remember +// our decision for the future. 
We only store them if the record was kept. +type cuckooKeptRecord struct { + rate uint // sample rate used when sending the trace + spanCount uint // number of spans in the trace (we decorate the root span with this) +} + +func (t *cuckooKeptRecord) Kept() bool { + return true +} + +func (t *cuckooKeptRecord) Rate() uint { + return t.rate +} + +func (t *cuckooKeptRecord) DescendantCount() uint { + return uint(t.spanCount) +} + +func (t *cuckooKeptRecord) Count(*types.Span) { + t.spanCount++ +} + +// Make sure it implements TraceSentRecord +var _ TraceSentRecord = (*cuckooKeptRecord)(nil) + +// cuckooSentRecord is what we return when the trace was dropped. +// It's always the same one. +type cuckooDroppedRecord struct{} + +func (t *cuckooDroppedRecord) Kept() bool { + return false +} + +func (t *cuckooDroppedRecord) Rate() uint { + return 0 +} + +func (t *cuckooDroppedRecord) DescendantCount() uint { + return 0 +} + +func (t *cuckooDroppedRecord) Count(*types.Span) { +} + +// Make sure it implements TraceSentRecord +var _ TraceSentRecord = (*cuckooDroppedRecord)(nil) + +type cuckooSentCache struct { + kept *lru.Cache + dropped *CuckooTraceChecker + cfg config.SampleCacheConfig + + // The done channel is used to decide when to terminate the monitor + // goroutine. When resizing the cache, we write to the channel, but + // when terminating the system, call Stop() to close the channel. + // Either one causes the goroutine to shut down, and in resizing + // we then start a new monitor. 
+ done chan struct{} + + // This mutex is for managing kept traces + keptMut sync.Mutex +} + +// Make sure it implements TraceSentCache +var _ TraceSentCache = (*cuckooSentCache)(nil) + +func NewCuckooSentCache(cfg config.SampleCacheConfig, met metrics.Metrics) (TraceSentCache, error) { + stc, err := lru.New(int(cfg.KeptSize)) + if err != nil { + return nil, err + } + dropped := NewCuckooTraceChecker(cfg.DroppedSize, met) + + cache := &cuckooSentCache{ + kept: stc, + dropped: dropped, + cfg: cfg, + done: make(chan struct{}), + } + go cache.monitor() + return cache, nil +} + +// goroutine to monitor the cache and cycle the size check periodically +func (c *cuckooSentCache) monitor() { + ticker := time.NewTicker(c.cfg.SizeCheckInterval) + for { + select { + case <-ticker.C: + c.dropped.Maintain() + case <-c.done: + return + } + } +} + +// Stop halts the monitor goroutine +func (c *cuckooSentCache) Stop() { + close(c.done) +} + +func (c *cuckooSentCache) Record(trace *types.Trace, keep bool) { + if keep { + // record this decision in the sent record LRU for future spans + sentRecord := cuckooKeptRecord{ + rate: trace.SampleRate, + spanCount: trace.DescendantCount(), + } + c.keptMut.Lock() + defer c.keptMut.Unlock() + c.kept.Add(trace.TraceID, &sentRecord) + return + } + // if we're not keeping it, save it in the dropped trace filter + c.dropped.Add(trace.TraceID) +} + +func (c *cuckooSentCache) Check(span *types.Span) (TraceSentRecord, bool) { + // was it dropped? + if c.dropped.Check(span.TraceID) { + // we recognize it as dropped, so just say so; there's nothing else to do + return &cuckooDroppedRecord{}, false + } + // was it kept? 
+ c.keptMut.Lock() + defer c.keptMut.Unlock() + if sentRecord, found := c.kept.Get(span.TraceID); found { + if sr, ok := sentRecord.(*cuckooKeptRecord); ok { + // if we kept it, then this span being checked needs counting too + sr.Count(span) + return sr, true + } + } + // we have no memory of this place + return nil, false +} + +func (c *cuckooSentCache) Resize(cfg config.SampleCacheConfig) error { + stc, err := lru.New(int(cfg.KeptSize)) + if err != nil { + return err + } + + // grab all the items in the current cache; if it's larger than + // what will fit in the new one, discard the oldest ones + // (we don't have to do anything with the ones we discard, this is + // the trace decisions cache). + c.keptMut.Lock() + defer c.keptMut.Unlock() + keys := c.kept.Keys() + if len(keys) > int(cfg.KeptSize) { + keys = keys[len(keys)-int(cfg.KeptSize):] + } + // copy all the keys to the new cache in order + for _, k := range keys { + if v, found := c.kept.Get(k); found { + stc.Add(k, v) + } + } + c.kept = stc + + // also set up the drop cache size to change eventually + c.dropped.SetNextCapacity(cfg.DroppedSize) + + // shut down the old monitor and create a new one + c.done <- struct{}{} + go c.monitor() + return nil +} diff --git a/collect/cache/legacySentCache.go b/collect/cache/legacySentCache.go index b3a84834c0..b9e1f95c0d 100644 --- a/collect/cache/legacySentCache.go +++ b/collect/cache/legacySentCache.go @@ -2,6 +2,7 @@ package cache import ( lru "github.com/hashicorp/golang-lru" + "github.com/honeycombio/refinery/config" "github.com/honeycombio/refinery/types" ) @@ -71,3 +72,13 @@ func (c *legacySentCache) Check(span *types.Span) (TraceSentRecord, bool) { } return nil, false } + +// legacy Stop does nothing +// Stop halts the monitor goroutine +func (c *legacySentCache) Stop() { +} + +// legacy Resize does nothing +func (c *legacySentCache) Resize(cfg config.SampleCacheConfig) error { + return nil +} diff --git 
a/collect/cache/traceSentCache.go b/collect/cache/traceSentCache.go index e55e3b0def..b9e2d8f213 100644 --- a/collect/cache/traceSentCache.go +++ b/collect/cache/traceSentCache.go @@ -1,6 +1,7 @@ package cache import ( + "github.com/honeycombio/refinery/config" "github.com/honeycombio/refinery/types" ) @@ -21,4 +22,8 @@ type TraceSentCache interface { // Check tests if a trace corresponding to the span is in the cache; if found, it returns the appropriate TraceSentRecord and true, // else nil and false. Check(span *types.Span) (TraceSentRecord, bool) + // Stop halts the cache in preparation for shutdown + Stop() + // Resize adjusts the size of the cache according to the Config passed in + Resize(cfg config.SampleCacheConfig) error } diff --git a/collect/collect.go b/collect/collect.go index cf664bf9b9..62c9487946 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -72,7 +72,7 @@ type InMemCollector struct { cache cache.Cache datasetSamplers map[string]sample.Sampler - sentTraceCache cache.TraceSentCache + sampleTraceCache cache.TraceSentCache incoming chan *types.Span fromPeer chan *types.Span @@ -111,9 +111,23 @@ func (i *InMemCollector) Start() error { i.Metrics.Register(TraceSendEjectedFull, "counter") i.Metrics.Register(TraceSendEjectedMemsize, "counter") - i.sentTraceCache, err = cache.NewLegacySentCache(imcConfig.CacheCapacity * 5) // (keep 5x ring buffer size) - if err != nil { - return err + sampleCacheConfig := i.Config.GetSampleCacheConfig() + switch sampleCacheConfig.Type { + case "legacy", "": + i.sampleTraceCache, err = cache.NewLegacySentCache(imcConfig.CacheCapacity * 5) // (keep 5x ring buffer size) + if err != nil { + return err + } + case "cuckoo": + i.Metrics.Register(cache.CurrentCapacity, "gauge") + i.Metrics.Register(cache.FutureLoadFactor, "gauge") + i.Metrics.Register(cache.CurrentLoadFactor, "gauge") + i.sampleTraceCache, err = cache.NewCuckooSentCache(sampleCacheConfig, i.Metrics) + if err != nil { + 
return err + } + default: + return fmt.Errorf("validation failure - sampleTraceCache had invalid config type '%s'", sampleCacheConfig.Type) } i.incoming = make(chan *types.Span, imcConfig.CacheCapacity*3) @@ -165,8 +179,10 @@ func (i *InMemCollector) reloadConfigs() { } i.cache = c } else { - i.Logger.Debug().Logf("skipping reloading the cache on config reload because it hasn't changed capacity") + i.Logger.Debug().Logf("skipping reloading the in-memory cache on config reload because it hasn't changed capacity") } + + i.sampleTraceCache.Resize(i.Config.GetSampleCacheConfig()) } else { i.Logger.Error().WithField("cache", i.cache.(*cache.DefaultInMemCache)).Logf("skipping reloading the cache on config reload because it's not an in-memory cache") } @@ -414,7 +430,7 @@ func (i *InMemCollector) processSpan(sp *types.Span) { trace := i.cache.Get(sp.TraceID) if trace == nil { // if the trace has already been sent, just pass along the span - if sr, found := i.sentTraceCache.Check(sp); found { + if sr, found := i.sampleTraceCache.Check(sp); found { i.Metrics.Increment("trace_sent_cache_hit") // bump the count of records on this trace -- if the root span isn't // the last late span, then it won't be perfect, but it will be better than @@ -585,7 +601,7 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { trace.KeepSample = shouldSend logFields["reason"] = reason - i.sentTraceCache.Record(trace, shouldSend) + i.sampleTraceCache.Record(trace, shouldSend) // if we're supposed to drop this trace, and dry run mode is not enabled, then we're done. 
if !shouldSend && !i.Config.GetIsDryRun() { @@ -642,6 +658,9 @@ func (i *InMemCollector) Stop() error { if i.Transmission != nil { i.Transmission.Flush() } + + i.sampleTraceCache.Stop() + return nil } diff --git a/collect/collect_benchmark_test.go b/collect/collect_benchmark_test.go index 6171dd583d..2fccb5601b 100644 --- a/collect/collect_benchmark_test.go +++ b/collect/collect_benchmark_test.go @@ -46,12 +46,12 @@ func BenchmarkCollect(b *testing.B) { Config: conf, Logger: log, }, - BlockOnAddSpan: true, - cache: cache.NewInMemCache(3, metric, log), - incoming: make(chan *types.Span, 500), - fromPeer: make(chan *types.Span, 500), - datasetSamplers: make(map[string]sample.Sampler), - sentTraceCache: stc, + BlockOnAddSpan: true, + cache: cache.NewInMemCache(3, metric, log), + incoming: make(chan *types.Span, 500), + fromPeer: make(chan *types.Span, 500), + datasetSamplers: make(map[string]sample.Sampler), + sampleTraceCache: stc, } go coll.collect() diff --git a/collect/collect_test.go b/collect/collect_test.go index fcfb85a476..dc0f21ce6f 100644 --- a/collect/collect_test.go +++ b/collect/collect_test.go @@ -49,7 +49,7 @@ func TestAddRootSpan(t *testing.T) { coll.cache = c stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") - coll.sentTraceCache = stc + coll.sampleTraceCache = stc coll.incoming = make(chan *types.Span, 5) coll.fromPeer = make(chan *types.Span, 5) @@ -127,7 +127,7 @@ func TestOriginalSampleRateIsNotedInMetaField(t *testing.T) { coll.cache = c stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") - coll.sentTraceCache = stc + coll.sampleTraceCache = stc coll.incoming = make(chan *types.Span, 5) coll.fromPeer = make(chan *types.Span, 5) @@ -185,7 +185,7 @@ func TestTransmittedSpansShouldHaveASampleRateOfAtLeastOne(t *testing.T) { coll.cache = c stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") - coll.sentTraceCache = stc + 
coll.sampleTraceCache = stc coll.incoming = make(chan *types.Span, 5) coll.fromPeer = make(chan *types.Span, 5) @@ -246,7 +246,7 @@ func TestAddSpan(t *testing.T) { coll.cache = c stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") - coll.sentTraceCache = stc + coll.sampleTraceCache = stc coll.incoming = make(chan *types.Span, 5) coll.fromPeer = make(chan *types.Span, 5) @@ -319,7 +319,7 @@ func TestDryRunMode(t *testing.T) { coll.cache = c stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") - coll.sentTraceCache = stc + coll.sampleTraceCache = stc coll.incoming = make(chan *types.Span, 5) coll.fromPeer = make(chan *types.Span, 5) @@ -586,7 +586,7 @@ func TestOldMaxAlloc(t *testing.T) { coll.cache = c stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") - coll.sentTraceCache = stc + coll.sampleTraceCache = stc coll.incoming = make(chan *types.Span, 1000) coll.fromPeer = make(chan *types.Span, 5) @@ -690,7 +690,7 @@ func TestStableMaxAlloc(t *testing.T) { coll.cache = c stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") - coll.sentTraceCache = stc + coll.sampleTraceCache = stc coll.incoming = make(chan *types.Span, 1000) coll.fromPeer = make(chan *types.Span, 5) @@ -778,7 +778,7 @@ func TestAddSpanNoBlock(t *testing.T) { coll.cache = c stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") - coll.sentTraceCache = stc + coll.sampleTraceCache = stc coll.incoming = make(chan *types.Span, 3) coll.fromPeer = make(chan *types.Span, 3) @@ -850,7 +850,7 @@ func TestAddSpanCount(t *testing.T) { coll.cache = c stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") - coll.sentTraceCache = stc + coll.sampleTraceCache = stc coll.incoming = make(chan *types.Span, 5) coll.fromPeer = make(chan *types.Span, 5) @@ -919,7 +919,7 @@ func TestLateRootGetsSpanCount(t 
*testing.T) { coll.cache = c stc, err := cache.NewLegacySentCache(15) assert.NoError(t, err, "lru cache should start") - coll.sentTraceCache = stc + coll.sampleTraceCache = stc coll.incoming = make(chan *types.Span, 5) coll.fromPeer = make(chan *types.Span, 5) diff --git a/config/config.go b/config/config.go index 54ef588297..f250314976 100644 --- a/config/config.go +++ b/config/config.go @@ -172,6 +172,8 @@ type Config interface { GetCacheOverrunStrategy() string GetConfigMetadata() []ConfigMetadata + + GetSampleCacheConfig() SampleCacheConfig } type ConfigMetadata struct { diff --git a/config/config_test.go b/config/config_test.go index 6f057dc989..3f19d4f915 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -814,3 +814,58 @@ func TestHoneycombAdditionalErrorDefaults(t *testing.T) { assert.Equal(t, []string{"trace.span_id"}, c.GetAdditionalErrorFields()) } + +func TestSampleCacheParameters(t *testing.T) { + config, rules := createTempConfigs(t, ` + [InMemCollector] + CacheCapacity=1000 + + [HoneycombMetrics] + MetricsHoneycombAPI="http://honeycomb.io" + MetricsAPIKey="1234" + MetricsDataset="testDatasetName" + MetricsReportingInterval=3 + + `, "") + defer os.Remove(rules) + defer os.Remove(config) + + c, err := NewConfig(config, rules, func(err error) {}) + assert.NoError(t, err) + + s := c.GetSampleCacheConfig() + assert.Equal(t, "legacy", s.Type) + assert.Equal(t, uint(10_000), s.KeptSize) + assert.Equal(t, uint(1_000_000), s.DroppedSize) + assert.Equal(t, 10*time.Second, s.SizeCheckInterval) +} + +func TestSampleCacheParametersCuckoo(t *testing.T) { + config, rules := createTempConfigs(t, ` + [InMemCollector] + CacheCapacity=1000 + + [HoneycombMetrics] + MetricsHoneycombAPI="http://honeycomb.io" + MetricsAPIKey="1234" + MetricsDataset="testDatasetName" + MetricsReportingInterval=3 + + [SampleCache] + Type="cuckoo" + KeptSize=100_000 + DroppedSize=10_000_000 + SizeCheckInterval="60s" + `, "") + defer os.Remove(rules) + defer os.Remove(config) + 
+ c, err := NewConfig(config, rules, func(err error) {}) + assert.NoError(t, err) + + s := c.GetSampleCacheConfig() + assert.Equal(t, "cuckoo", s.Type) + assert.Equal(t, uint(100_000), s.KeptSize) + assert.Equal(t, uint(10_000_000), s.DroppedSize) + assert.Equal(t, 1*time.Minute, s.SizeCheckInterval) +} diff --git a/config/file_config.go b/config/file_config.go index a00441cba4..13a3d9e8ca 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -62,6 +62,7 @@ type configContents struct { AdditionalErrorFields []string AddSpanCountToRoot bool CacheOverrunStrategy string + SampleCache SampleCacheConfig `validate:"required"` } type InMemoryCollectorCacheCapacity struct { @@ -106,6 +107,13 @@ type PeerManagementConfig struct { Timeout time.Duration } +type SampleCacheConfig struct { + Type string `validate:"required,oneof= legacy cuckoo"` + KeptSize uint `validate:"gte=500"` + DroppedSize uint `validate:"gte=100_000"` + SizeCheckInterval time.Duration `validate:"gte=1_000_000_000"` // 1 second minimum +} + // GRPCServerParameters allow you to configure the GRPC ServerParameters used // by refinery's own GRPC server: // https://pkg.go.dev/google.golang.org/grpc/keepalive#ServerParameters @@ -164,6 +172,10 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("AdditionalErrorFields", []string{"trace.span_id"}) c.SetDefault("AddSpanCountToRoot", false) c.SetDefault("CacheOverrunStrategy", "resize") + c.SetDefault("SampleCache.Type", "legacy") + c.SetDefault("SampleCache.KeptSize", 10_000) + c.SetDefault("SampleCache.DroppedSize", 1_000_000) + c.SetDefault("SampleCache.SizeCheckInterval", 10*time.Second) c.SetConfigFile(config) err := c.ReadInConfig() @@ -914,6 +926,13 @@ func (f *fileConfig) GetCacheOverrunStrategy() string { return f.conf.CacheOverrunStrategy } +func (f *fileConfig) GetSampleCacheConfig() SampleCacheConfig { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.SampleCache +} + // calculates an MD5 
sum for a file that returns the same result as the md5sum command func calcMD5For(filename string) string { f, err := os.Open(filename) diff --git a/config/mock.go b/config/mock.go index 31c3371f93..9b47219de1 100644 --- a/config/mock.go +++ b/config/mock.go @@ -85,6 +85,7 @@ type MockConfig struct { AdditionalErrorFields []string AddSpanCountToRoot bool CacheOverrunStrategy string + SampleCache SampleCacheConfig CfgMetadata []ConfigMetadata Mux sync.RWMutex @@ -469,6 +470,13 @@ func (f *MockConfig) GetCacheOverrunStrategy() string { return f.CacheOverrunStrategy } +func (f *MockConfig) GetSampleCacheConfig() SampleCacheConfig { + f.Mux.RLock() + defer f.Mux.RUnlock() + + return f.SampleCache +} + func (f *MockConfig) GetConfigMetadata() []ConfigMetadata { f.Mux.RLock() defer f.Mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index 972883a4f5..793288edf1 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -356,9 +356,9 @@ MetricsDataset = "Refinery Metrics" MetricsReportingInterval = 3 -#####################@## +####################### ## Prometheus Metrics ## -#####################@## +####################### [PrometheusMetrics] @@ -416,3 +416,50 @@ MetricsReportingInterval = 3 # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 # Not eligible for live reload. # Timeout = "2s" + + + +################################ +## Sample Cache Configuration ## +################################ + +# Sample Cache Configuration controls the sample cache used to retain information about trace +# status after the sampling decision has been made. + +[SampleCacheConfig] + +# Type controls the type of sample cache used. +# "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is +# 5x the size of the trace cache. This is Refinery's original sample cache strategy. 
+# "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember +# a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. +# It is also more configurable. The cuckoo filter is recommended for most installations. +# Default is "legacy". +# Not eligible for live reload (you cannot change the type of cache with reload). +# Type = "cuckoo" + +# KeptSize controls the number of traces preserved in the cuckoo kept traces cache. +# Refinery keeps a record of each trace that was kept and sent to Honeycomb, along with some +# statistical information. This is most useful in cases where the trace was sent before sending +# the root span, so that the root span can be decorated with accurate metadata. +# Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). +# Does not apply to the "legacy" type of cache. +# Eligible for live reload. +# KeptSize = 10_000 + +# DroppedSize controls the size of the cuckoo dropped traces cache. +# This cache consumes 4-6 bytes per trace at a scale of millions of traces. +# Changing its size with live reload sets a future limit, but does not have an immediate effect. +# Default is 1_000_000 traces. +# Does not apply to the "legacy" type of cache. +# Eligible for live reload. +# DroppedSize = 1_000_000 + +# SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates +# the remaining capacity of its dropped traces cache and possibly cycles it. +# This cache is quite resilient so it doesn't need to happen very often, but the +# operation is also inexpensive. +# Default is 10 seconds. +# Does not apply to the "legacy" type of cache. +# Eligible for live reload. 
+# SizeCheckInterval = "10s" diff --git a/go.mod b/go.mod index 7d21988296..a051989d3d 100644 --- a/go.mod +++ b/go.mod @@ -36,6 +36,7 @@ require ( require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 // indirect github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 // indirect github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 // indirect @@ -51,6 +52,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/panmari/cuckoofilter v1.0.3 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect diff --git a/go.sum b/go.sum index b9d33b4a72..d6c0adf5f5 100644 --- a/go.sum +++ b/go.sum @@ -63,6 +63,8 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 h1:BS21ZUJ/B5X2UVUbczfmdWH7GapPWAhxcMsDnjJTU1E= +github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -232,6 +234,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= +github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= From 43edc1e55f064d128de5693f647a8b1ff9fb63c8 Mon Sep 17 00:00:00 2001 From: Purvi Kanal Date: Mon, 28 Nov 2022 13:46:30 -0500 Subject: [PATCH 258/351] ci: validate PR title (#571) --- .github/workflows/validate-pr-title.yml | 31 +++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 .github/workflows/validate-pr-title.yml diff --git a/.github/workflows/validate-pr-title.yml b/.github/workflows/validate-pr-title.yml new file mode 100644 index 0000000000..65fc7bcb74 --- /dev/null +++ b/.github/workflows/validate-pr-title.yml @@ -0,0 +1,31 @@ +name: "Validate PR Title" + +on: + pull_request: + types: + - opened + - edited + - synchronize + +jobs: + main: 
+ name: Validate PR title + runs-on: ubuntu-latest + steps: + - uses: amannn/action-semantic-pull-request@v5 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + # Have to specify all types because `maint` and `rel` aren't defaults + types: | + maint + rel + fix + feat + chore + ci + docs + style + refactor + perf + test From dc283f3bbf99a345142a07e8f40e34b695c5a431 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Mon, 28 Nov 2022 17:04:17 -0500 Subject: [PATCH 259/351] On shutdown, remove ourself from the peers list (#569) ## Which problem is this PR solving? - In a redis-managed pool of peers, the peer group is updated only after a timeout. This adds explicit unregistration so that in the case of a deliberately terminated instance, redis is notified immediately that the peer has dropped. - Doesn't change the behavior of refreshing the peer list, which is still a pull-based timeout. - Fixes #393 ## Short description of the changes - Add an Unregister method to redimem - Add a done channel to the goroutines that keep redis updated - Unregister when the done channel is closed - Propagate the done channel through the app initialization - Close the done channel on shutdown - Update tests - Add a new test to make sure that unregistration happens - Also adds a new test case to rules; I wrote it to verify we didn't have a bug and thought it should probably stay in. 
--- app/app_test.go | 4 +- cmd/refinery/main.go | 6 +- config/config_test_reload_error_test.go | 2 - internal/peer/peers.go | 5 +- internal/peer/peers_test.go | 35 +++++++++- internal/peer/redis.go | 89 +++++++++++++++---------- internal/redimem/redimem.go | 31 +++++++-- sample/rules_test.go | 53 +++++++++++++++ sharder/deterministic_test.go | 8 ++- 9 files changed, 181 insertions(+), 52 deletions(-) diff --git a/app/app_test.go b/app/app_test.go index 1eb44d035b..12da58ea51 100644 --- a/app/app_test.go +++ b/app/app_test.go @@ -1,5 +1,3 @@ -//go:build all || race - package app import ( @@ -123,7 +121,7 @@ func newStartedApp( var err error if peers == nil { - peers, err = peer.NewPeers(context.Background(), c) + peers, err = peer.NewPeers(context.Background(), c, make(chan struct{})) assert.NoError(t, err) } diff --git a/cmd/refinery/main.go b/cmd/refinery/main.go index 0690d96509..a619468d13 100644 --- a/cmd/refinery/main.go +++ b/cmd/refinery/main.go @@ -106,7 +106,8 @@ func main() { ctx, cancel := context.WithTimeout(context.Background(), c.GetPeerTimeout()) defer cancel() - peers, err := peer.NewPeers(ctx, c) + done := make(chan struct{}) + peers, err := peer.NewPeers(ctx, c, done) if err != nil { fmt.Printf("unable to load peers: %+v\n", err) @@ -226,5 +227,8 @@ func main() { // block on our signal handler to exit sig := <-sigsToExit + // unregister ourselves before we go + close(done) + time.Sleep(100 * time.Millisecond) a.Logger.Error().Logf("Caught signal \"%s\"", sig) } diff --git a/config/config_test_reload_error_test.go b/config/config_test_reload_error_test.go index 307166b27a..ca7e00c64a 100644 --- a/config/config_test_reload_error_test.go +++ b/config/config_test_reload_error_test.go @@ -1,5 +1,3 @@ -//go:build all || !race - package config import ( diff --git a/internal/peer/peers.go b/internal/peer/peers.go index c17000ef1d..ed84ab7764 100644 --- a/internal/peer/peers.go +++ b/internal/peer/peers.go @@ -3,6 +3,7 @@ package peer import ( "context" 
"errors" + "github.com/honeycombio/refinery/config" ) @@ -13,7 +14,7 @@ type Peers interface { RegisterUpdatedPeersCallback(callback func()) } -func NewPeers(ctx context.Context, c config.Config) (Peers, error) { +func NewPeers(ctx context.Context, c config.Config, done chan struct{}) (Peers, error) { t, err := c.GetPeerManagementType() if err != nil { @@ -24,7 +25,7 @@ func NewPeers(ctx context.Context, c config.Config) (Peers, error) { case "file": return newFilePeers(c), nil case "redis": - return newRedisPeers(ctx, c) + return newRedisPeers(ctx, c, done) default: return nil, errors.New("invalid config option 'PeerManagement.Type'") } diff --git a/internal/peer/peers_test.go b/internal/peer/peers_test.go index 5ec7f8137a..accaad1097 100644 --- a/internal/peer/peers_test.go +++ b/internal/peer/peers_test.go @@ -2,6 +2,7 @@ package peer import ( "context" + "strings" "testing" "time" @@ -16,7 +17,9 @@ func TestNewPeers(t *testing.T) { PeerTimeout: 5 * time.Second, } - p, err := NewPeers(context.Background(), c) + done := make(chan struct{}) + defer close(done) + p, err := NewPeers(context.Background(), c, done) assert.NoError(t, err) require.NotNil(t, p) @@ -32,7 +35,7 @@ func TestNewPeers(t *testing.T) { PeerTimeout: 5 * time.Second, } - p, err = NewPeers(context.Background(), c) + p, err = NewPeers(context.Background(), c, done) assert.NoError(t, err) require.NotNil(t, p) @@ -42,3 +45,31 @@ func TestNewPeers(t *testing.T) { t.Errorf("received %T expected %T", i, &redisPeers{}) } } + +func TestPeerShutdown(t *testing.T) { + c := &config.MockConfig{ + GetPeerListenAddrVal: "0.0.0.0:8081", + PeerManagementType: "redis", + PeerTimeout: 5 * time.Second, + } + + done := make(chan struct{}) + p, err := NewPeers(context.Background(), c, done) + assert.NoError(t, err) + require.NotNil(t, p) + + peer, ok := p.(*redisPeers) + assert.True(t, ok) + + peers, err := peer.GetPeers() + assert.NoError(t, err) + assert.Equal(t, 1, len(peers)) + assert.True(t, 
strings.HasPrefix(peers[0], "http")) + assert.True(t, strings.HasSuffix(peers[0], "8081")) + + close(done) + time.Sleep(100 * time.Millisecond) + peers, err = peer.GetPeers() + assert.NoError(t, err) + assert.Equal(t, 0, len(peers)) +} diff --git a/internal/peer/redis.go b/internal/peer/redis.go index 4d7be37715..405dc7f95a 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -45,7 +45,7 @@ type redisPeers struct { } // NewRedisPeers returns a peers collection backed by redis -func newRedisPeers(ctx context.Context, c config.Config) (Peers, error) { +func newRedisPeers(ctx context.Context, c config.Config, done chan struct{}) (Peers, error) { redisHost, _ := c.GetRedisHost() if redisHost == "" { @@ -108,7 +108,7 @@ func newRedisPeers(ctx context.Context, c config.Config) (Peers, error) { } // go establish a regular registration heartbeat to ensure I stay alive in redis - go peers.registerSelf() + go peers.registerSelf(done) // get our peer list once to seed ourselves peers.updatePeerListOnce() @@ -116,7 +116,7 @@ func newRedisPeers(ctx context.Context, c config.Config) (Peers, error) { // go watch the list of peers and trigger callbacks whenever it changes. // populate my local list of peers so each request can hit memory and only hit // redis on a ticker - go peers.watchPeers() + go peers.watchPeers(done) return peers, nil } @@ -135,15 +135,24 @@ func (p *redisPeers) RegisterUpdatedPeersCallback(cb func()) { // registerSelf inserts self into the peer list and updates self's entry on a // regular basis so it doesn't time out and get removed from the list of peers. -// If this function stops, this host will get ejected from other's peer lists. -func (p *redisPeers) registerSelf() { +// When this function stops, it tries to remove the registered key. 
+func (p *redisPeers) registerSelf(done chan struct{}) { tk := time.NewTicker(refreshCacheInterval) - for range tk.C { - ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout()) - // every 5 seconds, insert a 30sec timeout record. we ignore the error - // here since Register() logs the error for us. - p.store.Register(ctx, p.publicAddr, peerEntryTimeout) - cancel() + for { + select { + case <-tk.C: + ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout()) + // every interval, insert a timeout record. we ignore the error + // here since Register() logs the error for us. + p.store.Register(ctx, p.publicAddr, peerEntryTimeout) + cancel() + case <-done: + // unregister ourselves + ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout()) + p.store.Unregister(ctx, p.publicAddr) + cancel() + return + } } } @@ -168,38 +177,46 @@ func (p *redisPeers) updatePeerListOnce() { p.peerLock.Unlock() } -func (p *redisPeers) watchPeers() { +func (p *redisPeers) watchPeers(done chan struct{}) { oldPeerList := p.peers sort.Strings(oldPeerList) tk := time.NewTicker(refreshCacheInterval) - for range tk.C { - ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout()) - currentPeers, err := p.store.GetMembers(ctx) - cancel() - - if err != nil { - logrus.WithError(err). - WithFields(logrus.Fields{ - "name": p.publicAddr, - "timeout": p.c.GetPeerTimeout().String(), - "oldPeers": oldPeerList, - }). - Error("get members failed during watch") - continue - } + for { + select { + case <-tk.C: + ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout()) + currentPeers, err := p.store.GetMembers(ctx) + cancel() + + if err != nil { + logrus.WithError(err). + WithFields(logrus.Fields{ + "name": p.publicAddr, + "timeout": p.c.GetPeerTimeout().String(), + "oldPeers": oldPeerList, + }). 
+ Error("get members failed during watch") + continue + } - sort.Strings(currentPeers) - if !equal(oldPeerList, currentPeers) { - // update peer list and trigger callbacks saying the peer list has changed + sort.Strings(currentPeers) + if !equal(oldPeerList, currentPeers) { + // update peer list and trigger callbacks saying the peer list has changed + p.peerLock.Lock() + p.peers = currentPeers + oldPeerList = currentPeers + p.peerLock.Unlock() + for _, callback := range p.callbacks { + // don't block on any of the callbacks. + go callback() + } + } + case <-done: p.peerLock.Lock() - p.peers = currentPeers - oldPeerList = currentPeers + p.peers = []string{} p.peerLock.Unlock() - for _, callback := range p.callbacks { - // don't block on any of the callbacks. - go callback() - } + return } } } diff --git a/internal/redimem/redimem.go b/internal/redimem/redimem.go index 4def176a5a..ded96ed16d 100644 --- a/internal/redimem/redimem.go +++ b/internal/redimem/redimem.go @@ -20,6 +20,10 @@ type Membership interface { // in order to remain a member of the group. Register(ctx context.Context, memberName string, timeout time.Duration) error + // Unregister removes a name from the list immediately. It's intended to be + // used during shutdown so that there's no delay in the case of deliberate downsizing. + Unregister(ctx context.Context, memberName string) error + // GetMembers retrieves the list of all currently registered members. Members // that have registered but timed out will not be returned. 
GetMembers(ctx context.Context) ([]string, error) @@ -87,6 +91,27 @@ func (rm *RedisMembership) Register(ctx context.Context, memberName string, time return nil } +func (rm *RedisMembership) Unregister(ctx context.Context, memberName string) error { + err := rm.validateDefaults() + if err != nil { + return err + } + key := fmt.Sprintf("%s•%s•%s", globalPrefix, rm.Prefix, memberName) + conn, err := rm.Pool.GetContext(ctx) + if err != nil { + return err + } + defer conn.Close() + _, err = conn.Do("DEL", key) + if err != nil { + logrus.WithField("name", memberName). + WithField("err", err). + Error("unregistration failed") + return err + } + return nil +} + // GetMembers reaches out to Redis to retrieve a list of all members in the // cluster. It does this multiple times (how many is configured on // initializition) and takes the union of the results returned. @@ -189,10 +214,8 @@ func (rm *RedisMembership) scan(conn redis.Conn, pattern, count string, timeout break } - if keys != nil { - for _, key := range keys { - keyChan <- key - } + for _, key := range keys { + keyChan <- key } // redis will return 0 when we have iterated over the entire set diff --git a/sample/rules_test.go b/sample/rules_test.go index 61b1aaec6c..ea1dc166fa 100644 --- a/sample/rules_test.go +++ b/sample/rules_test.go @@ -538,6 +538,59 @@ func TestRules(t *testing.T) { ExpectedKeep: true, ExpectedRate: 1, }, + { + Rules: &config.RulesBasedSamplerConfig{ + Rule: []*config.RulesBasedSamplerRule{ + { + Name: "Check root span for span count", + Drop: true, + SampleRate: 0, + Condition: []*config.RulesBasedSamplerCondition{ + { + Field: "meta.span_count", + Operator: ">=", + Value: int(2), + }, + }, + }, + }, + }, + Spans: []*types.Span{ + { + Event: types.Event{ + Data: map[string]interface{}{ + "trace.trace_id": "12345", + "trace.span_id": "54321", + "meta.span_count": int64(2), + "test": int64(2), + }, + }, + }, + { + Event: types.Event{ + Data: map[string]interface{}{ + "trace.trace_id": "12345", 
+ "trace.span_id": "654321", + "trace.parent_id": "54321", + "test": int64(2), + }, + }, + }, + { + Event: types.Event{ + Data: map[string]interface{}{ + "trace.trace_id": "12345", + "trace.span_id": "754321", + "trace.parent_id": "54321", + "test": int64(3), + }, + }, + }, + }, + ExpectedName: "Check root span for span count", + ExpectedKeep: false, + ExpectedRate: 0, + }, } for _, d := range data { diff --git a/sharder/deterministic_test.go b/sharder/deterministic_test.go index 8d32287697..828252341b 100644 --- a/sharder/deterministic_test.go +++ b/sharder/deterministic_test.go @@ -26,7 +26,9 @@ func TestWhichShard(t *testing.T) { GetPeersVal: peers, PeerManagementType: "file", } - filePeers, err := peer.NewPeers(context.Background(), config) + done := make(chan struct{}) + defer close(done) + filePeers, err := peer.NewPeers(context.Background(), config, done) assert.Equal(t, nil, err) sharder := DeterministicSharder{ Config: config, @@ -67,7 +69,9 @@ func TestWhichShardAtEdge(t *testing.T) { GetPeersVal: peers, PeerManagementType: "file", } - filePeers, err := peer.NewPeers(context.Background(), config) + done := make(chan struct{}) + defer close(done) + filePeers, err := peer.NewPeers(context.Background(), config, done) assert.Equal(t, nil, err) sharder := DeterministicSharder{ Config: config, From 5e08742ea82e55567c2855467e0bd77c4b5b7c5c Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Mon, 28 Nov 2022 17:06:13 -0500 Subject: [PATCH 260/351] feat: Implement alternative sharding using rendezvous hash to improve dynamic scalability (#570) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Which problem is this PR solving? The legacy code in Refinery does a deterministic sharding calculation; for every span that comes in, we hash the traceID with SHA1, truncate to generate a 32-bit number, then divide the range by N if there are N shards. 
If there are 4 shards, the lowest values go to shard 0, the next block goes to shard 1, etc. The problem is that if the number of shards changes from 4 to 5, 1/5 of the shards that would have gone to 0 now go to 1. 2/5 of shard 1 now goes to shard 2; 3/5 of shard 2 (more than half!) goes to shard 3, and so forth. On average, half the shards move. The practical result of this is that when a span arrives just after a change in shard count, there’s a chance that it will end up on a different shard from other spans in the same trace that arrived earlier. The two shards might make different sampling decisions, and therefore we could end up with missing spans in Honeycomb. A technique called "[Rendezvous Hashing](https://en.wikipedia.org/wiki/Rendezvous_hashing)" uses a double hash — both the traceID and the shardID are hashed, and then we do a calculation to find which trace should go with which shard. There are several algorithms for doing this; all the algorithms seem to be roughly similar in terms of their ability to balance the load among shards. The randomness of traceIDs can lead to variation anyway, so it’s mainly a probabilistic effect. With one of these algorithms, adding or dropping a new shard usually affects approximately 1/N of the existing traces, where N is the number of shards. This is far better than half! Sometimes, random chance will be unlucky, and it might be closer to 2/N, but in general it's still quite a bit better than the legacy algorithm. Implementing the new algorithm reduces the probability of being on a different shard from 1/2 to 1/N, which is most meaningful for large installations where N might be 15, 40, or even something like 120. This seems worthwhile. Note that the point of these algorithms is to allow shard assignment decisions to be made independently by different shards yet generate the same results without requiring any synchronization between shards.
It should be noted that while the legacy sharder is O(1) in its operation, the hash sharder is O(N) where N is the number of partitions (which is either 50 or the number of shards, whichever is greater). It still runs in under a microsecond even with 128 shards. ## Short description of the changes - Add configuration to choose legacy or hash sharding strategy (default to legacy) - Update example config to explain it - Add new sharding strategy - Add mechanism to select a sharding strategy based on the config - Add tests to show that it works - Add benchmarks to show that it's performant enough --- config/config.go | 4 + config/file_config.go | 9 + config/mock.go | 8 + config_complete.toml | 13 +- go.mod | 5 +- go.sum | 2 + sharder/deterministic.go | 133 +++++++++++++-- sharder/deterministic_test.go | 304 ++++++++++++++++++++++++++++++++++ 8 files changed, 464 insertions(+), 14 deletions(-) diff --git a/config/config.go b/config/config.go index f250314976..dec7c502fa 100644 --- a/config/config.go +++ b/config/config.go @@ -41,6 +41,10 @@ type Config interface { GetPeerManagementType() (string, error) + // GetPeerManagementStrategy returns the strategy specified for + // Peer management. + GetPeerManagementStrategy() (string, error) + // GetRedisHost returns the address of a Redis instance to use for peer // management. 
GetRedisHost() (string, error) diff --git a/config/file_config.go b/config/file_config.go index 13a3d9e8ca..8240e003b5 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -105,6 +105,7 @@ type PeerManagementConfig struct { UseIPV6Identifier bool RedisIdentifier string Timeout time.Duration + Strategy string `validate:"required,oneof= legacy hash"` } type SampleCacheConfig struct { @@ -146,6 +147,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("PeerManagement.UseTLSInsecure", false) c.SetDefault("PeerManagement.UseIPV6Identifier", false) c.SetDefault("PeerManagement.Timeout", 5*time.Second) + c.SetDefault("PeerManagement.Strategy", "legacy") c.SetDefault("HoneycombAPI", "https://api.honeycomb.io") c.SetDefault("Logger", "logrus") c.SetDefault("LoggingLevel", "debug") @@ -461,6 +463,13 @@ func (f *fileConfig) GetPeerManagementType() (string, error) { return f.conf.PeerManagement.Type, nil } +func (f *fileConfig) GetPeerManagementStrategy() (string, error) { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.PeerManagement.Strategy, nil +} + func (f *fileConfig) GetPeers() ([]string, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config/mock.go b/config/mock.go index 9b47219de1..6d561f6b91 100644 --- a/config/mock.go +++ b/config/mock.go @@ -68,6 +68,7 @@ type MockConfig struct { UseIPV6Identifier bool RedisIdentifier string PeerManagementType string + PeerManagementStrategy string DebugServiceAddr string DryRun bool DryRunFieldName string @@ -351,6 +352,13 @@ func (m *MockConfig) GetPeerManagementType() (string, error) { return m.PeerManagementType, nil } +func (m *MockConfig) GetPeerManagementStrategy() (string, error) { + m.Mux.RLock() + defer m.Mux.RUnlock() + + return m.PeerManagementStrategy, nil +} + func (m *MockConfig) GetDebugServiceAddr() (string, error) { m.Mux.RLock() defer m.Mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index 793288edf1..129848fb3c 
100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -252,6 +252,16 @@ Metrics = "honeycomb" # after 5s when communicating with Redis. # Timeout = "5s" +# Strategy controls the way that traces are assigned to refinery nodes. +# The "legacy" strategy uses a simple algorithm that unfortunately causes +# 1/2 of the in-flight traces to be assigned to a different node whenever the +# number of nodes changes. +# The legacy strategy is deprecated and is intended to be removed in a future release. +# The "hash" strategy is strongly recommended, as only 1/N traces (where N is the +# number of nodes) are disrupted when the node count changes. +# Not eligible for live reload. +# Strategy = "hash" + ######################### ## In-Memory Collector ## ######################### @@ -275,7 +285,8 @@ CacheCapacity = 1000 # supported. # If set to a non-zero value, once per tick (see SendTicker) the collector # will compare total allocated bytes to this value. If allocation is too -# high, cache capacity will be reduced and an error will be logged. +# high, cache capacity will be adjusted according to the setting for +# CacheOverrunStrategy. # Useful values for this setting are generally in the range of 75%-90% of # available system memory. 
MaxAlloc = 0 diff --git a/go.mod b/go.mod index a051989d3d..5330c25f8e 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.19 require ( github.com/davecgh/go-spew v1.1.1 + github.com/dgryski/go-wyhash v0.0.0-20191203203029-c4841ae36371 github.com/facebookgo/inject v0.0.0-20180706035515-f23751cae28b github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d github.com/fsnotify/fsnotify v1.6.0 @@ -17,6 +18,7 @@ require ( github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 + github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.13.0 @@ -26,7 +28,6 @@ require ( github.com/stretchr/testify v1.8.1 github.com/tidwall/gjson v1.14.3 github.com/vmihailenco/msgpack/v4 v4.3.11 - golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b // indirect google.golang.org/grpc v1.50.1 google.golang.org/protobuf v1.28.1 gopkg.in/alexcesaro/statsd.v2 v2.0.0 @@ -52,7 +53,6 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/panmari/cuckoofilter v1.0.3 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect @@ -68,6 +68,7 @@ require ( github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + golang.org/x/net 
v0.0.0-20220826154423-83b083e8dc8b // indirect golang.org/x/sys v0.0.0-20220908164124-27713097b956 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index d6c0adf5f5..a139b00b83 100644 --- a/go.sum +++ b/go.sum @@ -65,6 +65,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 h1:BS21ZUJ/B5X2UVUbczfmdWH7GapPWAhxcMsDnjJTU1E= github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= +github.com/dgryski/go-wyhash v0.0.0-20191203203029-c4841ae36371 h1:bz5ApY1kzFBvw3yckuyRBCtqGvprWrKswYK468nm+Gs= +github.com/dgryski/go-wyhash v0.0.0-20191203203029-c4841ae36371/go.mod h1:/ENMIO1SQeJ5YQeUWWpbX8f+bS8INHrrhFjXgEqi4LA= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= diff --git a/sharder/deterministic.go b/sharder/deterministic.go index 89acc73fcb..739d955a54 100644 --- a/sharder/deterministic.go +++ b/sharder/deterministic.go @@ -11,6 +11,7 @@ import ( "sync" "time" + "github.com/dgryski/go-wyhash" "github.com/honeycombio/refinery/config" "github.com/honeycombio/refinery/internal/peer" "github.com/honeycombio/refinery/logger" @@ -18,9 +19,12 @@ import ( "github.com/sirupsen/logrus" ) -// shardingSalt is a random bit to make sure we don't shard the same as any -// other sharding that uses 
the trace ID (eg deterministic sampling) -const shardingSalt = "gf4LqTwcJ6PEj2vO" +// These are random bits to make sure we differentiate between different +// hash cases even if we use the same value (traceID). +const ( + shardingSalt = "gf4LqTwcJ6PEj2vO" + peerSeed uint64 = 6789531204236 +) // DetShard implements Shard type DetShard struct { @@ -29,6 +33,11 @@ type DetShard struct { port string } +type hashShard struct { + uhash uint64 + shardIndex int +} + func (d *DetShard) Equals(other Shard) bool { otherDetshard, ok := other.(*DetShard) if !ok { @@ -75,13 +84,32 @@ func (d *DetShard) String() string { return d.GetAddress() } +// GetHashesFor generates a number of hashShards for a given DetShard by repeatedly hashing the +// seed with itself. The intent is to generate a repeatable pseudo-random sequence. +func (d *DetShard) GetHashesFor(index int, n int, seed uint64) []hashShard { + hashes := make([]hashShard, 0) + addr := d.GetAddress() + for i := 0; i < n; i++ { + hashes = append(hashes, hashShard{ + uhash: wyhash.Hash([]byte(addr), seed), + shardIndex: index, + }) + // generate another seed from the previous seed; we want this to be the same + // sequence for everything. + seed = wyhash.Hash([]byte("anything"), seed) + } + return hashes +} + type DeterministicSharder struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` Peers peer.Peers `inject:""` - myShard *DetShard - peers []*DetShard + myShard *DetShard + peers []*DetShard + hashes []hashShard + shardFunc func(traceID string) Shard peerLock sync.RWMutex } @@ -98,6 +126,21 @@ func (d *DeterministicSharder) Start() error { } }) + // this isn't runtime-reloadable because it would + // reassign nearly every trace to a new shard. 
+ strat, err := d.Config.GetPeerManagementStrategy() + if err != nil { + return errors.Wrap(err, "failed to get peer management strategy") + } + switch strat { + case "legacy", "": + d.shardFunc = d.WhichShardLegacy + case "hash": + d.shardFunc = d.WhichShardHashed + default: + return fmt.Errorf("unknown PeerManagementStrategy '%s'", strat) + } + // Try up to 5 times to find myself in the peer list before giving up var found bool var selfIndexIntoPeerList int @@ -205,9 +248,10 @@ func (d *DeterministicSharder) loadPeerList() error { return errors.New("refusing to load empty peer list") } - // turn my peer list into a list of shards - newPeers := make([]*DetShard, 0, len(peerList)) - for _, peer := range peerList { + // turn the peer list into a list of shards + // and a list of hashes + newPeers := make([]*DetShard, len(peerList)) + for ix, peer := range peerList { peerURL, err := url.Parse(peer) if err != nil { return errors.Wrap(err, "couldn't parse peer as a URL") @@ -217,13 +261,43 @@ func (d *DeterministicSharder) loadPeerList() error { ipOrHost: peerURL.Hostname(), port: peerURL.Port(), } - newPeers = append(newPeers, peerShard) + newPeers[ix] = peerShard } - // the redis peer discovery already sorts its content. Does every backend? - // well, it's not too much work, let's sort it one more time. + // make sure the list is in a stable, comparable order sort.Sort(SortableShardList(newPeers)) + // In general, the variation in the traffic assigned to a randomly partitioned space is + // controlled by the number of partitions. PartitionCount controls the minimum number + // of partitions used to control node assignment when we use the "hash" strategy. + // When there's a small number of partitions, the two-layer hash strategy can end up giving + // one partition a disproportionate fraction of the traffic. So we create a large number of + // random partitions and then assign (potentially) multiple partitions to individual nodes. 
+ // We're asserting that if we randomly divide the space among at this many partitions, the variation + // between them is likely to be acceptable. (As this is random, there might be exceptions.) + // The reason not to make this value much larger, say 1000, is that finding the right partition + // is linear -- O(number of partitions) and so we want it to be as small as possible + // while still being big enough. + // PartitionCount, therefore, is the smallest value that we believe will yield reasonable + // distribution between nodes. We divide it by the number of nodes using integer division + // and add 1 to get partitionsPerPeer. We then actually create (nNodes*partitionsPerPeer) + // partitions, which will always be greater than or equal to partitionCount. + // Examples: if we have 6 nodes, then partitionsPerPeer will be 9, and we will create + // 54 partitions. If we have 85 nodes, then partitionsPerPeer will be 1, and we will create + // 85 partitions. + const partitionCount = 50 + // now build the hash list; + // We make a list of hash value and an index to a peer. + hashes := make([]hashShard, 0) + partitionsPerPeer := partitionCount/len(peerList) + 1 + for ix := range newPeers { + hashes = append(hashes, newPeers[ix].GetHashesFor(ix, partitionsPerPeer, peerSeed)...) 
+ } + // now sort the hash list by hash value so we can search it efficiently + sort.Slice(hashes, func(i, j int) bool { + return hashes[i].uhash < hashes[j].uhash + }) + // if the peer list changed, load the new list d.peerLock.RLock() if !SortableShardList(d.peers).Equals(newPeers) { @@ -231,6 +305,7 @@ func (d *DeterministicSharder) loadPeerList() error { d.peerLock.RUnlock() d.peerLock.Lock() d.peers = newPeers + d.hashes = hashes d.peerLock.Unlock() } else { d.peerLock.RUnlock() @@ -243,6 +318,16 @@ func (d *DeterministicSharder) MyShard() Shard { } func (d *DeterministicSharder) WhichShard(traceID string) Shard { + return d.shardFunc(traceID) +} + +// WhichShardLegacy is the original sharding decider. It uses sha1, which is +// slow and not well-distributed, and also simply partitions the sharding +// space into N evenly-divided buckets, which means that on every change in +// shard count, half of the traces get reassigned (which leads to broken traces). +// We leave it here to avoid disrupting things and provide a fallback if needed, +// but the intent is eventually to delete this. +func (d *DeterministicSharder) WhichShardLegacy(traceID string) Shard { d.peerLock.RLock() defer d.peerLock.RUnlock() @@ -262,3 +347,29 @@ func (d *DeterministicSharder) WhichShard(traceID string) Shard { return d.peers[index] } + +// WhichShardHashed calculates which shard we want by keeping a list of partitions. Each +// partition has a different hash value and a map from partition to a given shard. +// We take the traceID and calculate a hash for each partition, using the partition +// hash as the seed for the trace hash. Whichever one has the highest value is the +// partition we use, which determines the shard we use. +// This is O(N) where N is the number of partitions, but because we use an efficient hash, +// (as opposed to SHA1) it executes in 1 uSec for 50 partitions, so it works out to about +// the same cost as the legacy sharder. 
+func (d *DeterministicSharder) WhichShardHashed(traceID string) Shard { + d.peerLock.RLock() + defer d.peerLock.RUnlock() + + tid := []byte(traceID) + + bestix := 0 + var maxHash uint64 + for _, hash := range d.hashes { + h := wyhash.Hash(tid, hash.uhash) + if h > maxHash { + maxHash = h + bestix = hash.shardIndex + } + } + return d.peers[bestix] +} diff --git a/sharder/deterministic_test.go b/sharder/deterministic_test.go index 828252341b..c59a886f01 100644 --- a/sharder/deterministic_test.go +++ b/sharder/deterministic_test.go @@ -2,6 +2,8 @@ package sharder import ( "context" + "fmt" + "math/rand" "testing" "github.com/honeycombio/refinery/config" @@ -91,3 +93,305 @@ func TestWhichShardAtEdge(t *testing.T) { assert.Equal(t, shard.GetAddress(), sharder.WhichShard(traceID).GetAddress(), "should select the same peer if peer list becomes empty") } + +// GenID returns a random hex string of length numChars +func GenID(numChars int) string { + const charset = "abcdef0123456789" + + id := make([]byte, numChars) + for i := 0; i < numChars; i++ { + id[i] = charset[rand.Intn(len(charset))] + } + return string(id) +} + +func BenchmarkShardBulk(b *testing.B) { + const ( + selfAddr = "127.0.0.1:8081" + traceID = "test" + ) + + const npeers = 11 + peers := []string{ + "http://" + selfAddr, + } + for i := 1; i < npeers; i++ { + peers = append(peers, fmt.Sprintf("http://2.2.2.%d/:8081", i)) + } + config := &config.MockConfig{ + GetPeerListenAddrVal: selfAddr, + GetPeersVal: peers, + PeerManagementType: "file", + PeerManagementStrategy: "legacy", + } + filePeers, err := peer.NewPeers(context.Background(), config) + assert.Equal(b, nil, err) + sharder := DeterministicSharder{ + Config: config, + Logger: &logger.NullLogger{}, + Peers: filePeers, + } + + assert.NoError(b, sharder.Start(), "starting deterministic sharder should not error") + + const ntraces = 10 + ids := make([]string, ntraces) + for i := 0; i < ntraces; i++ { + ids[i] = GenID(32) + } + + 
b.ResetTimer() + for i := 0; i < b.N; i++ { + sharder.WhichShard(ids[i%ntraces]) + } +} + +func TestShardBulk(t *testing.T) { + const ( + selfAddr = "127.0.0.1:8081" + traceID = "test" + ) + + // this test should work for all strategies and a wide range of peer counts + for _, strategy := range []string{"legacy", "hash"} { + for i := 0; i < 5; i++ { + npeers := i*10 + 5 + t.Run(fmt.Sprintf("bulk npeers=%d", npeers), func(t *testing.T) { + peers := []string{ + "http://" + selfAddr, + } + for i := 1; i < npeers; i++ { + peers = append(peers, fmt.Sprintf("http://2.2.2.%d/:8081", i)) + } + + config := &config.MockConfig{ + GetPeerListenAddrVal: selfAddr, + GetPeersVal: peers, + PeerManagementType: "file", + PeerManagementStrategy: strategy, + } + filePeers, err := peer.NewPeers(context.Background(), config) + assert.Equal(t, nil, err) + sharder := DeterministicSharder{ + Config: config, + Logger: &logger.NullLogger{}, + Peers: filePeers, + } + + assert.NoError(t, sharder.Start(), "starting sharder should not error") + + const ntraces = 1000 + ids := make([]string, ntraces) + for i := 0; i < ntraces; i++ { + ids[i] = GenID(32) + } + + results := make(map[string]int) + for i := 0; i < ntraces; i++ { + s := sharder.WhichShardHashed(ids[i]) + results[s.GetAddress()]++ + } + min := ntraces + max := 0 + for _, r := range results { + if r < min { + min = r + } + if r > max { + max = r + } + } + + // This is probabalistic, so could fail, but shouldn't be flaky as long as + // expectedResult is at least 20 or so. 
+ expectedResult := ntraces / npeers + assert.Greater(t, expectedResult*2, max, "expected smaller max, got %d: %v", max, results) + assert.NotEqual(t, 0, min, "expected larger min, got %d: %v", min, results) + }) + } + } +} + +func TestShardDrop(t *testing.T) { + const ( + selfAddr = "127.0.0.1:8081" + traceID = "test" + ) + + for i := 0; i < 5; i++ { + npeers := i*10 + 5 + t.Run(fmt.Sprintf("drop npeers=%d", npeers), func(t *testing.T) { + peers := []string{ + "http://" + selfAddr, + } + for i := 1; i < npeers; i++ { + peers = append(peers, fmt.Sprintf("http://2.2.2.%d/:8081", i)) + } + + config := &config.MockConfig{ + GetPeerListenAddrVal: selfAddr, + GetPeersVal: peers, + PeerManagementType: "file", + PeerManagementStrategy: "hash", + } + filePeers, err := peer.NewPeers(context.Background(), config) + assert.Equal(t, nil, err) + sharder := DeterministicSharder{ + Config: config, + Logger: &logger.NullLogger{}, + Peers: filePeers, + } + + assert.NoError(t, sharder.Start(), "starting sharder should not error") + + type placement struct { + id string + shard string + } + + const ntraces = 1000 + placements := make([]placement, ntraces) + for i := 0; i < ntraces; i++ { + placements[i].id = GenID(32) + } + + results := make(map[string]int) + for i := 0; i < ntraces; i++ { + s := sharder.WhichShard(placements[i].id) + results[s.GetAddress()]++ + placements[i].shard = s.GetAddress() + } + fmt.Println(results) + + // reach in and delete one of the peers, then reshard + config.GetPeersVal = config.GetPeersVal[1:] + sharder.loadPeerList() + + results = make(map[string]int) + nDiff := 0 + for i := 0; i < ntraces; i++ { + s := sharder.WhichShardHashed(placements[i].id) + results[s.GetAddress()]++ + if s.GetAddress() != placements[i].shard { + nDiff++ + } + } + + expected := ntraces / (npeers - 1) + assert.Greater(t, expected*2, nDiff) + assert.Less(t, expected/2, nDiff) + }) + } +} + +func TestShardAddHash(t *testing.T) { + const ( + selfAddr = "127.0.0.1:8081" + traceID = 
"test" + ) + + for i := 0; i < 5; i++ { + npeers := i*10 + 7 + t.Run(fmt.Sprintf("add npeers=%d", npeers), func(t *testing.T) { + peers := []string{ + "http://" + selfAddr, + } + for i := 1; i < npeers; i++ { + peers = append(peers, fmt.Sprintf("http://2.2.2.%d/:8081", i)) + } + + config := &config.MockConfig{ + GetPeerListenAddrVal: selfAddr, + GetPeersVal: peers, + PeerManagementType: "file", + PeerManagementStrategy: "hash", + } + filePeers, err := peer.NewPeers(context.Background(), config) + assert.Equal(t, nil, err) + sharder := DeterministicSharder{ + Config: config, + Logger: &logger.NullLogger{}, + Peers: filePeers, + } + + assert.NoError(t, sharder.Start(), "starting sharder should not error") + + type placement struct { + id string + shard string + } + + const ntraces = 1000 + placements := make([]placement, ntraces) + for i := 0; i < ntraces; i++ { + placements[i].id = GenID(32) + } + + results := make(map[string]int) + for i := 0; i < ntraces; i++ { + s := sharder.WhichShardHashed(placements[i].id) + results[s.GetAddress()]++ + placements[i].shard = s.GetAddress() + } + fmt.Println(results) + + // reach in and add a peer, then reshard + config.GetPeersVal = append(config.GetPeersVal, "http://2.2.2.255/:8081") + sharder.loadPeerList() + + results = make(map[string]int) + nDiff := 0 + for i := 0; i < ntraces; i++ { + s := sharder.WhichShard(placements[i].id) + results[s.GetAddress()]++ + if s.GetAddress() != placements[i].shard { + nDiff++ + } + } + expected := ntraces / (npeers - 1) + assert.Greater(t, expected*2, nDiff) + assert.Less(t, expected/2, nDiff) + }) + } +} + +func BenchmarkDeterministicShard(b *testing.B) { + const ( + selfAddr = "127.0.0.1:8081" + traceID = "test" + ) + for _, strat := range []string{"legacy", "hash"} { + for i := 0; i < 5; i++ { + npeers := i*10 + 4 + b.Run(fmt.Sprintf("benchmark_deterministic_%s_%d", strat, npeers), func(b *testing.B) { + peers := []string{ + "http://" + selfAddr, + } + for i := 1; i < npeers; i++ { + 
peers = append(peers, fmt.Sprintf("http://2.2.2.%d/:8081", i)) + } + config := &config.MockConfig{ + GetPeerListenAddrVal: selfAddr, + GetPeersVal: peers, + PeerManagementType: "file", + PeerManagementStrategy: strat, + } + filePeers, err := peer.NewPeers(context.Background(), config) + assert.Equal(b, nil, err) + sharder := DeterministicSharder{ + Config: config, + Logger: &logger.NullLogger{}, + Peers: filePeers, + } + + assert.NoError(b, sharder.Start(), + "starting deterministic sharder should not error") + + b.ResetTimer() + for i := 0; i < b.N; i++ { + sharder.WhichShard(traceID) + } + }) + } + } +} From 93cf04ec4ecb0f2690945a6cf54e375e48d052b0 Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Tue, 29 Nov 2022 14:57:19 -0500 Subject: [PATCH 261/351] fix: Add missing done channel to fix build (#573) ## Which problem is this PR solving? - There was a merge error between successive PRs that caused some new tests to fail because they didn't have the new signature for the NewPeers function. ## Short description of the changes - Add missing done channels to several tests. 
--- config/config_test_reload_error_test.go | 2 ++ sharder/deterministic_test.go | 20 +++++++++++++++----- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/config/config_test_reload_error_test.go b/config/config_test_reload_error_test.go index ca7e00c64a..307166b27a 100644 --- a/config/config_test_reload_error_test.go +++ b/config/config_test_reload_error_test.go @@ -1,3 +1,5 @@ +//go:build all || !race + package config import ( diff --git a/sharder/deterministic_test.go b/sharder/deterministic_test.go index c59a886f01..20a144dd28 100644 --- a/sharder/deterministic_test.go +++ b/sharder/deterministic_test.go @@ -124,7 +124,9 @@ func BenchmarkShardBulk(b *testing.B) { PeerManagementType: "file", PeerManagementStrategy: "legacy", } - filePeers, err := peer.NewPeers(context.Background(), config) + done := make(chan struct{}) + defer close(done) + filePeers, err := peer.NewPeers(context.Background(), config, done) assert.Equal(b, nil, err) sharder := DeterministicSharder{ Config: config, @@ -170,7 +172,9 @@ func TestShardBulk(t *testing.T) { PeerManagementType: "file", PeerManagementStrategy: strategy, } - filePeers, err := peer.NewPeers(context.Background(), config) + done := make(chan struct{}) + defer close(done) + filePeers, err := peer.NewPeers(context.Background(), config, done) assert.Equal(t, nil, err) sharder := DeterministicSharder{ Config: config, @@ -234,7 +238,9 @@ func TestShardDrop(t *testing.T) { PeerManagementType: "file", PeerManagementStrategy: "hash", } - filePeers, err := peer.NewPeers(context.Background(), config) + done := make(chan struct{}) + defer close(done) + filePeers, err := peer.NewPeers(context.Background(), config, done) assert.Equal(t, nil, err) sharder := DeterministicSharder{ Config: config, @@ -306,7 +312,9 @@ func TestShardAddHash(t *testing.T) { PeerManagementType: "file", PeerManagementStrategy: "hash", } - filePeers, err := peer.NewPeers(context.Background(), config) + done := make(chan struct{}) + defer 
close(done) + filePeers, err := peer.NewPeers(context.Background(), config, done) assert.Equal(t, nil, err) sharder := DeterministicSharder{ Config: config, @@ -376,7 +384,9 @@ func BenchmarkDeterministicShard(b *testing.B) { PeerManagementType: "file", PeerManagementStrategy: strat, } - filePeers, err := peer.NewPeers(context.Background(), config) + done := make(chan struct{}) + defer close(done) + filePeers, err := peer.NewPeers(context.Background(), config, done) assert.Equal(b, nil, err) sharder := DeterministicSharder{ Config: config, From 82ddc514fbb417e0a6a3b66cb21178afac222c04 Mon Sep 17 00:00:00 2001 From: Purvi Kanal Date: Tue, 29 Nov 2022 15:07:02 -0500 Subject: [PATCH 262/351] ci: update validate PR title workflow (#572) Adds a better error message when the PR title conventional commit check fails. We'll use this PR as the main PR to review before applying it to the other repos. --- .github/workflows/validate-pr-title.yml | 33 +++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/.github/workflows/validate-pr-title.yml b/.github/workflows/validate-pr-title.yml index 65fc7bcb74..5186cd01be 100644 --- a/.github/workflows/validate-pr-title.yml +++ b/.github/workflows/validate-pr-title.yml @@ -13,6 +13,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: amannn/action-semantic-pull-request@v5 + id: lint_pr_title + name: "🤖 Check PR title follows conventional commit spec" env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: @@ -29,3 +31,34 @@ jobs: refactor perf test + ignoreLabels: | + "type: dependencies" + # When the previous steps fails, the workflow would stop. By adding this + # condition you can continue the execution with the populated error message. + - if: always() && (steps.lint_pr_title.outputs.error_message != null) + name: "📝 Add PR comment about using conventional commit spec" + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: pr-title-lint-error + message: | + Thank you for contributing to the project! 
🎉 + + We require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and it looks like your proposed title needs to be adjusted. + + Make sure to prepend with `feat:`, `fix:`, or another option in the list below. + + Once you update the title, this workflow will re-run automatically and validate the updated title. + + Details: + + ``` + ${{ steps.lint_pr_title.outputs.error_message }} + ``` + + # Delete a previous comment when the issue has been resolved + - if: ${{ steps.lint_pr_title.outputs.error_message == null }} + name: "❌ Delete PR comment after title has been updated" + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: pr-title-lint-error + delete: true From 353a045d97d95e247e6385a20c566a8e9f5d33d8 Mon Sep 17 00:00:00 2001 From: "saikalyan.bhagavathula" Date: Wed, 7 Dec 2022 12:06:01 +0530 Subject: [PATCH 263/351] Testing for running tracing proxy independently --- app/app.go | 50 +++++++++++++++++++++++++++++++++++++++++-- config/config.go | 6 ++++++ config/file_config.go | 27 +++++++++++++++++++++++ config_complete.toml | 9 +++++--- route/otlp_trace.go | 43 +++++++++++++++++++++++++++++++++++++ 5 files changed, 130 insertions(+), 5 deletions(-) diff --git a/app/app.go b/app/app.go index cfd51e3396..06f5c772bd 100644 --- a/app/app.go +++ b/app/app.go @@ -1,13 +1,20 @@ package app import ( + "encoding/json" + "fmt" "github.com/jirs5/tracing-proxy/collect" "github.com/jirs5/tracing-proxy/config" "github.com/jirs5/tracing-proxy/logger" "github.com/jirs5/tracing-proxy/metrics" "github.com/jirs5/tracing-proxy/route" + "io/ioutil" + "net/http" + "strings" ) + +var OpsrampToken string type App struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` @@ -15,18 +22,27 @@ type App struct { PeerRouter route.Router `inject:"inline"` Collector collect.Collector `inject:""` Metrics metrics.Metrics 
`inject:"metrics"` - + Client http.Client // Version is the build ID for tracing-proxy so that the running process may answer // requests for the version Version string } +type OpsRampAuthTokenResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` + Scope string `json:"scope"` +} + + + // Start on the App obect should block until the proxy is shutting down. After // Start exits, Stop will be called on all dependencies then on App then the // program will exit. func (a *App) Start() error { a.Logger.Debug().Logf("Starting up App...") - + OpsrampToken = a.opsrampOauthToken() a.IncomingRouter.SetVersion(a.Version) a.PeerRouter.SetVersion(a.Version) @@ -42,3 +58,33 @@ func (a *App) Stop() error { a.Logger.Debug().Logf("Shutting down App...") return nil } + +func (a *App) opsrampOauthToken() string { + OpsrampKey, _ := a.Config.GetOpsrampKey() + OpsrampSecret, _ := a.Config.GetOpsrampSecret() + ApiEndPoint, _ := a.Config.GetOpsrampAPI() + + fmt.Println("OpsrampKey:", OpsrampKey) + fmt.Println("OpsrampSecret:", OpsrampSecret) + url := fmt.Sprintf("%s/auth/oauth/token", strings.TrimRight(ApiEndPoint, "/")) + fmt.Println(url) + requestBody := strings.NewReader("client_id=" + OpsrampKey + "&client_secret=" + OpsrampSecret + "&grant_type=client_credentials") + req, err := http.NewRequest(http.MethodPost, url, requestBody) + fmt.Println("request error is: ", err) + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + req.Header.Add("Accept", "application/json") + req.Header.Set("Connection", "close") + + resp, err := a.Client.Do(req) + defer resp.Body.Close() + fmt.Println("Response error is: ", err) + + respBody, err := ioutil.ReadAll(resp.Body) + fmt.Println("resp.Body is ", string(respBody)) + var tokenResponse OpsRampAuthTokenResponse + err = json.Unmarshal(respBody, &tokenResponse) + //fmt.Println("tokenResponse", tokenResponse) + fmt.Println("tokenResponse.AccessToken: ", 
tokenResponse.AccessToken) + return tokenResponse.AccessToken +} + diff --git a/config/config.go b/config/config.go index ad453d8627..2660662bdd 100644 --- a/config/config.go +++ b/config/config.go @@ -154,4 +154,10 @@ type Config interface { // GetProxyPassword returns the password of proxy user on which to listen for // proxy traffic GetProxyPassword()(string,error) + + GetOpsrampKey()(string, error) + + GetOpsrampSecret()(string, error) + + GetTenantId()(string, error) } diff --git a/config/file_config.go b/config/file_config.go index 14cef339cd..a2c0c9b1e2 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -32,6 +32,9 @@ type configContents struct { GRPCPeerListenAddr string APIKeys []string `validate:"required"` OpsrampAPI string `validate:"required,url"` + OpsrampKey string + OpsrampSecret string + TenantId string LoggingLevel string `validate:"required"` Collector string `validate:"required,oneof= InMemCollector"` Sampler string `validate:"required,oneof= DeterministicSampler DynamicSampler EMADynamicSampler RulesBasedSampler TotalThroughputSampler"` @@ -118,6 +121,9 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("PeerManagement.UseTLSInsecure", false) c.SetDefault("PeerManagement.UseIPV6Identifier", false) c.SetDefault("OpsrampAPI", "https://api.jirs5") + c.SetDefault("OpsrampKey", "") + c.SetDefault("OpsrampSecret", "") + c.SetDefault("TenantId", "") c.SetDefault("LoggingLevel", "debug") c.SetDefault("Collector", "InMemCollector") c.SetDefault("SendDelay", 2*time.Second) @@ -491,6 +497,27 @@ func (f *fileConfig) GetOpsrampAPI() (string, error) { return f.conf.OpsrampAPI, nil } +func (f *fileConfig) GetOpsrampKey() (string, error) { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.OpsrampKey , nil +} + +func (f *fileConfig) GetOpsrampSecret() (string, error) { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.OpsrampSecret , nil +} + +func (f *fileConfig) GetTenantId() (string, 
error) { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.TenantId , nil +} + func (f *fileConfig) GetLoggingLevel() (string, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index 4573bee10a..b262ca814d 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -7,14 +7,14 @@ # front to do the decryption. # Should be of the form 0.0.0.0:8080 # Not eligible for live reload. -ListenAddr = "0.0.0.0:8082" +ListenAddr = "0.0.0.0:8080" # GRPCListenAddr is the IP and port on which to listen for incoming events over # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put # something like nginx in front to do the decryption. # Should be of the form 0.0.0.0:9090 # Not eligible for live reload. -GRPCListenAddr = "0.0.0.0:9090" +GRPCListenAddr = "0.0.0.0:4317" # PeerListenAddr is the IP and port on which to listen for traffic being # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL @@ -70,6 +70,9 @@ APIKeys = [ # Eligible for live reload. #OpsrampAPI = "localhost:50052" OpsrampAPI = "https://asura.opsramp.net" +OpsrampKey = "***REMOVED***" +OpsrampSecret = "***REMOVED***" +TenantId = "788714cd-a17a-4d7e-9bac-c35131f4bcc2" #Tls Options UseTls = true @@ -248,7 +251,7 @@ MaxAlloc = 0 LogFormatter = "logfmt" # LogOutput specifies where the logs are supposed to be written. 
Accpets one of ["stdout", "stderr", "file"] -LogOutput = "file" +LogOutput = "stdout" ## LogrusLogger.File - specifies configs for logs when LogOutput is set to "file" [LogrusLogger.File] diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 239e08ec6f..298f7781b8 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -16,6 +16,10 @@ import ( collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" ) + + + + func (router *Router) postOTLP(w http.ResponseWriter, req *http.Request) { ri := huskyotlp.GetRequestInfoFromHttpHeaders(req.Header) /*if err := ri.ValidateHeaders(); err != nil { @@ -54,7 +58,45 @@ func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTrac } token := ri.ApiToken tenantId := ri.ApiTenantId + //OpsrampKey, _ := router.Config.GetOpsrampKey() + //OpsrampSecret, _ := router.Config.GetOpsrampSecret() + //ApiEndPoint, _ := router.Config.GetOpsrampAPI() + //if len(token) == 0 && len(OpsrampKey) != 0 && len(OpsrampSecret) != 0 { + // fmt.Println("OpsrampKey:", OpsrampKey) + // fmt.Println("OpsrampKey:", OpsrampSecret) + // url := fmt.Sprintf("%s/auth/oauth/token", strings.TrimRight(ApiEndPoint, "/")) + // fmt.Println(url) + // requestBody := strings.NewReader("client_id=" + OpsrampKey + "&client_secret=" + OpsrampSecret + "&grant_type=client_credentials") + // req, err := http.NewRequest(http.MethodPost, url, requestBody) + // fmt.Println("request error is: ", err) + // req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + // req.Header.Add("Accept", "application/json") + // req.Header.Set("Connection", "close") + // + // resp, err := router.Client.Do(req) + // defer resp.Body.Close() + // fmt.Println("Response error is: ", err) + // + // respBody, err := ioutil.ReadAll(resp.Body) + // fmt.Println("resp.Body is ", string(respBody)) + // var tokenResponse OpsRampAuthTokenResponse + // err = json.Unmarshal(respBody, &tokenResponse) + // //fmt.Println("tokenResponse", tokenResponse) + // 
fmt.Println("tokenResponse.AccessToken: ", tokenResponse.AccessToken) + // token = tokenResponse.AccessToken + //} + + //if len(token) == 0 { + // token = app.OpsrampToken + //} + if len(tenantId) == 0 { + OpsrampTenantId, _ := router.Config.GetTenantId() + tenantId = OpsrampTenantId + } + if len(ri.Dataset) == 0 { + ri.Dataset = "ds" + } fmt.Println("Token:", token) fmt.Println("TenantId:", tenantId) fmt.Println("dataset:", ri.Dataset) @@ -120,6 +162,7 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get request metadata", Status: "Failed"}, nil } else { authorization := md.Get("Authorization") + fmt.Println("authorization is ", authorization) if len(authorization) == 0 { return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get Authorization", Status: "Failed"}, nil } else { From 5fddb8e7185221b86d436c55eb94b1ee20897b8d Mon Sep 17 00:00:00 2001 From: "saikalyan.bhagavathula" Date: Mon, 12 Dec 2022 18:20:57 +0530 Subject: [PATCH 264/351] auth token generation and renewal for independent trace proxy --- app/app.go | 34 --- cmd/tracing-proxy/main.go | 10 + config/config.go | 2 + config/file_config.go | 9 + config_complete.toml | 15 +- go.mod | 6 +- go.sum | 487 ++++++++++++++++++++++++++++++++++++-- route/otlp_trace.go | 36 +-- route/route.go | 4 +- 9 files changed, 512 insertions(+), 91 deletions(-) diff --git a/app/app.go b/app/app.go index 06f5c772bd..b245fa3736 100644 --- a/app/app.go +++ b/app/app.go @@ -1,16 +1,12 @@ package app import ( - "encoding/json" - "fmt" "github.com/jirs5/tracing-proxy/collect" "github.com/jirs5/tracing-proxy/config" "github.com/jirs5/tracing-proxy/logger" "github.com/jirs5/tracing-proxy/metrics" "github.com/jirs5/tracing-proxy/route" - "io/ioutil" "net/http" - "strings" ) @@ -42,7 +38,6 @@ type OpsRampAuthTokenResponse struct { // program will exit. 
func (a *App) Start() error { a.Logger.Debug().Logf("Starting up App...") - OpsrampToken = a.opsrampOauthToken() a.IncomingRouter.SetVersion(a.Version) a.PeerRouter.SetVersion(a.Version) @@ -59,32 +54,3 @@ func (a *App) Stop() error { return nil } -func (a *App) opsrampOauthToken() string { - OpsrampKey, _ := a.Config.GetOpsrampKey() - OpsrampSecret, _ := a.Config.GetOpsrampSecret() - ApiEndPoint, _ := a.Config.GetOpsrampAPI() - - fmt.Println("OpsrampKey:", OpsrampKey) - fmt.Println("OpsrampSecret:", OpsrampSecret) - url := fmt.Sprintf("%s/auth/oauth/token", strings.TrimRight(ApiEndPoint, "/")) - fmt.Println(url) - requestBody := strings.NewReader("client_id=" + OpsrampKey + "&client_secret=" + OpsrampSecret + "&grant_type=client_credentials") - req, err := http.NewRequest(http.MethodPost, url, requestBody) - fmt.Println("request error is: ", err) - req.Header.Add("Content-Type", "application/x-www-form-urlencoded") - req.Header.Add("Accept", "application/json") - req.Header.Set("Connection", "close") - - resp, err := a.Client.Do(req) - defer resp.Body.Close() - fmt.Println("Response error is: ", err) - - respBody, err := ioutil.ReadAll(resp.Body) - fmt.Println("resp.Body is ", string(respBody)) - var tokenResponse OpsRampAuthTokenResponse - err = json.Unmarshal(respBody, &tokenResponse) - //fmt.Println("tokenResponse", tokenResponse) - fmt.Println("tokenResponse.AccessToken: ", tokenResponse.AccessToken) - return tokenResponse.AccessToken -} - diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index 4b82507ced..e9fbbbfb0b 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -130,6 +130,10 @@ func main() { upstreamMetricsConfig := metrics.GetMetricsImplementation("libtrace_upstream") peerMetricsConfig := metrics.GetMetricsImplementation("libtrace_peer") + opsrampkey, _ := c.GetOpsrampKey() + opsrampsecret, _ := c.GetOpsrampSecret() + opsrampapi, _ := c.GetOpsrampAPI() + userAgentAddition := "tracing-proxy/" + version 
upstreamClient, err := libtrace.NewClient(libtrace.ClientConfig{ Transmission: &transmission.Opsramptraceproxy{ @@ -144,6 +148,9 @@ func main() { Metrics: upstreamMetricsConfig, UseTls: c.GetGlobalUseTLS(), UseTlsInsecure: c.GetGlobalUseTLSInsecureSkip(), + OpsrampKey: opsrampkey, + OpsrampSecret: opsrampsecret, + ApiHost: opsrampapi, }, }) if err != nil { @@ -164,6 +171,9 @@ func main() { DisableCompression: !c.GetCompressPeerCommunication(), EnableMsgpackEncoding: false, Metrics: peerMetricsConfig, + OpsrampKey: opsrampkey, + OpsrampSecret: opsrampsecret, + ApiHost: opsrampapi, }, }) if err != nil { diff --git a/config/config.go b/config/config.go index 2660662bdd..bc5d69b445 100644 --- a/config/config.go +++ b/config/config.go @@ -160,4 +160,6 @@ type Config interface { GetOpsrampSecret()(string, error) GetTenantId()(string, error) + + GetDataset()(string, error) } diff --git a/config/file_config.go b/config/file_config.go index a2c0c9b1e2..74aec3f077 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -35,6 +35,7 @@ type configContents struct { OpsrampKey string OpsrampSecret string TenantId string + Dataset string LoggingLevel string `validate:"required"` Collector string `validate:"required,oneof= InMemCollector"` Sampler string `validate:"required,oneof= DeterministicSampler DynamicSampler EMADynamicSampler RulesBasedSampler TotalThroughputSampler"` @@ -124,6 +125,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("OpsrampKey", "") c.SetDefault("OpsrampSecret", "") c.SetDefault("TenantId", "") + c.SetDefault("Dataset", "ds") c.SetDefault("LoggingLevel", "debug") c.SetDefault("Collector", "InMemCollector") c.SetDefault("SendDelay", 2*time.Second) @@ -511,6 +513,13 @@ func (f *fileConfig) GetOpsrampSecret() (string, error) { return f.conf.OpsrampSecret , nil } +func (f *fileConfig) GetDataset() (string, error) { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.Dataset , nil +} + func (f 
*fileConfig) GetTenantId() (string, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml index b262ca814d..454f5053a6 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -69,10 +69,19 @@ APIKeys = [ # OpsrampAPI is the URL for the upstream Opsramp API. # Eligible for live reload. #OpsrampAPI = "localhost:50052" -OpsrampAPI = "https://asura.opsramp.net" +OpsrampAPI = "https://int.opsramp.net" + +# OpsrampKey is used to get the OauthToken OpsrampKey = "***REMOVED***" + +# OpsrampSecret is used to get the OauthToken OpsrampSecret = "***REMOVED***" -TenantId = "788714cd-a17a-4d7e-9bac-c35131f4bcc2" + +# Traces are send to the client with given tenantid +TenantId = "3748c67e-bec1-4cad-bd8b-8f2f8ea840f3" + +# Dataset you want to use for sampling +Dataset = "ds" #Tls Options UseTls = true @@ -203,7 +212,7 @@ Peers = [ # UseIPV6Identifier = false # RedisIdentifier is optional. By default, when using RedisHost, Refinery will use -# the local hostname to identify itself to other peers in Redis. If your environment +# the local hostname to id788714cd-a17a-4d7e-9bac-c35131f4bcc2entify itself to other peers in Redis. If your environment # requires that you use IPs as identifiers (for example, if peers can't resolve eachother # by name), you can specify the exact identifier (IP address, etc) to use here. # Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set. 
diff --git a/go.mod b/go.mod index 2a198d8cb4..d539bfbf8d 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,6 @@ require ( github.com/golang/snappy v0.0.3 github.com/gomodule/redigo v1.8.8 github.com/gorilla/mux v1.8.0 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.3 // indirect github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 github.com/honeycombio/husky v0.9.0 @@ -35,8 +34,7 @@ require ( github.com/stretchr/testify v1.7.0 github.com/vmihailenco/msgpack/v4 v4.3.11 go.opentelemetry.io/proto/otlp v0.9.0 - golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect - google.golang.org/grpc v1.44.0 + google.golang.org/grpc v1.50.1 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 @@ -44,7 +42,7 @@ require ( //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220209113356-39ae92fc19f4 //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220302232703-acf6fcd2a9de -replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v1.15.9-0.20220616111622-198db7b29be0 +replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v1.15.9-0.20221212123252-6c812a22c313 //replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220302161820-fe16f58d3996 replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df diff --git a/go.sum b/go.sum index f970e070a2..c222f934fa 100644 --- a/go.sum +++ b/go.sum @@ -26,24 +26,341 @@ cloud.google.com/go v0.93.3/go.mod 
h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= 
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= 
+cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= 
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute 
v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= 
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow 
v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod 
h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids 
v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod 
h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= 
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= 
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= 
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter 
v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage 
v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= 
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= 
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -105,6 +422,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= @@ -203,8 +521,10 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp 
v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -226,16 +546,26 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= 
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.3 h1:I8MsauTJQXZ8df8qJvEln0kYNc3bSapuaSsEsnFdEFU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.3/go.mod h1:lZdb/YAJUSj9OqrCHs2ihjtoO3+xK3G53wTYXFWRGDo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.14.0 h1:t7uX3JBHdVwAi3G7sSSdbsk8NfgA+LnUS88V/2EKaA0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.14.0/go.mod h1:4OGVnY4qf2+gw+ssiHbW+pq4mo2yko94YxxMmXZ7jCA= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -274,8 +604,8 @@ 
github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LF github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df h1:vN66WfIFppi2IVEIp00wnmgBbvM6Jd6oT+WN5ChdUnQ= github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= -github.com/jirs5/libtrace-go v1.15.9-0.20220616111622-198db7b29be0 h1:gA2frU8jEl4y1LngSFyy4GQrUPLgad4pI6uU8r4RBnE= -github.com/jirs5/libtrace-go v1.15.9-0.20220616111622-198db7b29be0/go.mod h1:7Z9QPLljLv8SW0BTttRBgHE969vQfy2bDAh0cnDXjBI= +github.com/jirs5/libtrace-go v1.15.9-0.20221212123252-6c812a22c313 h1:xpnN0lzzbvEYEIDg2wzC1EEHTTu53olQ/XCNAE5zkGI= +github.com/jirs5/libtrace-go v1.15.9-0.20221212123252-6c812a22c313/go.mod h1:2LOVRXQk1CJ43G+14WmamU7RtAq/sjw+L2dwFChkq3g= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -423,6 +753,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= 
go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= @@ -448,6 +779,7 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -484,6 +816,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -524,8 +857,20 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210913180222-943fd674d43e h1:+b/22bPvDYt4NPDcy4xAGCmON713ONAWFeY3Z7I3tR8= -golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -543,6 +888,16 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.2.0/go.mod h1:Cwn6afJ8jrQwYMxQDTpISoXmXW9I6qF6vDeuuoX3Ibs= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ 
-554,6 +909,10 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -622,9 +981,26 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486 h1:5hpz5aRr+W1erYCL5JRhSUBJRph7l9XkNveoExlrKYk= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -633,8 +1009,10 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -692,11 +1070,15 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -729,6 +1111,25 @@ google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdr google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod 
h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -776,6 +1177,7 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod 
h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -798,8 +1200,51 @@ google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5 h1:zzNejm+EgrbLfDZ6lu9Uud2IVvHySPl8vQzf04laR5Q= -google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod 
h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto 
v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1 h1:jCw9YRd2s40X9Vxi4zKsPRvSPlHWNqadVkpbMsCPzPQ= +google.golang.org/genproto 
v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -827,8 +1272,16 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ 
-842,8 +1295,10 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= @@ -868,8 +1323,9 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -880,4 +1336,3 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 298f7781b8..e170d47464 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -58,46 +58,16 @@ func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTrac } token := ri.ApiToken tenantId := ri.ApiTenantId - //OpsrampKey, _ := router.Config.GetOpsrampKey() - //OpsrampSecret, _ := router.Config.GetOpsrampSecret() - //ApiEndPoint, _ := router.Config.GetOpsrampAPI() - //if len(token) == 0 && len(OpsrampKey) != 0 && len(OpsrampSecret) != 0 { - // fmt.Println("OpsrampKey:", OpsrampKey) - // fmt.Println("OpsrampKey:", OpsrampSecret) - // url := fmt.Sprintf("%s/auth/oauth/token", strings.TrimRight(ApiEndPoint, "/")) - // fmt.Println(url) - // requestBody := strings.NewReader("client_id=" + OpsrampKey + "&client_secret=" + OpsrampSecret + "&grant_type=client_credentials") - // req, err := http.NewRequest(http.MethodPost, url, requestBody) - // fmt.Println("request error is: ", err) - // req.Header.Add("Content-Type", "application/x-www-form-urlencoded") - // req.Header.Add("Accept", "application/json") - // req.Header.Set("Connection", "close") - // - // resp, err := router.Client.Do(req) - // defer resp.Body.Close() - // fmt.Println("Response error is: ", err) - // - // respBody, err := ioutil.ReadAll(resp.Body) - // fmt.Println("resp.Body is ", string(respBody)) - // var tokenResponse OpsRampAuthTokenResponse - // err = 
json.Unmarshal(respBody, &tokenResponse) - // //fmt.Println("tokenResponse", tokenResponse) - // fmt.Println("tokenResponse.AccessToken: ", tokenResponse.AccessToken) - // token = tokenResponse.AccessToken - //} - - //if len(token) == 0 { - // token = app.OpsrampToken - //} if len(tenantId) == 0 { OpsrampTenantId, _ := router.Config.GetTenantId() tenantId = OpsrampTenantId } if len(ri.Dataset) == 0 { - ri.Dataset = "ds" + dataset, _ := router.Config.GetDataset() + ri.Dataset = dataset } - fmt.Println("Token:", token) + fmt.Println("TenantId:", tenantId) fmt.Println("dataset:", ri.Dataset) diff --git a/route/route.go b/route/route.go index b27fe83e26..d82be4b5b6 100644 --- a/route/route.go +++ b/route/route.go @@ -50,6 +50,9 @@ const ( defaultSampleRate = 1 ) + + + type Router struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` @@ -59,7 +62,6 @@ type Router struct { Sharder sharder.Sharder `inject:""` Collector collect.Collector `inject:""` Metrics metrics.Metrics `inject:"metrics"` - // version is set on startup so that the router may answer HTTP requests for // the version versionStr string From 917f984cfe48e7378c5c1ce6f1ad9606108efafc Mon Sep 17 00:00:00 2001 From: "saikalyan.bhagavathula" Date: Thu, 15 Dec 2022 19:24:31 +0530 Subject: [PATCH 265/351] Final Changes for authtoken for sending traces --- config_complete.toml | 10 +++++----- go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/config_complete.toml b/config_complete.toml index 454f5053a6..2b3e74cf01 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -138,7 +138,7 @@ AddHostMetadataToTrace = false # Metrics are sent to OpsRamp (The collection happens based on configuration specifie # in OpsRampMetrics and only works when the Metrics is set to "prometheus") -SendMetricsToOpsRamp = false +SendMetricsToOpsRamp = true ############################ ## Implementation Choices ## @@ -292,23 +292,23 @@ MetricsListenAddr = "localhost:2112" 
# OpsRampMetricsAPI is the URL for the upstream OpsRamp API. # Not Eligible for live reload. -OpsRampMetricsAPI = "https://placeholder.api.com/" +OpsRampMetricsAPI = "https://int.opsramp.net" # OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. # Not Eligible for live reload. -OpsRampTenantID = "placeholder_tenantID" +OpsRampTenantID = "3748c67e-bec1-4cad-bd8b-8f2f8ea840f3" # OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. # This is separate from the APIKeys used to authenticate regular # traffic. # Not Eligible for live reload. -OpsRampMetricsAPIKey = "placeholder_key" +OpsRampMetricsAPIKey = "***REMOVED***" # OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. # This is separate from the APISecret used to authenticate regular # traffic. # Not Eligible for live reload. -OpsRampMetricsAPISecret = "placeholder_secret" +OpsRampMetricsAPISecret = "***REMOVED***" # OpsRampMetricsReportingInterval is frequency specified in seconds at which # the metrics are collected and sent to OpsRamp diff --git a/go.mod b/go.mod index d539bfbf8d..757a2d9b59 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220209113356-39ae92fc19f4 //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220302232703-acf6fcd2a9de -replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v1.15.9-0.20221212123252-6c812a22c313 +replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v1.15.9-0.20221215130906-ffb6698e9c86 //replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220302161820-fe16f58d3996 replace github.com/honeycombio/husky v0.9.0 
=> github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df diff --git a/go.sum b/go.sum index c222f934fa..1189c4313a 100644 --- a/go.sum +++ b/go.sum @@ -604,8 +604,8 @@ github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LF github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df h1:vN66WfIFppi2IVEIp00wnmgBbvM6Jd6oT+WN5ChdUnQ= github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= -github.com/jirs5/libtrace-go v1.15.9-0.20221212123252-6c812a22c313 h1:xpnN0lzzbvEYEIDg2wzC1EEHTTu53olQ/XCNAE5zkGI= -github.com/jirs5/libtrace-go v1.15.9-0.20221212123252-6c812a22c313/go.mod h1:2LOVRXQk1CJ43G+14WmamU7RtAq/sjw+L2dwFChkq3g= +github.com/jirs5/libtrace-go v1.15.9-0.20221215130906-ffb6698e9c86 h1:NPafoey84rhHdWhNIIAyCEfFVlyCN5OFqXgPVRbMtyE= +github.com/jirs5/libtrace-go v1.15.9-0.20221215130906-ffb6698e9c86/go.mod h1:2LOVRXQk1CJ43G+14WmamU7RtAq/sjw+L2dwFChkq3g= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= From 3548cef200cb47440fcaf895db26fb01bea48a61 Mon Sep 17 00:00:00 2001 From: "saikalyan.bhagavathula" Date: Mon, 19 Dec 2022 16:33:02 +0530 Subject: [PATCH 266/351] conn object reuse case --- go.mod | 5 +++-- go.sum | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 757a2d9b59..a1c497e0d9 100644 --- a/go.mod +++ b/go.mod @@ -41,8 +41,9 @@ require ( ) //replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go 
v0.0.0-20220209113356-39ae92fc19f4 -//replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220302232703-acf6fcd2a9de -replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v1.15.9-0.20221215130906-ffb6698e9c86 +replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v1.15.9-0.20221219105703-796cb39b0512 + +//replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v1.15.9-0.20221215130906-ffb6698e9c86 //replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220302161820-fe16f58d3996 replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df diff --git a/go.sum b/go.sum index 1189c4313a..77ebf554e1 100644 --- a/go.sum +++ b/go.sum @@ -604,8 +604,8 @@ github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LF github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df h1:vN66WfIFppi2IVEIp00wnmgBbvM6Jd6oT+WN5ChdUnQ= github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= -github.com/jirs5/libtrace-go v1.15.9-0.20221215130906-ffb6698e9c86 h1:NPafoey84rhHdWhNIIAyCEfFVlyCN5OFqXgPVRbMtyE= -github.com/jirs5/libtrace-go v1.15.9-0.20221215130906-ffb6698e9c86/go.mod h1:2LOVRXQk1CJ43G+14WmamU7RtAq/sjw+L2dwFChkq3g= +github.com/jirs5/libtrace-go v1.15.9-0.20221219105703-796cb39b0512 h1:f2nQnoyzl433SeoDOBost44pMa/lB7vznZOnGGQM+fU= +github.com/jirs5/libtrace-go v1.15.9-0.20221219105703-796cb39b0512/go.mod 
h1:2LOVRXQk1CJ43G+14WmamU7RtAq/sjw+L2dwFChkq3g= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= From 07583943c3d8e3085a64bc7cd88e9ccaf8631619 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 21 Dec 2022 21:40:44 +0530 Subject: [PATCH 267/351] updating all files to use opsramp repos --- CHANGELOG.md | 4 +-- app/app.go | 17 +++++------- app/app_test.go | 20 +++++++------- cmd/test_redimem/main.go | 2 +- cmd/tracing-proxy/main.go | 36 +++++++++++++------------- collect/cache/cache.go | 6 ++--- collect/cache/cache_test.go | 6 ++--- collect/collect.go | 14 +++++----- collect/collect_benchmark_test.go | 14 +++++----- collect/collect_test.go | 14 +++++----- config/file_config.go | 2 +- go.mod | 19 +++++--------- go.sum | 20 +++++++------- internal/peer/file.go | 26 ++++++++++--------- internal/peer/file_test.go | 2 +- internal/peer/peers.go | 2 +- internal/peer/peers_test.go | 2 +- internal/peer/redis.go | 4 +-- logger/logrus.go | 4 +-- metrics/metrics.go | 2 +- metrics/opsramp.go | 43 ++++++++++++++----------------- metrics/opsramp_test.go | 4 +-- route/errors.go | 2 +- route/errors_test.go | 2 +- route/middleware.go | 2 +- route/otlp_trace.go | 12 +++------ route/otlp_trace_test.go | 10 +++---- route/route.go | 28 ++++++++++---------- route/route_test.go | 13 +++++----- sample/deterministic.go | 6 ++--- sample/deterministic_test.go | 6 ++--- sample/dynamic.go | 8 +++--- sample/dynamic_ema.go | 8 +++--- sample/dynamic_ema_test.go | 8 +++--- sample/dynamic_test.go | 8 +++--- sample/rules.go | 10 +++---- sample/rules_test.go | 8 +++--- sample/sample.go | 8 +++--- sample/sample_test.go | 6 ++--- sample/totalthroughput.go | 8 +++--- sample/totalthroughput_test.go | 8 +++--- sample/trace_key.go | 2 +- 
sample/trace_key_test.go | 2 +- service/debug/debug_service.go | 2 +- sharder/deterministic.go | 6 ++--- sharder/deterministic_test.go | 6 ++--- sharder/sharder.go | 2 +- sharder/single.go | 2 +- transmit/mock.go | 2 +- transmit/transmit.go | 23 ++++++++--------- transmit/transmit_test.go | 8 +++--- 51 files changed, 229 insertions(+), 250 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 844acf48f5..cee44c89b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,7 +44,7 @@ - bump libtrace-go to v1.15.6 - empower apply-labels action to apply labels (#344) -- Bump github.com/honeycombio/libhoney-go from 1.15.4 to 1.15.5 (#327) +- Bump github.com/opsramp/libtrace-go from 1.15.4 to 1.15.5 (#327) - Re-add missing docker login when publishing (#338) ## 1.5.2 2021-10-13 @@ -97,7 +97,7 @@ - Bump google.golang.org/grpc from 1.39.0 to 1.39.1 (#300) - Bump github.com/klauspost/compress from 1.13.2 to 1.13.3 (#301) -- Bump github.com/honeycombio/libhoney-go from 1.12.4 to 1.15.4 (#295) +- Bump github.com/opsramp/libtrace-go from 1.12.4 to 1.15.4 (#295) - Bump github.com/klauspost/compress from 1.10.3 to 1.13.2 (#297) ## 1.4.0 diff --git a/app/app.go b/app/app.go index b245fa3736..fb087bd5fa 100644 --- a/app/app.go +++ b/app/app.go @@ -1,16 +1,16 @@ package app import ( - "github.com/jirs5/tracing-proxy/collect" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/route" + "github.com/opsramp/tracing-proxy/collect" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/route" "net/http" ) - var OpsrampToken string + type App 
struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` @@ -27,12 +27,10 @@ type App struct { type OpsRampAuthTokenResponse struct { AccessToken string `json:"access_token"` TokenType string `json:"token_type"` - ExpiresIn int64 `json:"expires_in"` + ExpiresIn int64 `json:"expires_in"` Scope string `json:"scope"` } - - // Start on the App obect should block until the proxy is shutting down. After // Start exits, Stop will be called on all dependencies then on App then the // program will exit. @@ -53,4 +51,3 @@ func (a *App) Stop() error { a.Logger.Debug().Logf("Shutting down App...") return nil } - diff --git a/app/app_test.go b/app/app_test.go index b9747a1e5d..4f8a8e2b9f 100644 --- a/app/app_test.go +++ b/app/app_test.go @@ -26,15 +26,15 @@ import ( "github.com/stretchr/testify/assert" "gopkg.in/alexcesaro/statsd.v2" - "github.com/honeycombio/libhoney-go/transmission" - "github.com/jirs5/tracing-proxy/collect" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/internal/peer" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/sample" - "github.com/jirs5/tracing-proxy/sharder" - "github.com/jirs5/tracing-proxy/transmit" + "github.com/opsramp/libtrace-go/transmission" + "github.com/opsramp/tracing-proxy/collect" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/internal/peer" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/sample" + "github.com/opsramp/tracing-proxy/sharder" + "github.com/opsramp/tracing-proxy/transmit" ) type countingWriterSender struct { @@ -114,7 +114,7 @@ func 
newStartedApp( GetListenAddrVal: "127.0.0.1:" + strconv.Itoa(basePort), GetPeerListenAddrVal: "127.0.0.1:" + strconv.Itoa(basePort+1), GetAPIKeysVal: []string{"KEY"}, - GetOpsrampAPIVal: "http://jirs5", + GetOpsrampAPIVal: "http://jirs5", GetInMemoryCollectorCacheCapacityVal: config.InMemoryCollectorCacheCapacity{CacheCapacity: 10000}, AddHostMetadataToTrace: enableHostMetadata, } diff --git a/cmd/test_redimem/main.go b/cmd/test_redimem/main.go index 744faa681c..a9732fce13 100644 --- a/cmd/test_redimem/main.go +++ b/cmd/test_redimem/main.go @@ -13,7 +13,7 @@ import ( "github.com/gomodule/redigo/redis" "github.com/sirupsen/logrus" - "github.com/jirs5/tracing-proxy/internal/redimem" + "github.com/opsramp/tracing-proxy/internal/redimem" ) func main() { diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index e9fbbbfb0b..26e24e22cd 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -11,19 +11,19 @@ import ( "github.com/facebookgo/inject" "github.com/facebookgo/startstop" - libtrace "github.com/honeycombio/libhoney-go" - "github.com/honeycombio/libhoney-go/transmission" + libtrace "github.com/opsramp/libtrace-go" + "github.com/opsramp/libtrace-go/transmission" flag "github.com/jessevdk/go-flags" - "github.com/jirs5/tracing-proxy/app" - "github.com/jirs5/tracing-proxy/collect" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/internal/peer" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/sample" - "github.com/jirs5/tracing-proxy/service/debug" - "github.com/jirs5/tracing-proxy/sharder" - "github.com/jirs5/tracing-proxy/transmit" + 
"github.com/opsramp/tracing-proxy/app" + "github.com/opsramp/tracing-proxy/collect" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/internal/peer" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/sample" + "github.com/opsramp/tracing-proxy/service/debug" + "github.com/opsramp/tracing-proxy/sharder" + "github.com/opsramp/tracing-proxy/transmit" ) // set by travis. @@ -148,9 +148,9 @@ func main() { Metrics: upstreamMetricsConfig, UseTls: c.GetGlobalUseTLS(), UseTlsInsecure: c.GetGlobalUseTLSInsecureSkip(), - OpsrampKey: opsrampkey, - OpsrampSecret: opsrampsecret, - ApiHost: opsrampapi, + OpsrampKey: opsrampkey, + OpsrampSecret: opsrampsecret, + ApiHost: opsrampapi, }, }) if err != nil { @@ -171,9 +171,9 @@ func main() { DisableCompression: !c.GetCompressPeerCommunication(), EnableMsgpackEncoding: false, Metrics: peerMetricsConfig, - OpsrampKey: opsrampkey, - OpsrampSecret: opsrampsecret, - ApiHost: opsrampapi, + OpsrampKey: opsrampkey, + OpsrampSecret: opsrampsecret, + ApiHost: opsrampapi, }, }) if err != nil { diff --git a/collect/cache/cache.go b/collect/cache/cache.go index 11928b33e6..ce7ff0930d 100644 --- a/collect/cache/cache.go +++ b/collect/cache/cache.go @@ -3,9 +3,9 @@ package cache import ( "time" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/types" ) // Cache is a non-threadsafe cache. It must not be used for concurrent access. 
diff --git a/collect/cache/cache_test.go b/collect/cache/cache_test.go index b9c87852c1..22525c693a 100644 --- a/collect/cache/cache_test.go +++ b/collect/cache/cache_test.go @@ -9,9 +9,9 @@ import ( "github.com/stretchr/testify/assert" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/types" ) // TestCacheSetGet sets a value then fetches it back diff --git a/collect/collect.go b/collect/collect.go index f83fcf361b..348f0243cd 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -10,13 +10,13 @@ import ( "time" lru "github.com/hashicorp/golang-lru" - "github.com/jirs5/tracing-proxy/collect/cache" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/sample" - "github.com/jirs5/tracing-proxy/transmit" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/collect/cache" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/sample" + "github.com/opsramp/tracing-proxy/transmit" + "github.com/opsramp/tracing-proxy/types" ) var ErrWouldBlock = errors.New("not adding span, channel buffer is full") diff --git a/collect/collect_benchmark_test.go b/collect/collect_benchmark_test.go index 2ab74f23bd..59b6c76f4a 100644 --- a/collect/collect_benchmark_test.go +++ b/collect/collect_benchmark_test.go @@ -12,13 +12,13 @@ import ( lru 
"github.com/hashicorp/golang-lru" "github.com/stretchr/testify/assert" - "github.com/jirs5/tracing-proxy/collect/cache" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/sample" - "github.com/jirs5/tracing-proxy/transmit" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/collect/cache" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/sample" + "github.com/opsramp/tracing-proxy/transmit" + "github.com/opsramp/tracing-proxy/types" ) func BenchmarkCollect(b *testing.B) { diff --git a/collect/collect_test.go b/collect/collect_test.go index 279a89c191..cd2608e653 100644 --- a/collect/collect_test.go +++ b/collect/collect_test.go @@ -13,13 +13,13 @@ import ( lru "github.com/hashicorp/golang-lru" "github.com/stretchr/testify/assert" - "github.com/jirs5/tracing-proxy/collect/cache" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/sample" - "github.com/jirs5/tracing-proxy/transmit" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/collect/cache" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/sample" + 
"github.com/opsramp/tracing-proxy/transmit" + "github.com/opsramp/tracing-proxy/types" ) // TestAddRootSpan tests that adding a root span winds up with a trace object in diff --git a/config/file_config.go b/config/file_config.go index 74aec3f077..3f5f70d292 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -10,7 +10,7 @@ import ( "github.com/fsnotify/fsnotify" "github.com/go-playground/validator" - libtrace "github.com/honeycombio/libhoney-go" + libtrace "github.com/opsramp/libtrace-go" "github.com/sirupsen/logrus" viper "github.com/spf13/viper" ) diff --git a/go.mod b/go.mod index a1c497e0d9..3f86af3cfe 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/jirs5/tracing-proxy +module github.com/opsramp/tracing-proxy go 1.16 @@ -17,12 +17,13 @@ require ( github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/husky v0.9.0 - github.com/honeycombio/libhoney-go v1.15.8 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.13.6 + github.com/klauspost/compress v1.15.11 + github.com/kr/text v0.2.0 // indirect github.com/leodido/go-urn v1.2.0 // indirect + github.com/opsramp/husky v0.0.0-20221221154514-e7327ac0e292 + github.com/opsramp/libtrace-go v0.0.0-20221221155402-84241b8f0556 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 @@ -32,18 +33,10 @@ require ( github.com/sirupsen/logrus v1.8.1 github.com/spf13/viper v1.10.1 
github.com/stretchr/testify v1.7.0 - github.com/vmihailenco/msgpack/v4 v4.3.11 + github.com/vmihailenco/msgpack/v5 v5.3.5 go.opentelemetry.io/proto/otlp v0.9.0 google.golang.org/grpc v1.50.1 gopkg.in/alexcesaro/statsd.v2 v2.0.0 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) - -//replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v0.0.0-20220209113356-39ae92fc19f4 -replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v1.15.9-0.20221219105703-796cb39b0512 - -//replace github.com/honeycombio/libhoney-go v1.15.8 => github.com/jirs5/libtrace-go v1.15.9-0.20221215130906-ffb6698e9c86 - -//replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220302161820-fe16f58d3996 -replace github.com/honeycombio/husky v0.9.0 => github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df diff --git a/go.sum b/go.sum index 77ebf554e1..0778861471 100644 --- a/go.sum +++ b/go.sum @@ -410,6 +410,7 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -602,10 +603,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df h1:vN66WfIFppi2IVEIp00wnmgBbvM6Jd6oT+WN5ChdUnQ= -github.com/jirs5/husky v0.9.1-0.20220616112458-7bb2625f28df/go.mod h1:2+Jrt6E/YFttamS1088JtvK9XalZJ8KcaAwaogPEouY= -github.com/jirs5/libtrace-go v1.15.9-0.20221219105703-796cb39b0512 h1:f2nQnoyzl433SeoDOBost44pMa/lB7vznZOnGGQM+fU= -github.com/jirs5/libtrace-go v1.15.9-0.20221219105703-796cb39b0512/go.mod h1:2LOVRXQk1CJ43G+14WmamU7RtAq/sjw+L2dwFChkq3g= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -619,8 +616,9 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= 
+github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -629,8 +627,9 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= @@ -667,6 +666,10 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/opsramp/husky v0.0.0-20221221154514-e7327ac0e292 h1:OWUVA83Ajd3x5txpa4CcO9HhgYJnQuMMZ/F2eKF3q2s= +github.com/opsramp/husky v0.0.0-20221221154514-e7327ac0e292/go.mod h1:7QWY3ju8mXdO3nWx2CqOTg4hwdTR9qhgcKelnhq7xiU= +github.com/opsramp/libtrace-go v0.0.0-20221221155402-84241b8f0556 h1:Or3Gll7Me87sFy4TxdBlKiH9Hwn4y4eFbCKNOl7HfjE= +github.com/opsramp/libtrace-go v0.0.0-20221221155402-84241b8f0556/go.mod h1:TgxzuWo01LQRLbJTEvDqzAWJ2tPi5zJhh5AmyQNsC/0= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= @@ -740,12 +743,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/vmihailenco/msgpack/v4 v4.3.11 h1:Q47CePddpNGNhk4GCnAx9DDtASi2rasatE0cd26cZoE= -github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= 
-github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1136,7 +1135,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= diff --git a/internal/peer/file.go b/internal/peer/file.go index c6ad23cfe4..1f070296cb 100644 --- a/internal/peer/file.go +++ b/internal/peer/file.go @@ -1,7 +1,7 @@ package peer import ( - "github.com/jirs5/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/config" "net" "sort" "strings" @@ -10,18 +10,20 @@ import ( ) type filePeers struct { - c config.Config - peers []string - callbacks []func() - peerLock sync.Mutex + c config.Config + peers []string + callbacks []func() + peerLock sync.Mutex } + var firstOccurancesOfGetPeers bool = false + // NewFilePeers returns a peers collection backed by the config file func newFilePeers(c config.Config) Peers { p := &filePeers{ - c: c, - peers: make([]string, 1), - callbacks: make([]func(), 0), + c: 
c, + peers: make([]string, 1), + callbacks: make([]func(), 0), } go p.watchFilePeers() @@ -42,9 +44,9 @@ func (p *filePeers) GetPeers() ([]string, error) { return retList, nil } -func (p *filePeers) watchFilePeers() { +func (p *filePeers) watchFilePeers() { tk := time.NewTicker(20 * time.Second) - originalPeerList, _:= p.c.GetPeers() + originalPeerList, _ := p.c.GetPeers() sort.Strings(originalPeerList) oldPeerList := originalPeerList for range tk.C { @@ -67,7 +69,7 @@ func (p *filePeers) RegisterUpdatedPeersCallback(callback func()) { p.callbacks = append(p.callbacks, callback) } -func getPeerMembers(originalPeerlist []string) []string { +func getPeerMembers(originalPeerlist []string) []string { var workingPeers []string wg := sync.WaitGroup{} for _, peer := range originalPeerlist { @@ -94,4 +96,4 @@ func isOpen(peer string) bool { return true } return false -} \ No newline at end of file +} diff --git a/internal/peer/file_test.go b/internal/peer/file_test.go index 6ae043f502..28bf8d1604 100644 --- a/internal/peer/file_test.go +++ b/internal/peer/file_test.go @@ -6,7 +6,7 @@ package peer import ( "testing" - "github.com/jirs5/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/config" ) func TestFilePeers(t *testing.T) { diff --git a/internal/peer/peers.go b/internal/peer/peers.go index 94ed4629de..3ed9a00b39 100644 --- a/internal/peer/peers.go +++ b/internal/peer/peers.go @@ -3,7 +3,7 @@ package peer import ( "errors" - "github.com/jirs5/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/config" ) // Peers holds the collection of peers for the cluster diff --git a/internal/peer/peers_test.go b/internal/peer/peers_test.go index d48fde80cc..85066ddb45 100644 --- a/internal/peer/peers_test.go +++ b/internal/peer/peers_test.go @@ -6,7 +6,7 @@ package peer import ( "testing" - "github.com/jirs5/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/config" 
"github.com/stretchr/testify/assert" ) diff --git a/internal/peer/redis.go b/internal/peer/redis.go index 50ecac2aad..2c3b8e5aaa 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -13,8 +13,8 @@ import ( "time" "github.com/gomodule/redigo/redis" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/internal/redimem" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/internal/redimem" "github.com/sirupsen/logrus" ) diff --git a/logger/logrus.go b/logger/logrus.go index 688583fc69..173151b1a0 100644 --- a/logger/logrus.go +++ b/logger/logrus.go @@ -1,9 +1,9 @@ package logger import ( - "github.com/jirs5/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/config" "github.com/sirupsen/logrus" - lumberjack "gopkg.in/natefinch/lumberjack.v2" + "gopkg.in/natefinch/lumberjack.v2" "os" ) diff --git a/metrics/metrics.go b/metrics/metrics.go index 094ae0780a..477a0ad71f 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -1,7 +1,7 @@ package metrics import ( - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/types" ) type Metrics interface { diff --git a/metrics/opsramp.go b/metrics/opsramp.go index e883a11835..134be89282 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -19,15 +19,13 @@ import ( "github.com/gogo/protobuf/proto" "github.com/gorilla/mux" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" 
"github.com/prometheus/client_golang/prometheus/promhttp" ) - - type OpsRampMetrics struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` @@ -105,22 +103,22 @@ func (p *OpsRampMetrics) Register(name string, metricType string) { if hostname, err := os.Hostname(); err == nil && hostname != "" { - hostMap["hostname"]=hostname + hostMap["hostname"] = hostname } switch metricType { case "counter": newmet = promauto.NewCounter(prometheus.CounterOpts{ - Name: name, - Namespace: p.prefix, - Help: name, + Name: name, + Namespace: p.prefix, + Help: name, ConstLabels: hostMap, }) case "gauge": newmet = promauto.NewGauge(prometheus.GaugeOpts{ - Name: name, - Namespace: p.prefix, - Help: name, + Name: name, + Namespace: p.prefix, + Help: name, ConstLabels: hostMap, }) case "histogram": @@ -130,7 +128,7 @@ func (p *OpsRampMetrics) Register(name string, metricType string) { Help: name, // This is an attempt at a usable set of buckets for a wide range of metrics // 16 buckets, first upper bound of 1, each following upper bound is 4x the previous - Buckets: prometheus.ExponentialBuckets(1, 4, 16), + Buckets: prometheus.ExponentialBuckets(1, 4, 16), ConstLabels: hostMap, }) } @@ -153,32 +151,31 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s hostMap := make(map[string]string) if hostname, err := os.Hostname(); err == nil && hostname != "" { - hostMap["hostname"]=hostname + hostMap["hostname"] = hostname } - switch metricType { case "counter": newmet = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: name, - Namespace: p.prefix, - Help: desc, + Name: name, + Namespace: p.prefix, + Help: desc, ConstLabels: hostMap, }, labels) case "gauge": newmet = promauto.NewGaugeVec( prometheus.GaugeOpts{ - Name: name, - Namespace: p.prefix, - Help: desc, + Name: name, + Namespace: p.prefix, + Help: desc, ConstLabels: hostMap, }, labels) case "histogram": newmet = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Name: 
name, - Namespace: p.prefix, - Help: desc, + Name: name, + Namespace: p.prefix, + Help: desc, ConstLabels: hostMap, // This is an attempt at a usable set of buckets for a wide range of metrics // 16 buckets, first upper bound of 1, each following upper bound is 4x the previous diff --git a/metrics/opsramp_test.go b/metrics/opsramp_test.go index 2aa368db99..1f176cebe3 100644 --- a/metrics/opsramp_test.go +++ b/metrics/opsramp_test.go @@ -7,8 +7,8 @@ import ( "fmt" "testing" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" "github.com/stretchr/testify/assert" ) diff --git a/route/errors.go b/route/errors.go index 5a13c8c9d8..bd42f72c0e 100644 --- a/route/errors.go +++ b/route/errors.go @@ -5,7 +5,7 @@ import ( "net/http" "runtime/debug" - husky "github.com/honeycombio/husky/otlp" + husky "github.com/opsramp/husky/otlp" ) type handlerError struct { diff --git a/route/errors_test.go b/route/errors_test.go index 327a8662e4..26b8bfc310 100644 --- a/route/errors_test.go +++ b/route/errors_test.go @@ -9,7 +9,7 @@ import ( "net/http/httptest" "testing" - "github.com/jirs5/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/logger" ) func TestHandlerReturnWithError(t *testing.T) { diff --git a/route/middleware.go b/route/middleware.go index c0f13a0454..9aaac0a1df 100644 --- a/route/middleware.go +++ b/route/middleware.go @@ -9,7 +9,7 @@ import ( "time" "github.com/gorilla/mux" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/types" ) // for generating request IDs diff --git a/route/otlp_trace.go b/route/otlp_trace.go index e170d47464..4cadd42ee1 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -4,22 +4,18 @@ import ( "context" "encoding/json" 
"fmt" - proxypb "github.com/honeycombio/libhoney-go/proto/proxypb" + proxypb "github.com/opsramp/libtrace-go/proto/proxypb" "google.golang.org/grpc/metadata" "log" "net/http" "time" - huskyotlp "github.com/honeycombio/husky/otlp" - "github.com/jirs5/tracing-proxy/types" + huskyotlp "github.com/opsramp/husky/otlp" + "github.com/opsramp/tracing-proxy/types" collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" ) - - - - func (router *Router) postOTLP(w http.ResponseWriter, req *http.Request) { ri := huskyotlp.GetRequestInfoFromHttpHeaders(req.Header) /*if err := ri.ValidateHeaders(); err != nil { @@ -64,7 +60,7 @@ func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTrac } if len(ri.Dataset) == 0 { - dataset, _ := router.Config.GetDataset() + dataset, _ := router.Config.GetDataset() ri.Dataset = dataset } diff --git a/route/otlp_trace_test.go b/route/otlp_trace_test.go index 05204bcbc4..9e591543c3 100644 --- a/route/otlp_trace_test.go +++ b/route/otlp_trace_test.go @@ -12,12 +12,12 @@ import ( "time" "github.com/golang/protobuf/proto" - huskyotlp "github.com/honeycombio/husky/otlp" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/transmit" "github.com/klauspost/compress/zstd" + huskyotlp "github.com/opsramp/husky/otlp" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/transmit" "github.com/stretchr/testify/assert" collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" common "go.opentelemetry.io/proto/otlp/common/v1" diff --git 
a/route/route.go b/route/route.go index d82be4b5b6..3e150b9830 100644 --- a/route/route.go +++ b/route/route.go @@ -7,7 +7,8 @@ import ( "encoding/json" "errors" "fmt" - proxypb "github.com/honeycombio/libhoney-go/proto/proxypb" + proxypb "github.com/opsramp/libtrace-go/proto/proxypb" + "github.com/vmihailenco/msgpack/v5" "io" "io/ioutil" "math" @@ -20,7 +21,6 @@ import ( "github.com/gorilla/mux" jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/zstd" - "github.com/vmihailenco/msgpack/v4" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -28,13 +28,13 @@ import ( // grpc/gzip compressor, auto registers on import _ "google.golang.org/grpc/encoding/gzip" - "github.com/jirs5/tracing-proxy/collect" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/sharder" - "github.com/jirs5/tracing-proxy/transmit" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/collect" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/sharder" + "github.com/opsramp/tracing-proxy/transmit" + "github.com/opsramp/tracing-proxy/types" collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" ) @@ -50,9 +50,6 @@ const ( defaultSampleRate = 1 ) - - - type Router struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` @@ -637,9 +634,10 @@ func makeDecoders(num int) (chan *zstd.Decoder, error) { func unmarshal(r *http.Request, data io.Reader, v interface{}) error { switch 
r.Header.Get("Content-Type") { case "application/x-msgpack", "application/msgpack": - return msgpack.NewDecoder(data). - UseDecodeInterfaceLoose(true). - Decode(v) + dec := msgpack.NewDecoder(data) + dec.UseLooseInterfaceDecoding(true) + + return dec.Decode(v) default: return jsoniter.NewDecoder(data).Decode(v) } diff --git a/route/route_test.go b/route/route_test.go index ebe7ff4e78..8e76295c2a 100644 --- a/route/route_test.go +++ b/route/route_test.go @@ -13,16 +13,15 @@ import ( "time" "github.com/facebookgo/inject" - "github.com/jirs5/tracing-proxy/collect" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/transmit" + "github.com/opsramp/tracing-proxy/collect" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/transmit" "github.com/gorilla/mux" - "github.com/jirs5/tracing-proxy/sharder" "github.com/klauspost/compress/zstd" - "github.com/vmihailenco/msgpack/v4" + "github.com/opsramp/tracing-proxy/sharder" "google.golang.org/grpc/metadata" ) diff --git a/sample/deterministic.go b/sample/deterministic.go index c1b8ab848e..216c9fc9ae 100644 --- a/sample/deterministic.go +++ b/sample/deterministic.go @@ -4,9 +4,9 @@ import ( "crypto/sha1" "math" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/types" ) // shardingSalt is a random bit to make 
sure we don't shard the same as any diff --git a/sample/deterministic_test.go b/sample/deterministic_test.go index 970ca451ca..ad37287edd 100644 --- a/sample/deterministic_test.go +++ b/sample/deterministic_test.go @@ -9,9 +9,9 @@ import ( "github.com/stretchr/testify/assert" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/types" ) // TestInitialization tests that sample rates are consistently returned diff --git a/sample/dynamic.go b/sample/dynamic.go index ca59685543..ef79d185d8 100644 --- a/sample/dynamic.go +++ b/sample/dynamic.go @@ -5,10 +5,10 @@ import ( dynsampler "github.com/honeycombio/dynsampler-go" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/types" ) type DynamicSampler struct { diff --git a/sample/dynamic_ema.go b/sample/dynamic_ema.go index 31a323b814..7793a61272 100644 --- a/sample/dynamic_ema.go +++ b/sample/dynamic_ema.go @@ -5,10 +5,10 @@ import ( dynsampler "github.com/honeycombio/dynsampler-go" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" 
+ "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/types" ) type EMADynamicSampler struct { diff --git a/sample/dynamic_ema_test.go b/sample/dynamic_ema_test.go index b1a156e6e7..dcbec561cc 100644 --- a/sample/dynamic_ema_test.go +++ b/sample/dynamic_ema_test.go @@ -6,10 +6,10 @@ package sample import ( "testing" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/types" "github.com/stretchr/testify/assert" ) diff --git a/sample/dynamic_test.go b/sample/dynamic_test.go index 550e216071..65e7467ff1 100644 --- a/sample/dynamic_test.go +++ b/sample/dynamic_test.go @@ -6,10 +6,10 @@ package sample import ( "testing" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/types" "github.com/stretchr/testify/assert" ) diff --git a/sample/rules.go b/sample/rules.go index 7a6ca19ee4..89124c7580 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -4,10 +4,10 @@ import ( "math/rand" "strings" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/types" + 
"github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/types" ) type RulesBasedSampler struct { @@ -73,7 +73,7 @@ func (s *RulesBasedSampler) GetSampleRate(trace *types.Trace) (rate uint, keep b var exists bool attributeMapKeys := []string{"spanAttributes", "resourceAttributes", "eventAttributes"} - + for _, attributeKey := range attributeMapKeys { if attribute, ok := span.Data[attributeKey]; ok && attribute != nil { value, exists = attribute.(map[string]interface{})[condition.Field] diff --git a/sample/rules_test.go b/sample/rules_test.go index 95b57a0e19..d67ebcd679 100644 --- a/sample/rules_test.go +++ b/sample/rules_test.go @@ -6,10 +6,10 @@ package sample import ( "testing" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/types" "github.com/stretchr/testify/assert" ) diff --git a/sample/sample.go b/sample/sample.go index ac05d7f31b..15c2b703d5 100644 --- a/sample/sample.go +++ b/sample/sample.go @@ -3,10 +3,10 @@ package sample import ( "os" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/types" 
) type Sampler interface { diff --git a/sample/sample_test.go b/sample/sample_test.go index 70c8cf86a8..7ced3005ef 100644 --- a/sample/sample_test.go +++ b/sample/sample_test.go @@ -7,9 +7,9 @@ import ( "testing" "github.com/facebookgo/inject" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" ) func TestDependencyInjection(t *testing.T) { diff --git a/sample/totalthroughput.go b/sample/totalthroughput.go index 25629e2d75..e901b45676 100644 --- a/sample/totalthroughput.go +++ b/sample/totalthroughput.go @@ -5,10 +5,10 @@ import ( dynsampler "github.com/honeycombio/dynsampler-go" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/types" ) type TotalThroughputSampler struct { diff --git a/sample/totalthroughput_test.go b/sample/totalthroughput_test.go index d5567a4aa2..90f7c352af 100644 --- a/sample/totalthroughput_test.go +++ b/sample/totalthroughput_test.go @@ -6,10 +6,10 @@ package sample import ( "testing" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + 
"github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/types" "github.com/stretchr/testify/assert" ) diff --git a/sample/trace_key.go b/sample/trace_key.go index 45e5f206fa..bef5309f13 100644 --- a/sample/trace_key.go +++ b/sample/trace_key.go @@ -5,7 +5,7 @@ import ( "sort" "strconv" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/types" ) type traceKey struct { diff --git a/sample/trace_key_test.go b/sample/trace_key_test.go index 1448ecafb2..d5453184c9 100644 --- a/sample/trace_key_test.go +++ b/sample/trace_key_test.go @@ -6,7 +6,7 @@ package sample import ( "testing" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/types" "github.com/stretchr/testify/assert" ) diff --git a/service/debug/debug_service.go b/service/debug/debug_service.go index 3de36d48b0..c5d1cb1f57 100644 --- a/service/debug/debug_service.go +++ b/service/debug/debug_service.go @@ -14,7 +14,7 @@ import ( "sync" "syscall" - "github.com/jirs5/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/config" metrics "github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics/exp" "github.com/sirupsen/logrus" diff --git a/sharder/deterministic.go b/sharder/deterministic.go index 0c21d6b263..d4c802c029 100644 --- a/sharder/deterministic.go +++ b/sharder/deterministic.go @@ -10,9 +10,9 @@ import ( "sync" "time" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/internal/peer" - "github.com/jirs5/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/internal/peer" + "github.com/opsramp/tracing-proxy/logger" "github.com/pkg/errors" ) diff --git 
a/sharder/deterministic_test.go b/sharder/deterministic_test.go index 88189abb9e..cda75f1f70 100644 --- a/sharder/deterministic_test.go +++ b/sharder/deterministic_test.go @@ -6,9 +6,9 @@ package sharder import ( "testing" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/internal/peer" - "github.com/jirs5/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/internal/peer" + "github.com/opsramp/tracing-proxy/logger" "github.com/stretchr/testify/assert" ) diff --git a/sharder/sharder.go b/sharder/sharder.go index 548476249d..1df37257bd 100644 --- a/sharder/sharder.go +++ b/sharder/sharder.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - "github.com/jirs5/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/config" ) // Shard repreesents a single instance of tracing-proxy. diff --git a/sharder/single.go b/sharder/single.go index fd1900b8fc..8dd1072625 100644 --- a/sharder/single.go +++ b/sharder/single.go @@ -1,7 +1,7 @@ package sharder import ( - "github.com/jirs5/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/logger" ) // SingleShard implements the Shard interface diff --git a/transmit/mock.go b/transmit/mock.go index e001978ff1..5a3fe6bcc0 100644 --- a/transmit/mock.go +++ b/transmit/mock.go @@ -3,7 +3,7 @@ package transmit import ( "sync" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/types" ) type MockTransmission struct { diff --git a/transmit/transmit.go b/transmit/transmit.go index 0cd4405f6b..ed1a2f90ed 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -6,13 +6,13 @@ import ( "os" "sync" - libtrace "github.com/honeycombio/libhoney-go" - "github.com/honeycombio/libhoney-go/transmission" + libtrace 
"github.com/opsramp/libtrace-go" + "github.com/opsramp/libtrace-go/transmission" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" - "github.com/jirs5/tracing-proxy/types" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + "github.com/opsramp/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/types" ) type Transmission interface { @@ -79,11 +79,11 @@ func (d *DefaultTransmission) Start() error { go d.processResponses(processCtx, d.LibhClient.TxResponses()) //proxy support for traces - proto,_:= d.Config.GetProxyProtocol() - server,_ := d.Config.GetProxyServer() - port:= d.Config.GetProxyPort() - username,_ := d.Config.GetProxyUsername() - password,_ := d.Config.GetProxyPassword() + proto, _ := d.Config.GetProxyProtocol() + server, _ := d.Config.GetProxyServer() + port := d.Config.GetProxyPort() + username, _ := d.Config.GetProxyUsername() + password, _ := d.Config.GetProxyPassword() proxyUrl := "" if server != "" && proto != "" { @@ -96,7 +96,6 @@ func (d *DefaultTransmission) Start() error { os.Setenv("HTTP_PROXY", proxyUrl) } - // listen for config reloads d.Config.RegisterReloadCallback(d.reloadTransmissionBuilder) return nil diff --git a/transmit/transmit_test.go b/transmit/transmit_test.go index a69015500b..dd720e8b28 100644 --- a/transmit/transmit_test.go +++ b/transmit/transmit_test.go @@ -7,11 +7,11 @@ import ( "testing" "github.com/facebookgo/inject" - "github.com/jirs5/tracing-proxy/config" - "github.com/jirs5/tracing-proxy/logger" - "github.com/jirs5/tracing-proxy/metrics" + "github.com/opsramp/tracing-proxy/config" + "github.com/opsramp/tracing-proxy/logger" + 
"github.com/opsramp/tracing-proxy/metrics" - libtrace "github.com/honeycombio/libhoney-go" + libtrace "github.com/opsramp/libtrace-go" "github.com/stretchr/testify/assert" ) From 5214bb6e531512d1bf2464d451e6e567cd2135ac Mon Sep 17 00:00:00 2001 From: Kent Quirk Date: Wed, 21 Dec 2022 12:25:20 -0500 Subject: [PATCH 268/351] chore: update dependabot (#583) Make dependabot do the right thing with titles --- .github/dependabot.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 90c0fdb305..4752e727bf 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -13,3 +13,6 @@ updates: - "type: dependencies" reviewers: - "honeycombio/telemetry-team" + commit-message: + prefix: "maint" + include: "scope" From 4ab9d1dd184ea2d482152629c6740e165d8c3242 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Mon, 2 Jan 2023 09:52:44 +0530 Subject: [PATCH 269/351] helm chart for tracing-proxy --- build/opsramp-tracing-proxy/.helmignore | 23 ++ build/opsramp-tracing-proxy/Chart.yaml | 16 + .../templates/_helpers.tpl | 51 +++ .../templates/deployment.yaml | 52 +++ .../templates/k8s-config-cm.yaml | 353 ++++++++++++++++++ .../templates/k8s-rules-cm.yaml | 259 +++++++++++++ .../templates/service.yaml | 15 + build/opsramp-tracing-proxy/values.yaml | 55 +++ 8 files changed, 824 insertions(+) create mode 100644 build/opsramp-tracing-proxy/.helmignore create mode 100644 build/opsramp-tracing-proxy/Chart.yaml create mode 100644 build/opsramp-tracing-proxy/templates/_helpers.tpl create mode 100644 build/opsramp-tracing-proxy/templates/deployment.yaml create mode 100644 build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml create mode 100644 build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml create mode 100644 build/opsramp-tracing-proxy/templates/service.yaml create mode 100644 build/opsramp-tracing-proxy/values.yaml diff --git 
a/build/opsramp-tracing-proxy/.helmignore b/build/opsramp-tracing-proxy/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/build/opsramp-tracing-proxy/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/build/opsramp-tracing-proxy/Chart.yaml b/build/opsramp-tracing-proxy/Chart.yaml new file mode 100644 index 0000000000..5fd699bfc7 --- /dev/null +++ b/build/opsramp-tracing-proxy/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v2 +name: opsramp-tracing-proxy +description: A Helm chart for OpsRamp Tracing Proxy + +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.0.0" \ No newline at end of file diff --git a/build/opsramp-tracing-proxy/templates/_helpers.tpl b/build/opsramp-tracing-proxy/templates/_helpers.tpl new file mode 100644 index 0000000000..b4fa749a2c --- /dev/null +++ b/build/opsramp-tracing-proxy/templates/_helpers.tpl @@ -0,0 +1,51 @@ +{{/* +Expand the name of the chart. 
+*/}} +{{- define "opsramp-tracing-proxy.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "opsramp-tracing-proxy.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "opsramp-tracing-proxy.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "opsramp-tracing-proxy.labels" -}} +helm.sh/chart: {{ include "opsramp-tracing-proxy.chart" . }} +{{ include "opsramp-tracing-proxy.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "opsramp-tracing-proxy.selectorLabels" -}} +app.kubernetes.io/name: {{ include "opsramp-tracing-proxy.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/build/opsramp-tracing-proxy/templates/deployment.yaml b/build/opsramp-tracing-proxy/templates/deployment.yaml new file mode 100644 index 0000000000..45e66eeacf --- /dev/null +++ b/build/opsramp-tracing-proxy/templates/deployment.yaml @@ -0,0 +1,52 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "opsramp-tracing-proxy.fullname" . 
}} + labels: + {{- include "opsramp-tracing-proxy.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "opsramp-tracing-proxy.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "opsramp-tracing-proxy.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: opsramp-tracing-rules + mountPath: /etc/tracing-proxy/rules.toml + subPath: rules.toml + readOnly: true + - name: opsramp-tracing-config + mountPath: /etc/tracing-proxy/config.toml + subPath: config.toml + readOnly: true + volumes: + - configMap: + name: opsramp-tracing-proxy-rules + name: opsramp-tracing-rules + - configMap: + name: opsramp-tracing-proxy-config + name: opsramp-tracing-config + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml b/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml new file mode 100644 index 0000000000..7e2f79a251 --- /dev/null +++ b/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml @@ -0,0 +1,353 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: opsramp-tracing-proxy-config + labels: + name: opsramp-tracing-proxy-config +data: + config.toml: |- + ##################### + ## Refinery Config ## + ##################### + + # ListenAddr is the IP and port on which to listen for incoming events. 
Incoming + # traffic is expected to be HTTP, so if using SSL put something like nginx in + # front to do the decryption. + # Should be of the form 0.0.0.0:8080 + # Not eligible for live reload. + ListenAddr = "0.0.0.0:8080" + + # GRPCListenAddr is the IP and port on which to listen for incoming events over + # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put + # something like nginx in front to do the decryption. + # Should be of the form 0.0.0.0:9090 + # Not eligible for live reload. + GRPCListenAddr = "0.0.0.0:4317" + + # PeerListenAddr is the IP and port on which to listen for traffic being + # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL + # put something like nginx in front to do the decryption. Must be different from + # ListenAddr + # Should be of the form 0.0.0.0:8081 + # Not eligible for live reload. + PeerListenAddr = "0.0.0.0:8083" + GRPCPeerListenAddr = "0.0.0.0:8084" + + # ProxyProtocol accepts http and https + # Not Eligible for live reload. + ProxyProtocol = {{ .Values.config.proxy.protocol | default "" | quote }} + + # ProxyServer takes the proxy server address + # Not Eligible for live reload. + ProxyServer = {{ .Values.config.proxy.server | default "" | quote }} + + # ProxyPort takes the proxy server port + # Not Eligible for live reload. + ProxyPort = {{ .Values.config.proxy.port | default 3128 }} + + # ProxyUserName takes the proxy username + # Not Eligible for live reload. + ProxyUserName = {{ .Values.config.proxy.username | default "" | quote }} + + # ProxyPassword takes the proxy password + # Not Eligible for live reload. + ProxyPassword = {{ .Values.config.proxy.password | default "" | quote }} + + # CompressPeerCommunication determines whether refinery will compress span data + # it forwards to peers. If it costs money to transmit data between refinery + # instances (e.g. 
they're spread across AWS availability zones), then you + # almost certainly want compression enabled to reduce your bill. The option to + # disable it is provided as an escape hatch for deployments that value lower CPU + # utilization over data transfer costs. + CompressPeerCommunication = true + + # APIKeys is a list of Opsramp API keys that the proxy will accept. This list + # only applies to events - other Opsramp API actions will fall through to the + # upstream API directly. + # Adding keys here causes events arriving with API keys not in this list to be + # rejected with an HTTP 401 error If an API key that is a literal '*' is in the + # list, all API keys are accepted. + # Eligible for live reload. + APIKeys = [ + # "replace-me", + # "more-optional-keys", + "*", # wildcard accept all keys + ] + + # OpsrampAPI is the URL for the upstream Opsramp API. + # Eligible for live reload. + OpsrampAPI = {{ .Values.config.api | default "" | quote }} + + # OpsrampKey is used to get the OauthToken + OpsrampKey = {{ .Values.config.key | default "" | quote }} + + # OpsrampSecret is used to get the OauthToken + OpsrampSecret = {{ .Values.config.secret | default "" | quote }} + + # Traces are send to the client with given tenantid + TenantId = {{ .Values.config.tenantId | default "" | quote }} + + # Dataset you want to use for sampling + Dataset = {{ .Values.config.dataset | default "ds" | quote }} + + #Tls Options + UseTls = {{ .Values.config.useTls }} + UseTlsInsecure = {{ .Values.config.UseTlsInsecure }} + + # SendDelay is a short timer that will be triggered when a trace is complete. + # Refinery will wait this duration before actually sending the trace. The + # reason for this short delay is to allow for small network delays or clock + # jitters to elapse and any final spans to arrive before actually sending the + # trace. This supports duration strings with supplied units. Set to 0 for + # immediate sends. + # Eligible for live reload. 
+ SendDelay = "2s" + + # TraceTimeout is a long timer; it represents the outside boundary of how long + # to wait before sending an incomplete trace. Normally traces are sent when the + # root span arrives. Sometimes the root span never arrives (due to crashes or + # whatever), and this timer will send a trace even without having received the + # root span. If you have particularly long-lived traces you should increase this + # timer. This supports duration strings with supplied units. + # Eligible for live reload. + TraceTimeout = "60s" + + # MaxBatchSize is the number of events to be included in the batch for sending + MaxBatchSize = 500 + + # SendTicker is a short timer; it determines the duration to use to check for traces to send + SendTicker = "100ms" + + # LoggingLevel is the level above which we should log. Debug is very verbose, + # and should only be used in pre-production environments. Info is the + # recommended level. Valid options are "debug", "info", "error", and + # "panic" + # Not eligible for live reload. + LoggingLevel = "debug" + + # UpstreamBufferSize and PeerBufferSize control how large of an event queue to use + # when buffering events that will be forwarded to peers or the upstream API. + UpstreamBufferSize = 10000 + PeerBufferSize = 10000 + + # DebugServiceAddr sets the IP and port the debug service will run on + # The debug service will only run if the command line flag -d is specified + # The debug service runs on the first open port between localhost:6060 and :6069 by default + # DebugServiceAddr = "localhost:8085" + + # AddHostMetadataToTrace determines whether or not to add information about + # the host that Refinery is running on to the spans that it processes. + # If enabled, information about the host will be added to each span with the + # prefix `meta.refinery.`. + # Currently the only value added is 'meta.refinery.local_hostname'. 
+ # Not eligible for live reload + AddHostMetadataToTrace = false + + # Metrics are sent to OpsRamp (The collection happens based on configuration specified + in OpsRampMetrics and only works when the Metrics is set to "prometheus") + SendMetricsToOpsRamp = true + + ############################ + ## Implementation Choices ## + ############################ + + # Each of the config options below chooses an implementation of a Refinery + # component to use. Depending on the choice there may be more configuration + # required below in the section for that choice. Changing implementation choices + # requires a process restart; these changes will not be picked up by a live + # config reload. (Individual config options for a given implementation may be + # eligible for live reload). + + # Collector describes which collector to use for collecting traces. The only + # current valid option is "InMemCollector". More can be added by adding + # implementations of the Collector interface. + Collector = "InMemCollector" + + ######################### + ## Peer Management ## + ######################### + + [PeerManagement] + Type = "file" + # Peers is the list of all servers participating in this proxy cluster. Events + # will be sharded evenly across all peers based on the Trace ID. Values here + # should be the base URL used to access the peer, and should include scheme, + # hostname (or ip address) and port. All servers in the cluster should be in + # this list, including this host. + Peers = [ + "http://127.0.0.1:8084", #only grpc peer listener used + # "http://127.0.0.1:8083", + # "http://10.1.2.3.4:8080", + # "http://refinery-1231:8080", + # "http://peer-3.fqdn" // assumes port 80 + ] + + # [PeerManagement] + # Type = "redis" + # RedisHost is used to connect to redis for peer cluster membership management. + # Further, if the environment variable 'REFINERY_REDIS_HOST' is set it takes + # precedence and this value is ignored. + # Not eligible for live reload. 
+ # RedisHost = "localhost:6379" + + # RedisPassword is the password used to connect to redis for peer cluster membership management. + # If the environment variable 'REFINERY_REDIS_PASSWORD' is set it takes + # precedence and this value is ignored. + # Not eligible for live reload. + # RedisPassword = "" + + # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. + # Not eligible for live reload. + # UseTLS = false + + # UseTLSInsecure disables certificate checks + # Not eligible for live reload. + # UseTLSInsecure = false + + # IdentifierInterfaceName is optional. By default, when using RedisHost, Refinery will use + # the local hostname to identify itself to other peers in Redis. If your environment + # requires that you use IPs as identifiers (for example, if peers can't resolve each other + # by name), you can specify the network interface that Refinery is listening on here. + # Refinery will use the first unicast address that it finds on the specified network + # interface as its identifier. + # Not eligible for live reload. + # IdentifierInterfaceName = "eth0" + + # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Refinery will default to the first + # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use + # the first IPV6 unicast address found. + # UseIPV6Identifier = false + + # RedisIdentifier is optional. By default, when using RedisHost, Refinery will use + # the local hostname to identify itself to other peers in Redis. If your environment + # requires that you use IPs as identifiers (for example, if peers can't resolve each other + # by name), you can specify the exact identifier (IP address, etc) to use here. + # Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set. 
+ # RedisIdentifier = "192.168.1.1" + + ######################### + ## In-Memory Collector ## + ######################### + + # InMemCollector brings together all the settings that are relevant to + # collecting spans together to make traces. + [InMemCollector] + + # The collection cache is used to collect all spans into a trace as well as + # remember the sampling decision for any spans that might come in after the + # trace has been marked "complete" (either by timing out or seeing the root + # span). The number of traces in the cache should be many multiples (100x to + # 1000x) of the total number of concurrently active traces (trace throughput * + # trace duration). + # Eligible for live reload. Growing the cache capacity with a live config reload + # is fine. Avoid shrinking it with a live reload (you can, but it may cause + # temporary odd sampling decisions). + CacheCapacity = 1000 + + # MaxAlloc is optional. If set, it must be an integer >= 0. 64-bit values are + # supported. + # If set to a non-zero value, once per tick (see SendTicker) the collector + # will compare total allocated bytes to this value. If allocation is too + # high, cache capacity will be reduced and an error will be logged. + # Useful values for this setting are generally in the range of 75%-90% of + # available system memory. + MaxAlloc = 0 + + ################### + ## Logrus Logger ## + ################### + + # LogrusLogger is a section of the config only used if you are using the + # LogrusLogger to send all logs to STDOUT using the logrus package. If you are + # using a different logger (eg Opsramp logger) you can leave all this + # commented out. + [LogrusLogger] + + # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] + LogFormatter = {{ .Values.logging.log_format | default "json" | quote }} + + # LogOutput specifies where the logs are supposed to be written. 
Accepts one of ["stdout", "stderr", "file"] + LogOutput = {{ .Values.logging.log_output | default "stdout" | quote }} + + ## LogrusLogger.File - specifies configs for logs when LogOutput is set to "file" + [LogrusLogger.File] + + # FileName specifies the location where the logs are supposed to be stored + FileName = "/var/log/opsramp/tracing-proxy.log" + + # MaxSize is the maximum size in megabytes of the log file before it gets rotated. + MaxSize = 1 + + # MaxBackups is the maximum number of old log files to retain. + MaxBackups = 3 + + # Compress determines if the rotated log files should be compressed + # using gzip. + Compress = true + + + ######################## + ## Prometheus Metrics ## + ######################## + + [OpsRampMetrics] + # MetricsListenAddr determines the interface and port on which Prometheus will + # listen for requests for /metrics. Must be different from the main Refinery + # listener. + # Not eligible for live reload. + MetricsListenAddr = "localhost:2112" + + # OpsRampMetricsAPI is the URL for the upstream OpsRamp API. + # Not Eligible for live reload. + OpsRampMetricsAPI = {{ .Values.metrics.api | default "" | quote }} + + # OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. + # Not Eligible for live reload. + OpsRampTenantID = {{ .Values.metrics.tenantId | default "" | quote }} + + # OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. + # This is separate from the APIKeys used to authenticate regular + # traffic. + # Not Eligible for live reload. + OpsRampMetricsAPIKey = {{ .Values.metrics.key | default "" | quote }} + + # OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. + # This is separate from the APISecret used to authenticate regular + # traffic. + # Not Eligible for live reload. 
+ OpsRampMetricsAPISecret = {{ .Values.metrics.secret | default "" | quote }} + + # OpsRampMetricsReportingInterval is the frequency specified in seconds at which + # the metrics are collected and sent to OpsRamp + # Not Eligible for live reload. + OpsRampMetricsReportingInterval = 10 + + # OpsRampMetricsRetryCount is the number of times we retry in case the send fails + # Not Eligible for live reload. + OpsRampMetricsRetryCount = 2 + + # ProxyProtocol accepts http and https + # Not Eligible for live reload. + ProxyProtocol = {{ .Values.metrics.proxy.protocol | default "" | quote }} + + # ProxyServer takes the proxy server address + # Not Eligible for live reload. + ProxyServer = {{ .Values.metrics.proxy.server | default "" | quote }} + + # ProxyPort takes the proxy server port + # Not Eligible for live reload. + ProxyPort = {{ .Values.metrics.proxy.port | default 3128 }} + + # ProxyUserName takes the proxy username + # Not Eligible for live reload. + ProxyUserName = {{ .Values.metrics.proxy.username | default "" | quote }} + + # ProxyPassword takes the proxy password + # Not Eligible for live reload. + ProxyPassword = {{ .Values.metrics.proxy.password | default "" | quote }} + + # OpsRampMetricsList is a list of regular expressions which match the metric + # names. Keep the list as small as possible since too many regular expressions can lead to bad performance. + # Internally all the regex in the list are concatenated using '|' to make the computation a little faster. 
+ # Not Eligible for live reload + OpsRampMetricsList = [{{ .Values.metrics.list | join "," }}] diff --git a/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml b/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml new file mode 100644 index 0000000000..926122599e --- /dev/null +++ b/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml @@ -0,0 +1,259 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: opsramp-tracing-proxy-rules + labels: + name: opsramp-tracing-proxy-rules +data: + rules.toml: |- + ############################ + ## Sampling Rules Config ## + ############################ + + # DryRun - If enabled, marks traces that would be dropped given current sampling rules, + # and sends all traces regardless + DryRun = true + + # DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to refinery_kept + DryRunFieldName = "fromProxy" + + # DeterministicSampler is a section of the config for manipulating the + # Deterministic Sampler implementation. This is the simplest sampling algorithm + # - it is a static sample rate, choosing traces randomly to either keep or send + # (at the appropriate rate). It is not influenced by the contents of the trace. + Sampler = "DeterministicSampler" + + # SampleRate is the rate at which to sample. It indicates a ratio, where one + # sample trace is kept for every n traces seen. For example, a SampleRate of 30 + # will keep 1 out of every 30 traces. The choice on whether to keep any specific + # trace is random, so the rate is approximate. + # Eligible for live reload. + SampleRate = 1 + + [dataset1] + + # Note: If your dataset name contains a space, you will have to escape the dataset name + # using single quotes, such as ['dataset 1'] + + # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler + # implementation. This sampler collects the values of a number of fields from a + # trace and uses them to form a key. 
This key is handed to the standard dynamic + # sampler algorithm which generates a sample rate based on the frequency with + # which that key has appeared in the previous ClearFrequencySec seconds. See + # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics + # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from + # that package. + Sampler = "DynamicSampler" + + # SampleRate is the goal rate at which to sample. It indicates a ratio, where + # one sample trace is kept for every n traces seen. For example, a SampleRate of + # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # sampler, who assigns a sample rate for each trace based on the fields selected + # from that trace. + # Eligible for live reload. + SampleRate = 2 + + # FieldList is a list of all the field names to use to form the key that will be + # handed to the dynamic sampler. The cardinality of the combination of values + # from all of these keys should be reasonable in the face of the frequency of + # those keys. If the combination of fields in these keys essentially makes them + # unique, the dynamic sampler will do no sampling. If the keys have too few + # values, you won't get samples of the most interesting traces. A good key + # selection will have consistent values for high frequency boring traffic and + # unique values for outliers and interesting traffic. Including an error field + # (or something like HTTP status code) is an excellent choice. As an example, + # assuming 30 or so endpoints, a combination of HTTP endpoint and status code + # would be a good set of keys in order to let you see accurately use of all + # endpoints and call out when there is failing traffic to any endpoint. Field + # names may come from any span in the trace. + # Eligible for live reload. 
+ FieldList = ["request.method","response.status_code"] + + # UseTraceLength will add the number of spans in the trace in to the dynamic + # sampler as part of the key. The number of spans is exact, so if there are + # normally small variations in trace length you may want to leave this off. If + # traces are consistent lengths and changes in trace length is a useful + # indicator of traces you'd like to see in Opsramp, set this to true. + # Eligible for live reload. + UseTraceLength = true + + # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # to the root span of the trace containing the key used by the sampler to decide + # the sample rate. This can be helpful in understanding why the sampler is + # making certain decisions about sample rate and help you understand how to + # better choose the sample rate key (aka the FieldList setting above) to use. + AddSampleRateKeyToTrace = true + + # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # when adding the sample rate key to the trace. This setting is only used when + # AddSampleRateKeyToTrace is true. + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + # ClearFrequencySec is the name of the field the sampler will use to determine + # the period over which it will calculate the sample rate. This setting defaults + # to 30. + # Eligible for live reload. + ClearFrequencySec = 60 + + [dataset2] + + # EMADynamicSampler is a section of the config for manipulating the Exponential + # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, + # it attempts to average a given sample rate, weighting rare traffic and frequent + # traffic differently so as to end up with the correct average. + # + # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended + # for most use cases. 
Based on the DynamicSampler implementation, EMADynamicSampler differs + # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential + # Moving Average of counts seen per key, and adjusts this average at regular intervals. + # The weight applied to more recent intervals is defined by `weight`, a number between + # (0, 1) - larger values weight the average more toward recent observations. In other words, + # a larger weight will cause sample rates more quickly adapt to traffic patterns, + # while a smaller weight will result in sample rates that are less sensitive to bursts or drops + # in traffic and thus more consistent over time. + # + # Keys that are not found in the EMA will always have a sample + # rate of 1. Keys that occur more frequently will be sampled on a logarithmic + # curve. In other words, every key will be represented at least once in any + # given window and more frequent keys will have their sample rate + # increased proportionally to wind up with the goal sample rate. + Sampler = "EMADynamicSampler" + + # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where + # one sample trace is kept for every n traces seen. For example, a SampleRate of + # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # sampler, who assigns a sample rate for each trace based on the fields selected + # from that trace. + # Eligible for live reload. + GoalSampleRate = 2 + + # FieldList is a list of all the field names to use to form the key that will be + # handed to the dynamic sampler. The cardinality of the combination of values + # from all of these keys should be reasonable in the face of the frequency of + # those keys. If the combination of fields in these keys essentially makes them + # unique, the dynamic sampler will do no sampling. If the keys have too few + # values, you won't get samples of the most interesting traces. 
A good key + # selection will have consistent values for high frequency boring traffic and + # unique values for outliers and interesting traffic. Including an error field + # (or something like HTTP status code) is an excellent choice. As an example, + # assuming 30 or so endpoints, a combination of HTTP endpoint and status code + # would be a good set of keys in order to let you see accurately use of all + # endpoints and call out when there is failing traffic to any endpoint. Field + # names may come from any span in the trace. + # Eligible for live reload. + FieldList = ["request.method","response.status_code"] + + # UseTraceLength will add the number of spans in the trace in to the dynamic + # sampler as part of the key. The number of spans is exact, so if there are + # normally small variations in trace length you may want to leave this off. If + # traces are consistent lengths and changes in trace length is a useful + # indicator of traces you'd like to see in Opsramp, set this to true. + # Eligible for live reload. + UseTraceLength = true + + # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # to the root span of the trace containing the key used by the sampler to decide + # the sample rate. This can be helpful in understanding why the sampler is + # making certain decisions about sample rate and help you understand how to + # better choose the sample rate key (aka the FieldList setting above) to use. + AddSampleRateKeyToTrace = true + + # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # when adding the sample rate key to the trace. This setting is only used when + # AddSampleRateKeyToTrace is true. + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + # AdjustmentInterval defines how often (in seconds) we adjust the moving average from + # recent observations. Default 15s + # Eligible for live reload. 
+ AdjustmentInterval = 15 + + # Weight is a value between (0, 1) indicating the weighting factor used to adjust + # the EMA. With larger values, newer data will influence the average more, and older + # values will be factored out more quickly. In mathematical literature concerning EMA, + # this is referred to as the `alpha` constant. + # Default is 0.5 + # Eligible for live reload. + Weight = 0.5 + + # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. + # Once MaxKeys is reached, new keys will not be included in the sample rate map, but + # existing keys will continue to be be counted. You can use this to keep the sample rate + # map size under control. + # Eligible for live reload + MaxKeys = 0 + + # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key + # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to + # decide what constitutes "zero". Keys with averages below this threshold will be removed + # from the EMA. Default is the same as Weight, as this prevents a key with the smallest + # integer value (1) from being aged out immediately. This value should generally be <= Weight, + # unless you have very specific reasons to set it higher. + # Eligible for live reload + AgeOutValue = 0.5 + + # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define + # the burst detection threshold. If total counts observed for a given interval exceed the threshold + # EMA is updated immediately, rather than waiting on the AdjustmentInterval. + # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, + # burst detection will kick in. + # Eligible for live reload + BurstMultiple = 2.0 + + # BurstDetectionDelay indicates the number of intervals to run after Start is called before + # burst detection kicks in. 
+ # Defaults to 3 + # Eligible for live reload + BurstDetectionDelay = 3 + + [dataset3] + + Sampler = "DeterministicSampler" + SampleRate = 10 + + [dataset4] + + Sampler = "RulesBasedSampler" + + [[dataset4.rule]] + name = "drop healtchecks" + drop = true + [[dataset4.rule.condition]] + field = "http.route" + operator = "=" + value = "/health-check" + + [[dataset4.rule]] + name = "keep slow 500 errors" + SampleRate = 1 + [[dataset4.rule.condition]] + field = "status_code" + operator = "=" + value = 500 + [[dataset4.rule.condition]] + field = "duration_ms" + operator = ">=" + value = 1000.789 + + [[dataset4.rule]] + name = "dynamically sample 200 responses" + [[dataset4.rule.condition]] + field = "status_code" + operator = "=" + value = 200 + [dataset4.rule.sampler.EMADynamicSampler] + Sampler = "EMADynamicSampler" + GoalSampleRate = 15 + FieldList = ["request.method", "request.route"] + AddSampleRateKeyToTrace = true + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + [[dataset4.rule]] + SampleRate = 10 # default when no rules match, if missing defaults to 10 + + [dataset5] + + Sampler = "TotalThroughputSampler" + GoalThroughputPerSec = 100 + FieldList = "[request.method]" + diff --git a/build/opsramp-tracing-proxy/templates/service.yaml b/build/opsramp-tracing-proxy/templates/service.yaml new file mode 100644 index 0000000000..42aff90c16 --- /dev/null +++ b/build/opsramp-tracing-proxy/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "opsramp-tracing-proxy.fullname" . }} + labels: + {{- include "opsramp-tracing-proxy.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "opsramp-tracing-proxy.selectorLabels" . 
| nindent 4 }} diff --git a/build/opsramp-tracing-proxy/values.yaml b/build/opsramp-tracing-proxy/values.yaml new file mode 100644 index 0000000000..f538a8569a --- /dev/null +++ b/build/opsramp-tracing-proxy/values.yaml @@ -0,0 +1,55 @@ +replicaCount: 1 +podAnnotations: {} +imagePullSecrets: [] + +image: + repository: lokeshopsramp/tracing-proxy + pullPolicy: Always # use "IfNotPresent" to avoid pulling the image every time + tag: "latest" # if empty then defaults to the chart appVersion. + +service: + type: ClusterIP + port: 9090 + +# resources: +# limits: +# cpu: "4" +# memory: "8096Mi" +# requests: +# cpu: "2" +# memory: "2048Mi" + +nodeSelector: {} + +# Trace Proxy config file values +config: + api: "https://int.opsramp.net/" + key: "" + secret: "" + tenantId: "" + dataset: "ds" + useTls: "true" + UseTlsInsecure: "false" + proxy: + protocol: "" + server: "" + port: "" + username: "" + password: "" + +logging: + log_format: "json" # Accepted values are one of ["logfmt", "json"] + log_output: "stdout" # Accepted values are one of["stdout", "stderr"] + +metrics: + api: "https://int.opsramp.net/" + key: "" + secret: "" + tenantId: "" + list: ['".*"'] # escape the " with ' while specifying values + proxy: + protocol: "" + server: "" + port: "" + username: "" + password: "" From f388a5b00470e8e911a768a01ef177377f791130 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Mon, 9 Jan 2023 09:24:11 +0530 Subject: [PATCH 270/351] adding rules.yaml cm in values for helm chart --- .../templates/k8s-rules-cm.yaml | 251 +-------------- build/opsramp-tracing-proxy/values.yaml | 293 +++++++++++++++++- 2 files changed, 293 insertions(+), 251 deletions(-) diff --git a/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml b/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml index 926122599e..a23631a123 100644 --- a/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml +++ b/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml @@ -6,254 +6,5 @@ metadata: name: 
opsramp-tracing-proxy-rules data: rules.toml: |- - ############################ - ## Sampling Rules Config ## - ############################ - - # DryRun - If enabled, marks traces that would be dropped given current sampling rules, - # and sends all traces regardless - DryRun = true - - # DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to refinery_kept - DryRunFieldName = "fromProxy" - - # DeterministicSampler is a section of the config for manipulating the - # Deterministic Sampler implementation. This is the simplest sampling algorithm - # - it is a static sample rate, choosing traces randomly to either keep or send - # (at the appropriate rate). It is not influenced by the contents of the trace. - Sampler = "DeterministicSampler" - - # SampleRate is the rate at which to sample. It indicates a ratio, where one - # sample trace is kept for every n traces seen. For example, a SampleRate of 30 - # will keep 1 out of every 30 traces. The choice on whether to keep any specific - # trace is random, so the rate is approximate. - # Eligible for live reload. - SampleRate = 1 - - [dataset1] - - # Note: If your dataset name contains a space, you will have to escape the dataset name - # using single quotes, such as ['dataset 1'] - - # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler - # implementation. This sampler collects the values of a number of fields from a - # trace and uses them to form a key. This key is handed to the standard dynamic - # sampler algorithm which generates a sample rate based on the frequency with - # which that key has appeared in the previous ClearFrequencySec seconds. See - # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics - # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from - # that package. - Sampler = "DynamicSampler" - - # SampleRate is the goal rate at which to sample. 
It indicates a ratio, where - # one sample trace is kept for every n traces seen. For example, a SampleRate of - # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic - # sampler, who assigns a sample rate for each trace based on the fields selected - # from that trace. - # Eligible for live reload. - SampleRate = 2 - - # FieldList is a list of all the field names to use to form the key that will be - # handed to the dynamic sampler. The cardinality of the combination of values - # from all of these keys should be reasonable in the face of the frequency of - # those keys. If the combination of fields in these keys essentially makes them - # unique, the dynamic sampler will do no sampling. If the keys have too few - # values, you won't get samples of the most interesting traces. A good key - # selection will have consistent values for high frequency boring traffic and - # unique values for outliers and interesting traffic. Including an error field - # (or something like HTTP status code) is an excellent choice. As an example, - # assuming 30 or so endpoints, a combination of HTTP endpoint and status code - # would be a good set of keys in order to let you see accurately use of all - # endpoints and call out when there is failing traffic to any endpoint. Field - # names may come from any span in the trace. - # Eligible for live reload. - FieldList = ["request.method","response.status_code"] - - # UseTraceLength will add the number of spans in the trace in to the dynamic - # sampler as part of the key. The number of spans is exact, so if there are - # normally small variations in trace length you may want to leave this off. If - # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Opsramp, set this to true. - # Eligible for live reload. 
- UseTraceLength = true - - # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field - # to the root span of the trace containing the key used by the sampler to decide - # the sample rate. This can be helpful in understanding why the sampler is - # making certain decisions about sample rate and help you understand how to - # better choose the sample rate key (aka the FieldList setting above) to use. - AddSampleRateKeyToTrace = true - - # AddSampleRateKeyToTraceField is the name of the field the sampler will use - # when adding the sample rate key to the trace. This setting is only used when - # AddSampleRateKeyToTrace is true. - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - # ClearFrequencySec is the name of the field the sampler will use to determine - # the period over which it will calculate the sample rate. This setting defaults - # to 30. - # Eligible for live reload. - ClearFrequencySec = 60 - - [dataset2] - - # EMADynamicSampler is a section of the config for manipulating the Exponential - # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, - # it attempts to average a given sample rate, weighting rare traffic and frequent - # traffic differently so as to end up with the correct average. - # - # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended - # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs - # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential - # Moving Average of counts seen per key, and adjusts this average at regular intervals. - # The weight applied to more recent intervals is defined by `weight`, a number between - # (0, 1) - larger values weight the average more toward recent observations. 
In other words, - # a larger weight will cause sample rates more quickly adapt to traffic patterns, - # while a smaller weight will result in sample rates that are less sensitive to bursts or drops - # in traffic and thus more consistent over time. - # - # Keys that are not found in the EMA will always have a sample - # rate of 1. Keys that occur more frequently will be sampled on a logarithmic - # curve. In other words, every key will be represented at least once in any - # given window and more frequent keys will have their sample rate - # increased proportionally to wind up with the goal sample rate. - Sampler = "EMADynamicSampler" - - # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where - # one sample trace is kept for every n traces seen. For example, a SampleRate of - # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic - # sampler, who assigns a sample rate for each trace based on the fields selected - # from that trace. - # Eligible for live reload. - GoalSampleRate = 2 - - # FieldList is a list of all the field names to use to form the key that will be - # handed to the dynamic sampler. The cardinality of the combination of values - # from all of these keys should be reasonable in the face of the frequency of - # those keys. If the combination of fields in these keys essentially makes them - # unique, the dynamic sampler will do no sampling. If the keys have too few - # values, you won't get samples of the most interesting traces. A good key - # selection will have consistent values for high frequency boring traffic and - # unique values for outliers and interesting traffic. Including an error field - # (or something like HTTP status code) is an excellent choice. As an example, - # assuming 30 or so endpoints, a combination of HTTP endpoint and status code - # would be a good set of keys in order to let you see accurately use of all - # endpoints and call out when there is failing traffic to any endpoint. 
Field - # names may come from any span in the trace. - # Eligible for live reload. - FieldList = ["request.method","response.status_code"] - - # UseTraceLength will add the number of spans in the trace in to the dynamic - # sampler as part of the key. The number of spans is exact, so if there are - # normally small variations in trace length you may want to leave this off. If - # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Opsramp, set this to true. - # Eligible for live reload. - UseTraceLength = true - - # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field - # to the root span of the trace containing the key used by the sampler to decide - # the sample rate. This can be helpful in understanding why the sampler is - # making certain decisions about sample rate and help you understand how to - # better choose the sample rate key (aka the FieldList setting above) to use. - AddSampleRateKeyToTrace = true - - # AddSampleRateKeyToTraceField is the name of the field the sampler will use - # when adding the sample rate key to the trace. This setting is only used when - # AddSampleRateKeyToTrace is true. - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - # AdjustmentInterval defines how often (in seconds) we adjust the moving average from - # recent observations. Default 15s - # Eligible for live reload. - AdjustmentInterval = 15 - - # Weight is a value between (0, 1) indicating the weighting factor used to adjust - # the EMA. With larger values, newer data will influence the average more, and older - # values will be factored out more quickly. In mathematical literature concerning EMA, - # this is referred to as the `alpha` constant. - # Default is 0.5 - # Eligible for live reload. - Weight = 0.5 - - # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. 
- # Once MaxKeys is reached, new keys will not be included in the sample rate map, but - # existing keys will continue to be be counted. You can use this to keep the sample rate - # map size under control. - # Eligible for live reload - MaxKeys = 0 - - # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key - # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to - # decide what constitutes "zero". Keys with averages below this threshold will be removed - # from the EMA. Default is the same as Weight, as this prevents a key with the smallest - # integer value (1) from being aged out immediately. This value should generally be <= Weight, - # unless you have very specific reasons to set it higher. - # Eligible for live reload - AgeOutValue = 0.5 - - # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define - # the burst detection threshold. If total counts observed for a given interval exceed the threshold - # EMA is updated immediately, rather than waiting on the AdjustmentInterval. - # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, - # burst detection will kick in. - # Eligible for live reload - BurstMultiple = 2.0 - - # BurstDetectionDelay indicates the number of intervals to run after Start is called before - # burst detection kicks in. 
- # Defaults to 3 - # Eligible for live reload - BurstDetectionDelay = 3 - - [dataset3] - - Sampler = "DeterministicSampler" - SampleRate = 10 - - [dataset4] - - Sampler = "RulesBasedSampler" - - [[dataset4.rule]] - name = "drop healtchecks" - drop = true - [[dataset4.rule.condition]] - field = "http.route" - operator = "=" - value = "/health-check" - - [[dataset4.rule]] - name = "keep slow 500 errors" - SampleRate = 1 - [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = 500 - [[dataset4.rule.condition]] - field = "duration_ms" - operator = ">=" - value = 1000.789 - - [[dataset4.rule]] - name = "dynamically sample 200 responses" - [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = 200 - [dataset4.rule.sampler.EMADynamicSampler] - Sampler = "EMADynamicSampler" - GoalSampleRate = 15 - FieldList = ["request.method", "request.route"] - AddSampleRateKeyToTrace = true - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - [[dataset4.rule]] - SampleRate = 10 # default when no rules match, if missing defaults to 10 - - [dataset5] - - Sampler = "TotalThroughputSampler" - GoalThroughputPerSec = 100 - FieldList = "[request.method]" + {{ .Values.rules | nindent 4 }} diff --git a/build/opsramp-tracing-proxy/values.yaml b/build/opsramp-tracing-proxy/values.yaml index f538a8569a..4394fb6d41 100644 --- a/build/opsramp-tracing-proxy/values.yaml +++ b/build/opsramp-tracing-proxy/values.yaml @@ -3,7 +3,7 @@ podAnnotations: {} imagePullSecrets: [] image: - repository: lokeshopsramp/tracing-proxy + repository: us-docker.pkg.dev/opsramp-registry/agent-images/trace-proxy pullPolicy: Always # use "IfNotPresent" to avoid pulling the image every time tag: "latest" # if empty then defaults to the chart appVersion. 
@@ -53,3 +53,294 @@ metrics: port: "" username: "" password: "" + +rules: |- + ############################ + ## Sampling Rules Config ## + ############################ + + # DryRun - If enabled, marks traces that would be dropped given current sampling rules, + # and sends all traces regardless + DryRun = true + + # DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to refinery_kept + DryRunFieldName = "fromProxy" + + # DeterministicSampler is a section of the config for manipulating the + # Deterministic Sampler implementation. This is the simplest sampling algorithm + # - it is a static sample rate, choosing traces randomly to either keep or send + # (at the appropriate rate). It is not influenced by the contents of the trace. + Sampler = "DeterministicSampler" + + # SampleRate is the rate at which to sample. It indicates a ratio, where one + # sample trace is kept for every n traces seen. For example, a SampleRate of 30 + # will keep 1 out of every 30 traces. The choice on whether to keep any specific + # trace is random, so the rate is approximate. + # Eligible for live reload. + SampleRate = 1 + + [dataset1] + + # Note: If your dataset name contains a space, you will have to escape the dataset name + # using single quotes, such as ['dataset 1'] + + # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler + # implementation. This sampler collects the values of a number of fields from a + # trace and uses them to form a key. This key is handed to the standard dynamic + # sampler algorithm which generates a sample rate based on the frequency with + # which that key has appeared in the previous ClearFrequencySec seconds. See + # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics + # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from + # that package. 
+ Sampler = "DynamicSampler" + + # SampleRate is the goal rate at which to sample. It indicates a ratio, where + # one sample trace is kept for every n traces seen. For example, a SampleRate of + # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # sampler, who assigns a sample rate for each trace based on the fields selected + # from that trace. + # Eligible for live reload. + SampleRate = 2 + + # FieldList is a list of all the field names to use to form the key that will be + # handed to the dynamic sampler. The cardinality of the combination of values + # from all of these keys should be reasonable in the face of the frequency of + # those keys. If the combination of fields in these keys essentially makes them + # unique, the dynamic sampler will do no sampling. If the keys have too few + # values, you won't get samples of the most interesting traces. A good key + # selection will have consistent values for high frequency boring traffic and + # unique values for outliers and interesting traffic. Including an error field + # (or something like HTTP status code) is an excellent choice. As an example, + # assuming 30 or so endpoints, a combination of HTTP endpoint and status code + # would be a good set of keys in order to let you see accurately use of all + # endpoints and call out when there is failing traffic to any endpoint. Field + # names may come from any span in the trace. + # Eligible for live reload. + FieldList = ["request.method","response.status_code"] + + # UseTraceLength will add the number of spans in the trace in to the dynamic + # sampler as part of the key. The number of spans is exact, so if there are + # normally small variations in trace length you may want to leave this off. If + # traces are consistent lengths and changes in trace length is a useful + # indicator of traces you'd like to see in Opsramp, set this to true. + # Eligible for live reload. 
+ UseTraceLength = true + + # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # to the root span of the trace containing the key used by the sampler to decide + # the sample rate. This can be helpful in understanding why the sampler is + # making certain decisions about sample rate and help you understand how to + # better choose the sample rate key (aka the FieldList setting above) to use. + AddSampleRateKeyToTrace = true + + # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # when adding the sample rate key to the trace. This setting is only used when + # AddSampleRateKeyToTrace is true. + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + # ClearFrequencySec is the name of the field the sampler will use to determine + # the period over which it will calculate the sample rate. This setting defaults + # to 30. + # Eligible for live reload. + ClearFrequencySec = 60 + + [dataset2] + + # EMADynamicSampler is a section of the config for manipulating the Exponential + # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, + # it attempts to average a given sample rate, weighting rare traffic and frequent + # traffic differently so as to end up with the correct average. + # + # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended + # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs + # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential + # Moving Average of counts seen per key, and adjusts this average at regular intervals. + # The weight applied to more recent intervals is defined by `weight`, a number between + # (0, 1) - larger values weight the average more toward recent observations. 
In other words, + # a larger weight will cause sample rates more quickly adapt to traffic patterns, + # while a smaller weight will result in sample rates that are less sensitive to bursts or drops + # in traffic and thus more consistent over time. + # + # Keys that are not found in the EMA will always have a sample + # rate of 1. Keys that occur more frequently will be sampled on a logarithmic + # curve. In other words, every key will be represented at least once in any + # given window and more frequent keys will have their sample rate + # increased proportionally to wind up with the goal sample rate. + Sampler = "EMADynamicSampler" + + # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where + # one sample trace is kept for every n traces seen. For example, a SampleRate of + # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # sampler, who assigns a sample rate for each trace based on the fields selected + # from that trace. + # Eligible for live reload. + GoalSampleRate = 2 + + # FieldList is a list of all the field names to use to form the key that will be + # handed to the dynamic sampler. The cardinality of the combination of values + # from all of these keys should be reasonable in the face of the frequency of + # those keys. If the combination of fields in these keys essentially makes them + # unique, the dynamic sampler will do no sampling. If the keys have too few + # values, you won't get samples of the most interesting traces. A good key + # selection will have consistent values for high frequency boring traffic and + # unique values for outliers and interesting traffic. Including an error field + # (or something like HTTP status code) is an excellent choice. As an example, + # assuming 30 or so endpoints, a combination of HTTP endpoint and status code + # would be a good set of keys in order to let you see accurately use of all + # endpoints and call out when there is failing traffic to any endpoint. 
Field + # names may come from any span in the trace. + # Eligible for live reload. + FieldList = ["request.method","response.status_code"] + + # UseTraceLength will add the number of spans in the trace in to the dynamic + # sampler as part of the key. The number of spans is exact, so if there are + # normally small variations in trace length you may want to leave this off. If + # traces are consistent lengths and changes in trace length is a useful + # indicator of traces you'd like to see in Opsramp, set this to true. + # Eligible for live reload. + UseTraceLength = true + + # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # to the root span of the trace containing the key used by the sampler to decide + # the sample rate. This can be helpful in understanding why the sampler is + # making certain decisions about sample rate and help you understand how to + # better choose the sample rate key (aka the FieldList setting above) to use. + AddSampleRateKeyToTrace = true + + # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # when adding the sample rate key to the trace. This setting is only used when + # AddSampleRateKeyToTrace is true. + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + # AdjustmentInterval defines how often (in seconds) we adjust the moving average from + # recent observations. Default 15s + # Eligible for live reload. + AdjustmentInterval = 15 + + # Weight is a value between (0, 1) indicating the weighting factor used to adjust + # the EMA. With larger values, newer data will influence the average more, and older + # values will be factored out more quickly. In mathematical literature concerning EMA, + # this is referred to as the `alpha` constant. + # Default is 0.5 + # Eligible for live reload. + Weight = 0.5 + + # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. 
+ # Once MaxKeys is reached, new keys will not be included in the sample rate map, but + # existing keys will continue to be be counted. You can use this to keep the sample rate + # map size under control. + # Eligible for live reload + MaxKeys = 0 + + # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key + # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to + # decide what constitutes "zero". Keys with averages below this threshold will be removed + # from the EMA. Default is the same as Weight, as this prevents a key with the smallest + # integer value (1) from being aged out immediately. This value should generally be <= Weight, + # unless you have very specific reasons to set it higher. + # Eligible for live reload + AgeOutValue = 0.5 + + # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define + # the burst detection threshold. If total counts observed for a given interval exceed the threshold + # EMA is updated immediately, rather than waiting on the AdjustmentInterval. + # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, + # burst detection will kick in. + # Eligible for live reload + BurstMultiple = 2.0 + + # BurstDetectionDelay indicates the number of intervals to run after Start is called before + # burst detection kicks in. 
+ # Defaults to 3 + # Eligible for live reload + BurstDetectionDelay = 3 + + [dataset3] + + Sampler = "DeterministicSampler" + SampleRate = 10 + + [dataset4] + + Sampler = "RulesBasedSampler" + # Optional, if set to true then the rules will also check nested json fields, in the format of parent.child + CheckNestedFields = false + + [[dataset4.rule]] + name = "drop healthchecks" + drop = true + [[dataset4.rule.condition]] + field = "http.route" + operator = "=" + value = "/health-check" + + [[dataset4.rule]] + name = "keep slow 500 errors" + SampleRate = 1 + [[dataset4.rule.condition]] + field = "status_code" + operator = "=" + value = 500 + [[dataset4.rule.condition]] + field = "duration_ms" + operator = ">=" + value = 1000.789 + + [[dataset4.rule]] + name = "dynamically sample 200 responses" + [[dataset4.rule.condition]] + field = "status_code" + operator = "=" + value = 200 + [dataset4.rule.sampler.EMADynamicSampler] + Sampler = "EMADynamicSampler" + GoalSampleRate = 15 + FieldList = ["request.method", "request.route"] + AddSampleRateKeyToTrace = true + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + # Note that Refinery comparisons are type-dependent. If you are operating in an environment where different + # telemetry may send the same field with different types (for example, some systems send status codes as "200" + # instead of 200), you may need to create additional rules to cover these cases. 
+ [[dataset4.rule]] + name = "dynamically sample 200 string responses" + [[dataset4.rule.condition]] + field = "status_code" + operator = "=" + value = "200" + [dataset4.rule.sampler.EMADynamicSampler] + Sampler = "EMADynamicSampler" + GoalSampleRate = 15 + FieldList = ["request.method", "request.route"] + AddSampleRateKeyToTrace = true + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + [[dataset4.rule]] + name = "sample traces originating from a service" + # if scope is set to "span", a single span in the trace must match + # *all* of the conditions associated with this rule for the rule to + # apply to the trace. + # + # this is especially helpful when sampling a dataset written to + # by multiple services that call one another in normal operation - + # you can set Scope to 'span' to attribute traces to an origin + # service in a way that would be difficult without it. + Scope = "span" + SampleRate = 5 + [[dataset4.rule.condition]] + field = "service name" + operator = "=" + value = "users" + [[dataset4.rule.condition]] + field = "meta.span_type" + operator = "=" + value = "root" + + [[dataset4.rule]] + SampleRate = 10 # default when no rules match, if missing defaults to 10 + + [dataset5] + + Sampler = "TotalThroughputSampler" + GoalThroughputPerSec = 100 + FieldList = "[request.method]" From 60bb49b2f14146cda203914bdfc6dcd8266d4b09 Mon Sep 17 00:00:00 2001 From: "saikalyan.bhagavathula" Date: Mon, 9 Jan 2023 12:18:28 +0530 Subject: [PATCH 271/351] tracing-proxy rpm and debian packaging --- build/tracing-deb/configure.go | 57 ++++++++++++++++++ build/tracing-deb/script.sh | 24 ++++++++ build/tracing-deb/tracing/DEBIAN/conffiles | 2 + build/tracing-deb/tracing/DEBIAN/control | 7 +++ build/tracing-deb/tracing/DEBIAN/postinst | 2 + build/tracing-deb/tracing/DEBIAN/prerm | 9 +++ .../etc/systemd/system/tracing-proxy.service | 13 ++++ build/tracing-rpm/configure.go | 58 ++++++++++++++++++ .../etc/systemd/system/tracing-proxy.service | 13 ++++ 
build/tracing-rpm/script.sh | 31 ++++++++++ build/tracing-rpm/tracing-proxy.spec | 60 +++++++++++++++++++ 11 files changed, 276 insertions(+) create mode 100644 build/tracing-deb/configure.go create mode 100644 build/tracing-deb/script.sh create mode 100644 build/tracing-deb/tracing/DEBIAN/conffiles create mode 100644 build/tracing-deb/tracing/DEBIAN/control create mode 100755 build/tracing-deb/tracing/DEBIAN/postinst create mode 100755 build/tracing-deb/tracing/DEBIAN/prerm create mode 100644 build/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service create mode 100644 build/tracing-rpm/configure.go create mode 100644 build/tracing-rpm/etc/systemd/system/tracing-proxy.service create mode 100644 build/tracing-rpm/script.sh create mode 100644 build/tracing-rpm/tracing-proxy.spec diff --git a/build/tracing-deb/configure.go b/build/tracing-deb/configure.go new file mode 100644 index 0000000000..f8663aabc7 --- /dev/null +++ b/build/tracing-deb/configure.go @@ -0,0 +1,57 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" +) + +func main() { + var configFile, updatedConfigFile []byte + var err error + configFile, err = ioutil.ReadFile("/opt/opsramp/tracing-proxy/conf/config_complete.toml") + + api := flag.String("A", "", "API To Send Data") + key := flag.String("K", "", "Opsramp Key") + secret := flag.String("S", "", "Opsramp Secret") + tenant := flag.String("T", "", "Opsramp TenantID") + flag.Parse() + + opsrampApiHost := "OpsrampAPI = \"" + *api + "\"" + opsrampMetricsApiHost := "OpsRampMetricsAPI = \"" + *api + "\"" + + updatedConfigFile = bytes.Replace(configFile, []byte("OpsrampAPI = "), []byte(opsrampApiHost), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPI = "), []byte(opsrampMetricsApiHost), 1) + + opsrampKey := "OpsrampKey = \"" + *key + "\"" + opsrampMetricsApiKey := "OpsRampMetricsAPIKey = \"" + *key + "\"" + + updatedConfigFile = bytes.Replace(updatedConfigFile, 
[]byte("OpsrampKey = "), []byte(opsrampKey), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPIKey = "), []byte(opsrampMetricsApiKey), 1) + + OpsrampSecret := "OpsrampSecret = \"" + *secret + "\"" + OpsRampMetricsAPISecret := "OpsRampMetricsAPISecret = \"" + *secret + "\"" + + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampSecret = "), []byte(OpsrampSecret), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPISecret = "), []byte(OpsRampMetricsAPISecret), 1) + + opsrampTenantID := "OpsRampTenantID = \"" + *tenant + "\"" + TenantId := "TenantId = \"" + *tenant + "\"" + + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampTenantID = "), []byte(opsrampTenantID), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("TenantId = "), []byte(TenantId), 1) + + if err = ioutil.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.toml", updatedConfigFile, 0666); err != nil { + fmt.Println(err) + os.Exit(1) + } + + if _, err := exec.Command("systemctl", "start", "tracing-proxy").Output(); err != nil { + fmt.Println(err) + os.Exit(1) + } + fmt.Println("Tracing-Proxy Started Successfully") +} diff --git a/build/tracing-deb/script.sh b/build/tracing-deb/script.sh new file mode 100644 index 0000000000..5645b05e0d --- /dev/null +++ b/build/tracing-deb/script.sh @@ -0,0 +1,24 @@ +# $1 is a version of the package +Version=$1 +sed -i "/^Version/s/:.*$/: ${Version}/g" tracing/DEBIAN/control + +architecture=$(uname -m) +if [ "$architecture" = "x86_64" ]; then + architecture='amd64' +fi + + +sed -i "/^Architecture/s/:.*$/: ${architecture}/g" tracing/DEBIAN/control + +# Updating the files +cp ../../config_complete.toml tracing/opt/opsramp/tracing-proxy/conf/config_complete.toml +cp ../../rules_complete.toml tracing/opt/opsramp/tracing-proxy/conf/rules_complete.toml +go build ../cmd/tracing-proxy/main.go +cp ../../cmd/tracing-proxy/main 
tracing/opt/opsramp/tracing-proxy/bin/tracing-proxy + +dpkg -b tracing + + +# Rename the package with version and architecture +packageName="tracing-proxy_"$architecture"-"$Version".deb" +mv tracing.deb $packageName diff --git a/build/tracing-deb/tracing/DEBIAN/conffiles b/build/tracing-deb/tracing/DEBIAN/conffiles new file mode 100644 index 0000000000..aee0592e98 --- /dev/null +++ b/build/tracing-deb/tracing/DEBIAN/conffiles @@ -0,0 +1,2 @@ +/opt/opsramp/tracing-proxy/conf/config_complete.toml +/opt/opsramp/tracing-proxy/conf/rules_complete.toml diff --git a/build/tracing-deb/tracing/DEBIAN/control b/build/tracing-deb/tracing/DEBIAN/control new file mode 100644 index 0000000000..5180c14cc4 --- /dev/null +++ b/build/tracing-deb/tracing/DEBIAN/control @@ -0,0 +1,7 @@ +Package: tracing-proxy +Version: 5.0.0 +Architecture: amd64 +Essential: no +Priority: optional +Maintainer: sai kalyan +Description: This is tracing proxy debian package diff --git a/build/tracing-deb/tracing/DEBIAN/postinst b/build/tracing-deb/tracing/DEBIAN/postinst new file mode 100755 index 0000000000..59770d8662 --- /dev/null +++ b/build/tracing-deb/tracing/DEBIAN/postinst @@ -0,0 +1,2 @@ +mkdir -p /var/log/opsramp +touch /var/log/opsramp/tracing-proxy.log diff --git a/build/tracing-deb/tracing/DEBIAN/prerm b/build/tracing-deb/tracing/DEBIAN/prerm new file mode 100755 index 0000000000..13e56fab3b --- /dev/null +++ b/build/tracing-deb/tracing/DEBIAN/prerm @@ -0,0 +1,9 @@ +echo "Uninstalling Tracing Proxy" +systemctl stop tracing-proxy +if [ -f /etc/systemd/system/tracing-proxy.service ]; then + rm -rf /etc/systemd/system/tracing-proxy.service > /dev/null 2>&1 +fi +rm -rf /opt/opsramp/tracing-proxy +systemctl daemon-reload +systemctl reset-failed tracing-proxy.service +echo "Uninstalled Tracing Proxy Successfully" diff --git a/build/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service b/build/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service new file mode 100644 index 
0000000000..c233840ab3 --- /dev/null +++ b/build/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service @@ -0,0 +1,13 @@ +[Unit] +Description=tracing-proxy OpsRamp Trace-Aware Sampling Proxy +After=network.target + +[Service] +ExecStart=/opt/opsramp/tracing-proxy/bin/tracing-proxy -c /opt/opsramp/tracing-proxy/conf/config_complete.toml -r /opt/opsramp/tracing-proxy/conf/rules_complete.toml +KillMode=process +Restart=on-failure +LimitNOFILE=infinity + +[Install] +Alias=tracing-proxy tracing-proxy.service + diff --git a/build/tracing-rpm/configure.go b/build/tracing-rpm/configure.go new file mode 100644 index 0000000000..521a55474f --- /dev/null +++ b/build/tracing-rpm/configure.go @@ -0,0 +1,58 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" +) + +func main() { + var configFile, updatedConfigFile []byte + var err error + configFile, err = ioutil.ReadFile("/opt/opsramp/tracing-proxy/conf/config_complete.toml") + + api := flag.String("A", "", "API To Send Data") + key := flag.String("K", "", "Opsramp Key") + secret := flag.String("S", "", "Opsramp Secret") + tenant := flag.String("T", "", "Opsramp TenantID") + flag.Parse() + + opsrampApiHost := "OpsrampAPI = \"" + *api + "\"" + opsrampMetricsApiHost := "OpsRampMetricsAPI = \"" + *api + "\"" + + updatedConfigFile = bytes.Replace(configFile, []byte("OpsrampAPI = "), []byte(opsrampApiHost), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPI = "), []byte(opsrampMetricsApiHost), 1) + + opsrampKey := "OpsrampKey = \"" + *key + "\"" + opsrampMetricsApiKey := "OpsRampMetricsAPIKey = \"" + *key + "\"" + + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampKey = "), []byte(opsrampKey), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPIKey = "), []byte(opsrampMetricsApiKey), 1) + + OpsrampSecret := "OpsrampSecret = \"" + *secret + "\"" + OpsRampMetricsAPISecret := "OpsRampMetricsAPISecret = \"" + 
*secret + "\"" + + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampSecret = "), []byte(OpsrampSecret), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPISecret = "), []byte(OpsRampMetricsAPISecret), 1) + + opsrampTenantID := "OpsRampTenantID = \"" + *tenant + "\"" + TenantId := "TenantId = \"" + *tenant + "\"" + + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampTenantID = "), []byte(opsrampTenantID), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("TenantId = "), []byte(TenantId), 1) + + if err = ioutil.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.toml", updatedConfigFile, 0666); err != nil { + fmt.Println(err) + os.Exit(1) + } + + if _, err := exec.Command("systemctl", "start", "tracing-proxy").Output(); err != nil { + fmt.Println(err) + os.Exit(1) + } + fmt.Println("Tracing-Proxy Started Successfully") +} + diff --git a/build/tracing-rpm/etc/systemd/system/tracing-proxy.service b/build/tracing-rpm/etc/systemd/system/tracing-proxy.service new file mode 100644 index 0000000000..c233840ab3 --- /dev/null +++ b/build/tracing-rpm/etc/systemd/system/tracing-proxy.service @@ -0,0 +1,13 @@ +[Unit] +Description=tracing-proxy OpsRamp Trace-Aware Sampling Proxy +After=network.target + +[Service] +ExecStart=/opt/opsramp/tracing-proxy/bin/tracing-proxy -c /opt/opsramp/tracing-proxy/conf/config_complete.toml -r /opt/opsramp/tracing-proxy/conf/rules_complete.toml +KillMode=process +Restart=on-failure +LimitNOFILE=infinity + +[Install] +Alias=tracing-proxy tracing-proxy.service + diff --git a/build/tracing-rpm/script.sh b/build/tracing-rpm/script.sh new file mode 100644 index 0000000000..cac0ec9b12 --- /dev/null +++ b/build/tracing-rpm/script.sh @@ -0,0 +1,31 @@ +yum -y install rpmdevtools +rpmdev-setuptree + +# $2 is a release of the package +Release=$2 +sed -i "/^\%define release/s/^.*$/\%define release ${Release}/g" tracing-proxy.spec +# $1 is a version of the package 
+Version=$1 +sed -i "/^\%define version/s/^.*$/\%define version ${Version}/g" tracing-proxy.spec + +# Updating the files +cp ../../config_complete.toml opt/opsramp/tracing-proxy/conf/config_complete.toml +cp ../../rules_complete.toml opt/opsramp/tracing-proxy/conf/rules_complete.toml +go build ../../cmd/tracing-proxy/main.go +cp ../../cmd/tracing-proxy/main opt/opsramp/tracing-proxy/bin/tracing-proxy + + +mkdir tracing-proxy-$1 +cp -r opt tracing-proxy-$1 +cp -r etc tracing-proxy-$1 +tar -czvf tracing-proxy-$1.tar.gz tracing-proxy-$1 + + +cp tracing-proxy-$1.tar.gz /root/rpmbuild/SOURCES/ +cp tracing-proxy.spec /root/rpmbuild/SPECS/tracing-proxy.spec + + +rpmbuild -ba --clean /root/rpmbuild/SPECS/tracing-proxy.spec + + +echo "***** rpm package can be found in /root/rpmbuild/RPMS/x86_64/ ****" diff --git a/build/tracing-rpm/tracing-proxy.spec b/build/tracing-rpm/tracing-proxy.spec new file mode 100644 index 0000000000..1d7f6cf50f --- /dev/null +++ b/build/tracing-rpm/tracing-proxy.spec @@ -0,0 +1,60 @@ +# SPEC file for creating tracing-proxy RPM + +%define name tracing-proxy +%define release 1.0 +%define version 2.0.1 + +Summary: Tracing Proxy +License: OpsRamp +Name: %{name} +Version: %{version} +Source0: %{name}-%{version}.tar.gz +Release: %{release} +Provides: tracing-proxy +BuildRequires: bash + +%description +Tracing Proxy + +%prep +%setup -q -n %{name}-%{version} + +%install +%__rm -rf %{buildroot} +install -p -d -m 0755 %{buildroot}/opt/opsramp/tracing-proxy/bin +install -p -d -m 0755 %{buildroot}/opt/opsramp/tracing-proxy/conf +install -p -d -m 0755 %{buildroot}/etc/systemd/system +install -m 0775 opt/opsramp/tracing-proxy/bin/tracing-proxy %{buildroot}/opt/opsramp/tracing-proxy/bin/ +install -m 0775 opt/opsramp/tracing-proxy/bin/configure %{buildroot}/opt/opsramp/tracing-proxy/bin +install -m 0644 opt/opsramp/tracing-proxy/conf/config_complete.toml %{buildroot}/opt/opsramp/tracing-proxy/conf/ +install -m 0644 
opt/opsramp/tracing-proxy/conf/rules_complete.toml %{buildroot}/opt/opsramp/tracing-proxy/conf/ +install -m 0644 etc/systemd/system/tracing-proxy.service %{buildroot}/etc/systemd/system + +%clean +%__rm -rf %{buildroot} + +%files +/opt/opsramp/tracing-proxy/bin/ +/opt/opsramp/tracing-proxy/conf/ +/etc/systemd/system/tracing-proxy.service + + +%post -p /bin/bash +mkdir -p /var/log/opsramp +touch /var/log/opsramp/tracing-proxy.log +systemctl start tracing-proxy + + +%preun -p /bin/bash +echo "Uninstalling Tracing Proxy" +systemctl stop tracing-proxy +#if [ -f /etc/systemd/system/tracing-proxy.service ]; then +# rm -rf /etc/systemd/system/tracing-proxy.service > /dev/null 2>&1 +#fi +#rm -rf /opt/opsramp/tracing-proxy +#systemctl daemon-reload +#systemctl reset-failed tracing-proxy.service + +%postun -p /bin/bash +rm -d /opt/opsramp/tracing-proxy +echo "Uninstalled Tracing Proxy Successfully" From b0dfad243292bdf3a0155e5784c1e6f570a06e9c Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Thu, 12 Jan 2023 15:32:02 +0530 Subject: [PATCH 272/351] use same config for metrics --- Dockerfile | 2 +- cmd/tracing-proxy/main.go | 8 ++++---- config/file_config.go | 26 ++++++++++++++++++++++++++ 3 files changed, 31 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index eebb3e37a9..f49116842c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -20,7 +20,7 @@ RUN CGO_ENABLED=0 \ -o tracing-proxy \ ./cmd/tracing-proxy -FROM alpine:latest +FROM alpine:3.17 RUN apk update && apk add --no-cache bash ca-certificates && update-ca-certificates diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index 52033680fe..863d312ee3 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -116,18 +116,18 @@ func main() { // upstreamTransport is the http transport used to send things on to Honeycomb upstreamTransport := &http.Transport{ Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ + DialContext: (&net.Dialer{ Timeout: 10 * time.Second, - }).Dial, 
+ }).DialContext, TLSHandshakeTimeout: 15 * time.Second, } // peerTransport is the http transport used to send things to a local peer peerTransport := &http.Transport{ Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ + DialContext: (&net.Dialer{ Timeout: 3 * time.Second, - }).Dial, + }).DialContext, TLSHandshakeTimeout: 1200 * time.Millisecond, } diff --git a/config/file_config.go b/config/file_config.go index 18a20c5dd1..c024d79e7d 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -764,6 +764,32 @@ func (f *fileConfig) GetOpsRampMetricsConfig() (*OpsRampMetricsConfig, error) { opsRampMetricsConfig.OpsRampMetricsList = []string{".*"} } + // setting values from main configurations when OpsRampMetrics is empty + if opsRampMetricsConfig.OpsRampMetricsAPIKey == "" { + opsRampMetricsConfig.OpsRampMetricsAPIKey = f.conf.OpsrampAPI + } + if opsRampMetricsConfig.OpsRampMetricsAPISecret == "" { + opsRampMetricsConfig.OpsRampMetricsAPISecret = f.conf.OpsrampSecret + } + if opsRampMetricsConfig.OpsRampTenantID == "" { + opsRampMetricsConfig.OpsRampTenantID = f.conf.TenantId + } + if opsRampMetricsConfig.ProxyServer == "" { + opsRampMetricsConfig.ProxyServer = f.conf.ProxyServer + } + if opsRampMetricsConfig.ProxyPort <= 0 { + opsRampMetricsConfig.ProxyPort = f.conf.ProxyPort + } + if opsRampMetricsConfig.ProxyProtocol != "" { + opsRampMetricsConfig.ProxyProtocol = f.conf.ProxyProtocol + } + if opsRampMetricsConfig.ProxyUserName != "" { + opsRampMetricsConfig.ProxyUserName = f.conf.ProxyUsername + } + if opsRampMetricsConfig.ProxyPassword != "" { + opsRampMetricsConfig.ProxyPassword = f.conf.ProxyPassword + } + v := validator.New() err = v.Struct(opsRampMetricsConfig) if err != nil { From b9497bcf655a05fe622693f470e465c90c7296e7 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Fri, 13 Jan 2023 12:28:27 +0530 Subject: [PATCH 273/351] same config for metrics key and secrets --- .../templates/k8s-config-cm.yaml | 11 ++++++----- 
build/opsramp-tracing-proxy/values.yaml | 12 +----------- config/file_config.go | 12 ++++++------ 3 files changed, 13 insertions(+), 22 deletions(-) diff --git a/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml b/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml index 7e2f79a251..22857513b9 100644 --- a/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml +++ b/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml @@ -328,23 +328,24 @@ data: # ProxyProtocol accepts http and https # Not Eligible for live reload. - ProxyProtocol = {{ .Values.metrics.proxy.protocol | default "" | quote }} + {{- $proxy := .Values.metrics.proxy | default dict }} + ProxyProtocol = {{ $proxy.protocol | default "" | quote }} # ProxyServer takes the proxy server address # Not Eligible for live reload. - ProxyServer = {{ .Values.metrics.proxy.server | default "" | quote }} + ProxyServer = {{ $proxy.server | default "" | quote }} # ProxyPort takes the proxy server port # Not Eligible for live reload. - ProxyPort = {{ .Values.metrics.proxy.port | default 3128 }} + ProxyPort = {{ $proxy.port | default 3128 }} # ProxyUserName takes the proxy username # Not Eligible for live reload. - ProxyUserName = {{ .Values.metrics.proxy.username | default "" | quote }} + ProxyUserName = {{ $proxy.username | default "" | quote }} # ProxyPassword takes the proxy password # Not Eligible for live reload. - ProxyPassword = {{ .Values.metrics.proxy.password | default "" | quote }} + ProxyPassword = {{ $proxy.password | default "" | quote }} # OpsRampMetricsList is a list of regular expressions which match the metric # names. Keep the list as small as possible since too many regular expressions can lead to bad performance. 
diff --git a/build/opsramp-tracing-proxy/values.yaml b/build/opsramp-tracing-proxy/values.yaml index 4394fb6d41..b98bbd8c58 100644 --- a/build/opsramp-tracing-proxy/values.yaml +++ b/build/opsramp-tracing-proxy/values.yaml @@ -5,7 +5,7 @@ imagePullSecrets: [] image: repository: us-docker.pkg.dev/opsramp-registry/agent-images/trace-proxy pullPolicy: Always # use "IfNotPresent" to avoid pulling the image every time - tag: "latest" # if empty then defaults to the chart appVersion. + tag: "latest" # if empty, then defaults to the chart appVersion. service: type: ClusterIP @@ -42,17 +42,7 @@ logging: log_output: "stdout" # Accepted values are one of["stdout", "stderr"] metrics: - api: "https://int.opsramp.net/" - key: "" - secret: "" - tenantId: "" list: ['".*"'] # escape the " with ' while specifying values - proxy: - protocol: "" - server: "" - port: "" - username: "" - password: "" rules: |- ############################ diff --git a/config/file_config.go b/config/file_config.go index c024d79e7d..80235df6f3 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -96,12 +96,12 @@ type LogrusLoggerConfig struct { type OpsRampMetricsConfig struct { MetricsListenAddr string `validate:"required"` - OpsRampMetricsAPI string `validate:"required,url"` - OpsRampTenantID string `validate:"required"` - OpsRampMetricsAPIKey string `validate:"required"` - OpsRampMetricsAPISecret string `validate:"required"` - OpsRampMetricsReportingInterval int64 `validate:"required"` - OpsRampMetricsRetryCount int64 `validate:"required"` + OpsRampMetricsAPI string + OpsRampTenantID string + OpsRampMetricsAPIKey string + OpsRampMetricsAPISecret string + OpsRampMetricsReportingInterval int64 + OpsRampMetricsRetryCount int64 ProxyProtocol string ProxyServer string ProxyPort int64 From 1671080734c3943636c71330cbb73dd2f4fc23f1 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Fri, 13 Jan 2023 14:18:31 +0530 Subject: [PATCH 274/351] helm chart bug fix --- 
build/opsramp-tracing-proxy/templates/deployment.yaml | 3 ++- build/opsramp-tracing-proxy/templates/service.yaml | 4 ++-- config/file_config.go | 5 ++++- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/build/opsramp-tracing-proxy/templates/deployment.yaml b/build/opsramp-tracing-proxy/templates/deployment.yaml index 45e66eeacf..b6ea5fe508 100644 --- a/build/opsramp-tracing-proxy/templates/deployment.yaml +++ b/build/opsramp-tracing-proxy/templates/deployment.yaml @@ -21,13 +21,14 @@ spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} - {{- end }} + {{- end }} containers: - name: {{ .Chart.Name }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - containerPort: {{ .Values.service.port }} + name: http-tp resources: {{- toYaml .Values.resources | nindent 12 }} volumeMounts: diff --git a/build/opsramp-tracing-proxy/templates/service.yaml b/build/opsramp-tracing-proxy/templates/service.yaml index 42aff90c16..5d9d2c4d13 100644 --- a/build/opsramp-tracing-proxy/templates/service.yaml +++ b/build/opsramp-tracing-proxy/templates/service.yaml @@ -8,8 +8,8 @@ spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} - targetPort: http + targetPort: http-tp protocol: TCP - name: http + name: http-tp selector: {{- include "opsramp-tracing-proxy.selectorLabels" . 
| nindent 4 }} diff --git a/config/file_config.go b/config/file_config.go index 80235df6f3..3cfbefb52a 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -765,8 +765,11 @@ func (f *fileConfig) GetOpsRampMetricsConfig() (*OpsRampMetricsConfig, error) { } // setting values from main configurations when OpsRampMetrics is empty + if opsRampMetricsConfig.OpsRampMetricsAPI == "" { + opsRampMetricsConfig.OpsRampMetricsAPI = f.conf.OpsrampAPI + } if opsRampMetricsConfig.OpsRampMetricsAPIKey == "" { - opsRampMetricsConfig.OpsRampMetricsAPIKey = f.conf.OpsrampAPI + opsRampMetricsConfig.OpsRampMetricsAPIKey = f.conf.OpsrampKey } if opsRampMetricsConfig.OpsRampMetricsAPISecret == "" { opsRampMetricsConfig.OpsRampMetricsAPISecret = f.conf.OpsrampSecret From 0200355ea9fe3e78fcd70c25382698b23ddce9ce Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Fri, 13 Jan 2023 14:30:28 +0530 Subject: [PATCH 275/351] helm chart bug fix --- build/opsramp-tracing-proxy/templates/service.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/opsramp-tracing-proxy/templates/service.yaml b/build/opsramp-tracing-proxy/templates/service.yaml index 5d9d2c4d13..2f1dce7922 100644 --- a/build/opsramp-tracing-proxy/templates/service.yaml +++ b/build/opsramp-tracing-proxy/templates/service.yaml @@ -8,7 +8,7 @@ spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} - targetPort: http-tp + targetPort: {{ .Values.service.port }} protocol: TCP name: http-tp selector: From dc8c4c430b2240d4ac25ca11aea0199927c9cfe5 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Sat, 14 Jan 2023 00:07:27 +0530 Subject: [PATCH 276/351] helm chart bug fixes --- .../templates/_helpers.tpl | 77 +++++ .../templates/deployment.yaml | 22 +- .../templates/k8s-config-cm.yaml | 230 ++++++++++++-- .../templates/k8s-rules-cm.yaml | 11 +- .../opsramp-tracing-proxy/templates/rules.tpl | 295 ++++++++++++++++++ .../templates/service.yaml | 8 +- 
build/opsramp-tracing-proxy/values.yaml | 199 +----------- 7 files changed, 598 insertions(+), 244 deletions(-) create mode 100644 build/opsramp-tracing-proxy/templates/rules.tpl diff --git a/build/opsramp-tracing-proxy/templates/_helpers.tpl b/build/opsramp-tracing-proxy/templates/_helpers.tpl index b4fa749a2c..bd9dc6a52b 100644 --- a/build/opsramp-tracing-proxy/templates/_helpers.tpl +++ b/build/opsramp-tracing-proxy/templates/_helpers.tpl @@ -49,3 +49,80 @@ Selector labels app.kubernetes.io/name: {{ include "opsramp-tracing-proxy.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} + +{{/* +Service Defaults +*/}} +{{- define "serviceType" -}} +{{ if .Values.service }} {{ default "ClusterIP" .Values.service.type | quote }} {{ else }} "ClusterIP" {{ end }} +{{- end }} +{{- define "servicePort" -}} +{{ if .Values.service }} {{ default 9090 .Values.service.port }} {{ else }} 9090 {{ end }} +{{- end }} + +{{/* +Image Defaults +*/}} +{{- define "imagePullPolicy" -}} +{{ if .Values.image }} {{ default "Always" .Values.image.pullPolicy | quote }} {{ else }} "Always" {{ end }} +{{- end }} + +{{/* +Config Defautls +*/}} +{{- define "opsrampApiServer" -}} +{{ if .Values.config }} {{ default "" .Values.config.api | quote }} {{ else }} "" {{ end }} +{{- end }} +{{- define "opsrampKey" -}} +{{ if .Values.config }} {{ default "" .Values.config.key | quote }} {{ else }} "" {{ end }} +{{- end }} +{{- define "opsrampSecret" -}} +{{ if .Values.config }} {{ default "" .Values.config.secret | quote }} {{ else }} "" {{ end }} +{{- end }} +{{- define "opsrampTenantId" -}} +{{ if .Values.config }} {{ default "" .Values.config.tenantId | quote }} {{ else }} "" {{ end }} +{{- end }} +{{- define "dataset" -}} +{{ if .Values.config }} {{ default "ds" .Values.config.dataset | quote }} {{ else }} "ds" {{ end }} +{{- end }} +{{- define "useTLS" -}} +{{ if .Values.config }} {{ default true .Values.config.useTls }} {{ else }} true {{ end }} +{{- end }} +{{- define 
"useTlsInsecure" -}} +{{ if .Values.config }} {{ default false .Values.config.useTlsInsecure }} {{ else }} false {{ end }} +{{- end }} +{{- define "sendMetricsToOpsRamp" -}} +{{ if .Values.config }} {{ default true .Values.config.sendMetricsToOpsRamp }} {{ else }} true {{ end }} +{{- end }} +{{- define "proxyProtocol" -}} +{{ if and .Values.config .Values.config.proxy }} {{ default "" $.Values.config.proxy.protocol | quote }} {{ else }} "" {{ end }} +{{- end }} +{{- define "proxyServer" -}} +{{ if and .Values.config .Values.config.proxy }} {{ default "" $.Values.config.proxy.server | quote }} {{ else }} "" {{ end }} +{{- end }} +{{- define "proxyPort" -}} +{{ if and .Values.config .Values.config.proxy }} {{ default 3128 $.Values.config.proxy.port }} {{ else }} 3128 {{ end }} +{{- end }} +{{- define "proxyUsername" -}} +{{ if and .Values.config .Values.config.proxy }} {{ default "" $.Values.config.proxy.username | quote }} {{ else }} "" {{ end }} +{{- end }} +{{- define "proxyPassword" -}} +{{ if and .Values.config .Values.config.proxy }} {{ default "" $.Values.config.proxy.password | quote }} {{ else }} "" {{ end }} +{{- end }} + +{{/* +Logging Defautls +*/}} +{{- define "logFormat" -}} +{{ if .Values.logging }} {{ default "json" .Values.logging.logFormat | quote }} {{ else }} "json" {{ end }} +{{- end }} +{{- define "logOutput" -}} +{{ if .Values.logging }} {{ default "stdout" .Values.logging.logOutput | quote }} {{ else }} "stdout" {{ end }} +{{- end }} + +{{/* +Metrics Defaults +*/}} +{{- define "metricsList" -}} +{{ if .Values.metrics }} {{ default `[".*"]` .Values.metrics.list }} {{ else }} [".*"] {{ end }} +{{- end }} \ No newline at end of file diff --git a/build/opsramp-tracing-proxy/templates/deployment.yaml b/build/opsramp-tracing-proxy/templates/deployment.yaml index b6ea5fe508..f03dedf22d 100644 --- a/build/opsramp-tracing-proxy/templates/deployment.yaml +++ b/build/opsramp-tracing-proxy/templates/deployment.yaml @@ -24,29 +24,31 @@ spec: {{- end }} 
containers: - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{- include "imagePullPolicy" . }} ports: - - containerPort: {{ .Values.service.port }} - name: http-tp + - containerPort: {{- include "servicePort" . }} + name: tp-http + {{- with .Values.resources }} resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} volumeMounts: - - name: opsramp-tracing-rules + - name: {{ include "opsramp-tracing-proxy.fullname" . }}-rules mountPath: /etc/tracing-proxy/rules.toml subPath: rules.toml readOnly: true - - name: opsramp-tracing-config + - name: {{ include "opsramp-tracing-proxy.fullname" . }}-config mountPath: /etc/tracing-proxy/config.toml subPath: config.toml readOnly: true volumes: - configMap: - name: opsramp-tracing-proxy-rules - name: opsramp-tracing-rules + name: {{ include "opsramp-tracing-proxy.fullname" . }}-rules + name: {{ include "opsramp-tracing-proxy.fullname" . }}-rules - configMap: - name: opsramp-tracing-proxy-config - name: opsramp-tracing-config + name: {{ include "opsramp-tracing-proxy.fullname" . }}-config + name: {{ include "opsramp-tracing-proxy.fullname" . }}-config {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml b/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml index 22857513b9..4475e4a36e 100644 --- a/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml +++ b/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml @@ -1,9 +1,9 @@ apiVersion: v1 kind: ConfigMap metadata: - name: opsramp-tracing-proxy-config + name: {{ include "opsramp-tracing-proxy.fullname" . }}-config labels: - name: opsramp-tracing-proxy-config + {{- include "opsramp-tracing-proxy.labels" . 
| nindent 4 }} data: config.toml: |- ##################### @@ -35,29 +35,29 @@ data: # ProxyProtocol accepts http and https # Not Eligible for live reload. - ProxyProtocol = {{ .Values.config.proxy.protocol | default "" | quote }} + ProxyProtocol = {{- include "proxyProtocol" . }} # ProxyServer takes the proxy server address # Not Eligible for live reload. - ProxyServer = {{ .Values.config.proxy.server | default "" | quote }} + ProxyServer = {{- include "proxyServer" . }} # ProxyPort takes the proxy server port # Not Eligible for live reload. - ProxyPort = {{ .Values.config.proxy.port | default 3128 }} + ProxyPort = {{- include "proxyPort" . }} # ProxyUserName takes the proxy username # Not Eligible for live reload. - ProxyUserName = {{ .Values.config.proxy.username | default "" | quote }} + ProxyUserName = {{- include "proxyUsername" . }} # ProxyPassword takes the proxy password # Not Eligible for live reload. - ProxyPassword = {{ .Values.config.proxy.password | default "" | quote }} + ProxyPassword = {{- include "proxyPassword" . }} # CompressPeerCommunication determines whether refinery will compress span data # it forwards to peers. If it costs money to transmit data between refinery # instances (e.g. they're spread across AWS availability zones), then you # almost certainly want compression enabled to reduce your bill. The option to - # disable it is provided as an escape hatch for deployments that value lower CPU + # disable it is provided as an escape hatch for deployments that value lower CPU # utilization over data transfer costs. CompressPeerCommunication = true @@ -76,23 +76,23 @@ data: # OpsrampAPI is the URL for the upstream Opsramp API. # Eligible for live reload. - OpsrampAPI = {{ .Values.config.api | default "" | quote }} + OpsrampAPI = {{- include "opsrampApiServer" . }} # OpsrampKey is used to get the OauthToken - OpsrampKey = {{ .Values.config.key | default "" | quote }} + OpsrampKey = {{- include "opsrampKey" . 
}} # OpsrampSecret is used to get the OauthToken - OpsrampSecret = {{ .Values.config.secret | default "" | quote }} + OpsrampSecret = {{- include "opsrampSecret" . }} # Traces are send to the client with given tenantid - TenantId = {{ .Values.config.tenantId | default "" | quote }} + TenantId = {{- include "opsrampTenantId" . }} # Dataset you want to use for sampling - Dataset = {{ .Values.config.dataset | default "ds" | quote }} + Dataset = {{- include "dataset" . }} #Tls Options - UseTls = {{ .Values.config.useTls }} - UseTlsInsecure = {{ .Values.config.UseTlsInsecure }} + UseTls = {{- include "useTLS" . }} + UseTlsInsecure = {{- include "useTlsInsecure" . }} # SendDelay is a short timer that will be triggered when a trace is complete. # Refinery will wait this duration before actually sending the trace. The @@ -103,6 +103,11 @@ data: # Eligible for live reload. SendDelay = "2s" + # BatchTimeout dictates how frequently to send unfulfilled batches. By default + # this will use the DefaultBatchTimeout in libhoney as its value, which is 100ms. + # Eligible for live reload. + BatchTimeout = "1s" + # TraceTimeout is a long timer; it represents the outside boundary of how long # to wait before sending an incomplete trace. Normally traces are sent when the # root span arrives. Sometimes the root span never arrives (due to crashes or @@ -143,9 +148,59 @@ data: # Not eligible for live reload AddHostMetadataToTrace = false + # EnvironmentCacheTTL is the amount of time a cache entry will live that associates + # an API key with an environment name. + # Cache misses lookup the environment name using HoneycombAPI config value. + # Default is 1 hour ("1h"). + # Not eligible for live reload. + EnvironmentCacheTTL = "1h" + + # QueryAuthToken, if specified, provides a token that must be specified with + # the header "X-Honeycomb-Refinery-Query" in order for a /query request to succeed. 
+ # These /query requests are intended for debugging refinery installations and + # are not typically needed in normal operation. + # Can be specified in the environment as REFINERY_QUERY_AUTH_TOKEN. + # If left unspecified, the /query endpoints are inaccessible. + # Not eligible for live reload. + # QueryAuthToken = "some-random-value" + + # AddRuleReasonToTrace causes traces that are sent to Honeycomb to include the field `meta.refinery.reason`. + # This field contains text indicating which rule was evaluated that caused the trace to be included. + # Eligible for live reload. + # AddRuleReasonToTrace = true + + # AdditionalErrorFields should be a list of span fields that should be included when logging + # errors that happen during ingestion of events (for example, the span too large error). + # This is primarily useful in trying to track down misbehaving senders in a large installation. + # The fields `dataset`, `apihost`, and `environment` are always included. + # If a field is not present in the span, it will not be present in the error log. + # Default is ["trace.span_id"]. + # Eligible for live reload. + AdditionalErrorFields = [ + "trace.span_id" + ] + + # AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate + # the number of child spans on the trace at the time the sampling decision was made. + # This value is available to the rules-based sampler, making it possible to write rules that + # are dependent upon the number of spans in the trace. + # Default is false. + # Eligible for live reload. + # AddSpanCountToRoot = true + + # CacheOverrunStrategy controls the cache management behavior under memory pressure. + # "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, + # which is generally not helpful unless it occurs because of a permanent change in traffic patterns. 
+ # In the "impact" strategy, the items having the most impact on the cache size are + # ejected from the cache earlier than normal but the cache is not resized. + # In all cases, it only applies if MaxAlloc is nonzero. + # Default is "resize" for compatibility but "impact" is recommended for most installations. + # Eligible for live reload. + # CacheOverrunStrategy = "impact" + # Metrics are sent to OpsRamp (The collection happens based on configuration specifie # in OpsRampMetrics and only works when the Metrics is set to "prometheus") - SendMetricsToOpsRamp = true + SendMetricsToOpsRamp = {{- include "sendMetricsToOpsRamp" . }} ############################ ## Implementation Choices ## @@ -190,6 +245,12 @@ data: # Not eligible for live reload. # RedisHost = "localhost:6379" + # RedisUsername is the username used to connect to redis for peer cluster membership management. + # If the environment variable 'REFINERY_REDIS_USERNAME' is set it takes + # precedence and this value is ignored. + # Not eligible for live reload. + # RedisUsername = "" + # RedisPassword is the password used to connect to redis for peer cluster membership management. # If the environment variable 'REFINERY_REDIS_PASSWORD' is set it takes # precedence and this value is ignored. @@ -225,6 +286,20 @@ data: # Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set. # RedisIdentifier = "192.168.1.1" + # Timeout is optional. By default, when using RedisHost, Refinery will timeout + # after 5s when communicating with Redis. + # Timeout = "5s" + + # Strategy controls the way that traces are assigned to refinery nodes. + # The "legacy" strategy uses a simple algorithm that unfortunately causes + # 1/2 of the in-flight traces to be assigned to a different node whenever the + # number of nodes changes. + # The legacy strategy is deprecated and is intended to be removed in a future release. 
+ # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the + # number of nodes) are disrupted when the node count changes. + # Not eligible for live reload. + # Strategy = "hash" + ######################### ## In-Memory Collector ## ######################### @@ -248,7 +323,8 @@ data: # supported. # If set to a non-zero value, once per tick (see SendTicker) the collector # will compare total allocated bytes to this value. If allocation is too - # high, cache capacity will be reduced and an error will be logged. + # high, cache capacity will be adjusted according to the setting for + # CacheOverrunStrategy. # Useful values for this setting are generally in the range of 75%-90% of # available system memory. MaxAlloc = 0 @@ -264,10 +340,10 @@ data: [LogrusLogger] # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] - LogFormatter = {{ .Values.logging.log_format | default "json" | quote }} + LogFormatter = {{- include "logFormat" . }} # LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr", "file"] - LogOutput = {{ .Values.logging.log_output | default "stdout" | quote }} + LogOutput = {{- include "logOutput" . }} ## LogrusLogger.File - specifies configs for logs when LogOutput is set to "file" [LogrusLogger.File] @@ -286,9 +362,9 @@ data: Compress = true - #####################@## + ####################### ## Prometheus Metrics ## - #####################@## + ####################### [OpsRampMetrics] # MetricsListenAddr determines the interface and port on which Prometheus will @@ -299,23 +375,23 @@ data: # OpsRampMetricsAPI is the URL for the upstream OpsRamp API. # Not Eligible for live reload. - OpsRampMetricsAPI = {{ .Values.metrics.api | default "" | quote }} + OpsRampMetricsAPI = {{- include "opsrampApiServer" . }} # OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. # Not Eligible for live reload. 
- OpsRampTenantID = {{ .Values.metrics.tenantId | default "" | quote }} + OpsRampTenantID = {{- include "opsrampTenantId" . }} # OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. # This is separate from the APIKeys used to authenticate regular # traffic. # Not Eligible for live reload. - OpsRampMetricsAPIKey = {{ .Values.metrics.key | default "" | quote }} + OpsRampMetricsAPIKey = {{- include "opsrampKey" . }} # OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. # This is separate from the APISecret used to authenticate regular # traffic. # Not Eligible for live reload. - OpsRampMetricsAPISecret = {{ .Values.metrics.secret | default "" | quote }} + OpsRampMetricsAPISecret = {{- include "opsrampSecret" . }} # OpsRampMetricsReportingInterval is frequency specified in seconds at which # the metrics are collected and sent to OpsRamp @@ -328,27 +404,117 @@ data: # ProxyProtocol accepts http and https # Not Eligible for live reload. - {{- $proxy := .Values.metrics.proxy | default dict }} - ProxyProtocol = {{ $proxy.protocol | default "" | quote }} + ProxyProtocol = {{- include "proxyProtocol" . }} # ProxyServer takes the proxy server address # Not Eligible for live reload. - ProxyServer = {{ $proxy.server | default "" | quote }} + ProxyServer = {{- include "proxyServer" . }} # ProxyPort takes the proxy server port # Not Eligible for live reload. - ProxyPort = {{ $proxy.port | default 3128 }} + ProxyPort = {{- include "proxyPort" . }} # ProxyUserName takes the proxy username # Not Eligible for live reload. - ProxyUserName = {{ $proxy.username | default "" | quote }} + ProxyUserName = {{- include "proxyUsername" . }} # ProxyPassword takes the proxy password # Not Eligible for live reload. - ProxyPassword = {{ $proxy.password | default "" | quote }} + ProxyPassword = {{- include "proxyPassword" . }} # OpsRampMetricsList is a list of regular expressions which match the metric # names. 
Keep the list as small as possible since too many regular expressions can lead to bad performance. # Internally all the regex in the list are concatinated using '|' to make the computation little faster. # Not Eligible for live reload - OpsRampMetricsList = [{{ .Values.metrics.list | join "," }}] + OpsRampMetricsList = {{- include "metricsList" . }} + + + [GRPCServerParameters] + + # MaxConnectionIdle is a duration for the amount of time after which an + # idle connection would be closed by sending a GoAway. Idleness duration is + # defined since the most recent time the number of outstanding RPCs became + # zero or the connection establishment. + # 0s sets duration to infinity which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219 + # Not eligible for live reload. + # MaxConnectionIdle = "1m" + + # MaxConnectionAge is a duration for the maximum amount of time a + # connection may exist before it will be closed by sending a GoAway. A + # random jitter of +/-10% will be added to MaxConnectionAge to spread out + # connection storms. + # 0s sets duration to infinity which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 + # Not eligible for live reload. + # MaxConnectionAge = "0s" + + # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + # which the connection will be forcibly closed. + # 0s sets duration to infinity which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 + # Not eligible for live reload. + # MaxConnectionAgeGrace = "0s" + + # After a duration of this time if the server doesn't see any activity it + # pings the client to see if the transport is still alive. 
+ # If set below 1s, a minimum value of 1s will be used instead. + # 0s sets duration to 2 hours which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 + # Not eligible for live reload. + # Time = "10s" + + # After having pinged for keepalive check, the server waits for a duration + # of Timeout and if no activity is seen even after that the connection is + # closed. + # 0s sets duration to 20 seconds which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 + # Not eligible for live reload. + # Timeout = "2s" + + + + ################################ + ## Sample Cache Configuration ## + ################################ + + # Sample Cache Configuration controls the sample cache used to retain information about trace + # status after the sampling decision has been made. + + [SampleCacheConfig] + + # Type controls the type of sample cache used. + # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is + # 5x the size of the trace cache. This is Refinery's original sample cache strategy. + # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember + # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. + # It is also more configurable. The cuckoo filter is recommended for most installations. + # Default is "legacy". + # Not eligible for live reload (you cannot change the type of cache with reload). + # Type = "cuckoo" + + # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. + # Refinery keeps a record of each trace that was kept and sent to Honeycomb, along with some + # statistical information. 
This is most useful in cases where the trace was sent before sending + # the root span, so that the root span can be decorated with accurate metadata. + # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). + # Does not apply to the "legacy" type of cache. + # Eligible for live reload. + # KeptSize = 10_000 + + # DroppedSize controls the size of the cuckoo dropped traces cache. + # This cache consumes 4-6 bytes per trace at a scale of millions of traces. + # Changing its size with live reload sets a future limit, but does not have an immediate effect. + # Default is 1_000_000 traces. + # Does not apply to the "legacy" type of cache. + # Eligible for live reload. + # DroppedSize = 1_000_000 + + # SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates + # the remaining capacity of its dropped traces cache and possibly cycles it. + # This cache is quite resilient so it doesn't need to happen very often, but the + # operation is also inexpensive. + # Default is 10 seconds. + # Does not apply to the "legacy" type of cache. + # Eligible for live reload. + # SizeCheckInterval = "10s" \ No newline at end of file diff --git a/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml b/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml index a23631a123..a5ad1d7294 100644 --- a/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml +++ b/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml @@ -1,10 +1,13 @@ apiVersion: v1 kind: ConfigMap metadata: - name: opsramp-tracing-proxy-rules + name: {{ include "opsramp-tracing-proxy.fullname" . }}-rules labels: - name: opsramp-tracing-proxy-rules + {{- include "opsramp-tracing-proxy.labels" . | nindent 4 }} data: rules.toml: |- - {{ .Values.rules | nindent 4 }} - + {{- with .Values.rules }} + {{ . 
| nindent 4 }} + {{ else }} + {{- include "rulesTOML" | nindent 4}} + {{- end }} diff --git a/build/opsramp-tracing-proxy/templates/rules.tpl b/build/opsramp-tracing-proxy/templates/rules.tpl new file mode 100644 index 0000000000..d64590d2ce --- /dev/null +++ b/build/opsramp-tracing-proxy/templates/rules.tpl @@ -0,0 +1,295 @@ +{{/* +Complete Rules TOML +*/}} + +{{- define "rulesTOML" -}} +############################ +## Sampling Rules Config ## +############################ + +# DryRun - If enabled, marks traces that would be dropped given current sampling rules, +# and sends all traces regardless +DryRun = true + +# DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to refinery_kept +DryRunFieldName = "fromProxy" + +# DeterministicSampler is a section of the config for manipulating the +# Deterministic Sampler implementation. This is the simplest sampling algorithm +# - it is a static sample rate, choosing traces randomly to either keep or send +# (at the appropriate rate). It is not influenced by the contents of the trace. +Sampler = "DeterministicSampler" + +# SampleRate is the rate at which to sample. It indicates a ratio, where one +# sample trace is kept for every n traces seen. For example, a SampleRate of 30 +# will keep 1 out of every 30 traces. The choice on whether to keep any specific +# trace is random, so the rate is approximate. +# Eligible for live reload. +SampleRate = 1 + +[dataset1] + + # Note: If your dataset name contains a space, you will have to escape the dataset name + # using single quotes, such as ['dataset 1'] + + # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler + # implementation. This sampler collects the values of a number of fields from a + # trace and uses them to form a key. 
This key is handed to the standard dynamic + # sampler algorithm which generates a sample rate based on the frequency with + # which that key has appeared in the previous ClearFrequencySec seconds. See + # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics + # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from + # that package. + Sampler = "DynamicSampler" + + # SampleRate is the goal rate at which to sample. It indicates a ratio, where + # one sample trace is kept for every n traces seen. For example, a SampleRate of + # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # sampler, who assigns a sample rate for each trace based on the fields selected + # from that trace. + # Eligible for live reload. + SampleRate = 2 + + # FieldList is a list of all the field names to use to form the key that will be + # handed to the dynamic sampler. The cardinality of the combination of values + # from all of these keys should be reasonable in the face of the frequency of + # those keys. If the combination of fields in these keys essentially makes them + # unique, the dynamic sampler will do no sampling. If the keys have too few + # values, you won't get samples of the most interesting traces. A good key + # selection will have consistent values for high frequency boring traffic and + # unique values for outliers and interesting traffic. Including an error field + # (or something like HTTP status code) is an excellent choice. As an example, + # assuming 30 or so endpoints, a combination of HTTP endpoint and status code + # would be a good set of keys in order to let you see accurately use of all + # endpoints and call out when there is failing traffic to any endpoint. Field + # names may come from any span in the trace. + # Eligible for live reload. 
+ FieldList = ["request.method","response.status_code"] + + # UseTraceLength will add the number of spans in the trace in to the dynamic + # sampler as part of the key. The number of spans is exact, so if there are + # normally small variations in trace length you may want to leave this off. If + # traces are consistent lengths and changes in trace length is a useful + # indicator of traces you'd like to see in Opsramp, set this to true. + # Eligible for live reload. + UseTraceLength = true + + # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # to the root span of the trace containing the key used by the sampler to decide + # the sample rate. This can be helpful in understanding why the sampler is + # making certain decisions about sample rate and help you understand how to + # better choose the sample rate key (aka the FieldList setting above) to use. + AddSampleRateKeyToTrace = true + + # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # when adding the sample rate key to the trace. This setting is only used when + # AddSampleRateKeyToTrace is true. + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + # ClearFrequencySec is the name of the field the sampler will use to determine + # the period over which it will calculate the sample rate. This setting defaults + # to 30. + # Eligible for live reload. + ClearFrequencySec = 60 + +[dataset2] + + # EMADynamicSampler is a section of the config for manipulating the Exponential + # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, + # it attempts to average a given sample rate, weighting rare traffic and frequent + # traffic differently so as to end up with the correct average. + # + # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended + # for most use cases. 
Based on the DynamicSampler implementation, EMADynamicSampler differs + # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential + # Moving Average of counts seen per key, and adjusts this average at regular intervals. + # The weight applied to more recent intervals is defined by `weight`, a number between + # (0, 1) - larger values weight the average more toward recent observations. In other words, + # a larger weight will cause sample rates more quickly adapt to traffic patterns, + # while a smaller weight will result in sample rates that are less sensitive to bursts or drops + # in traffic and thus more consistent over time. + # + # Keys that are not found in the EMA will always have a sample + # rate of 1. Keys that occur more frequently will be sampled on a logarithmic + # curve. In other words, every key will be represented at least once in any + # given window and more frequent keys will have their sample rate + # increased proportionally to wind up with the goal sample rate. + Sampler = "EMADynamicSampler" + + # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where + # one sample trace is kept for every n traces seen. For example, a SampleRate of + # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # sampler, who assigns a sample rate for each trace based on the fields selected + # from that trace. + # Eligible for live reload. + GoalSampleRate = 2 + + # FieldList is a list of all the field names to use to form the key that will be + # handed to the dynamic sampler. The cardinality of the combination of values + # from all of these keys should be reasonable in the face of the frequency of + # those keys. If the combination of fields in these keys essentially makes them + # unique, the dynamic sampler will do no sampling. If the keys have too few + # values, you won't get samples of the most interesting traces. 
A good key + # selection will have consistent values for high frequency boring traffic and + # unique values for outliers and interesting traffic. Including an error field + # (or something like HTTP status code) is an excellent choice. As an example, + # assuming 30 or so endpoints, a combination of HTTP endpoint and status code + # would be a good set of keys in order to let you see accurately use of all + # endpoints and call out when there is failing traffic to any endpoint. Field + # names may come from any span in the trace. + # Eligible for live reload. + FieldList = ["request.method","response.status_code"] + + # UseTraceLength will add the number of spans in the trace in to the dynamic + # sampler as part of the key. The number of spans is exact, so if there are + # normally small variations in trace length you may want to leave this off. If + # traces are consistent lengths and changes in trace length is a useful + # indicator of traces you'd like to see in Opsramp, set this to true. + # Eligible for live reload. + UseTraceLength = true + + # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # to the root span of the trace containing the key used by the sampler to decide + # the sample rate. This can be helpful in understanding why the sampler is + # making certain decisions about sample rate and help you understand how to + # better choose the sample rate key (aka the FieldList setting above) to use. + AddSampleRateKeyToTrace = true + + # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # when adding the sample rate key to the trace. This setting is only used when + # AddSampleRateKeyToTrace is true. + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + # AdjustmentInterval defines how often (in seconds) we adjust the moving average from + # recent observations. Default 15s + # Eligible for live reload. 
+ AdjustmentInterval = 15 + + # Weight is a value between (0, 1) indicating the weighting factor used to adjust + # the EMA. With larger values, newer data will influence the average more, and older + # values will be factored out more quickly. In mathematical literature concerning EMA, + # this is referred to as the `alpha` constant. + # Default is 0.5 + # Eligible for live reload. + Weight = 0.5 + + # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. + # Once MaxKeys is reached, new keys will not be included in the sample rate map, but + # existing keys will continue to be be counted. You can use this to keep the sample rate + # map size under control. + # Eligible for live reload + MaxKeys = 0 + + # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key + # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to + # decide what constitutes "zero". Keys with averages below this threshold will be removed + # from the EMA. Default is the same as Weight, as this prevents a key with the smallest + # integer value (1) from being aged out immediately. This value should generally be <= Weight, + # unless you have very specific reasons to set it higher. + # Eligible for live reload + AgeOutValue = 0.5 + + # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define + # the burst detection threshold. If total counts observed for a given interval exceed the threshold + # EMA is updated immediately, rather than waiting on the AdjustmentInterval. + # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, + # burst detection will kick in. + # Eligible for live reload + BurstMultiple = 2.0 + + # BurstDetectionDelay indicates the number of intervals to run after Start is called before + # burst detection kicks in. 
+ # Defaults to 3 + # Eligible for live reload + BurstDetectionDelay = 3 + +[dataset3] + + Sampler = "DeterministicSampler" + SampleRate = 10 + +[dataset4] + + Sampler = "RulesBasedSampler" + # Optional, if set to true then the rules will also check nested json fields, in the format of parent.child + CheckNestedFields = false + + [[dataset4.rule]] + name = "drop healthchecks" + drop = true + [[dataset4.rule.condition]] + field = "http.route" + operator = "=" + value = "/health-check" + + [[dataset4.rule]] + name = "keep slow 500 errors" + SampleRate = 1 + [[dataset4.rule.condition]] + field = "status_code" + operator = "=" + value = 500 + [[dataset4.rule.condition]] + field = "duration_ms" + operator = ">=" + value = 1000.789 + + [[dataset4.rule]] + name = "dynamically sample 200 responses" + [[dataset4.rule.condition]] + field = "status_code" + operator = "=" + value = 200 + [dataset4.rule.sampler.EMADynamicSampler] + Sampler = "EMADynamicSampler" + GoalSampleRate = 15 + FieldList = ["request.method", "request.route"] + AddSampleRateKeyToTrace = true + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + # Note that Refinery comparisons are type-dependent. If you are operating in an environment where different + # telemetry may send the same field with different types (for example, some systems send status codes as "200" + # instead of 200), you may need to create additional rules to cover these cases. 
+ [[dataset4.rule]] + name = "dynamically sample 200 string responses" + [[dataset4.rule.condition]] + field = "status_code" + operator = "=" + value = "200" + [dataset4.rule.sampler.EMADynamicSampler] + Sampler = "EMADynamicSampler" + GoalSampleRate = 15 + FieldList = ["request.method", "request.route"] + AddSampleRateKeyToTrace = true + AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" + + [[dataset4.rule]] + name = "sample traces originating from a service" + # if scope is set to "span", a single span in the trace must match + # *all* of the conditions associated with this rule for the rule to + # apply to the trace. + # + # this is especially helpful when sampling a dataset written to + # by multiple services that call one another in normal operation – + # you can set Scope to 'span' to attribute traces to an origin + # service in a way that would be difficult without it. + Scope = "span" + SampleRate = 5 + [[dataset4.rule.condition]] + field = "service name" + operator = "=" + value = "users" + [[dataset4.rule.condition]] + field = "meta.span_type" + operator = "=" + value = "root" + + [[dataset4.rule]] + SampleRate = 10 # default when no rules match, if missing defaults to 10 + +[dataset5] + + Sampler = "TotalThroughputSampler" + GoalThroughputPerSec = 100 + FieldList = "[request.method]" +{{- end }} \ No newline at end of file diff --git a/build/opsramp-tracing-proxy/templates/service.yaml b/build/opsramp-tracing-proxy/templates/service.yaml index 2f1dce7922..0c901e1ed3 100644 --- a/build/opsramp-tracing-proxy/templates/service.yaml +++ b/build/opsramp-tracing-proxy/templates/service.yaml @@ -5,11 +5,11 @@ metadata: labels: {{- include "opsramp-tracing-proxy.labels" . | nindent 4 }} spec: - type: {{ .Values.service.type }} + type: {{- include "serviceType" . }} ports: - - port: {{ .Values.service.port }} - targetPort: {{ .Values.service.port }} + - port: {{- include "servicePort" . }} + targetPort: {{- include "servicePort" . 
}} protocol: TCP - name: http-tp + name: tp-http selector: {{- include "opsramp-tracing-proxy.selectorLabels" . | nindent 4 }} diff --git a/build/opsramp-tracing-proxy/values.yaml b/build/opsramp-tracing-proxy/values.yaml index b98bbd8c58..9a8913c6be 100644 --- a/build/opsramp-tracing-proxy/values.yaml +++ b/build/opsramp-tracing-proxy/values.yaml @@ -28,8 +28,9 @@ config: secret: "" tenantId: "" dataset: "ds" - useTls: "true" - UseTlsInsecure: "false" + useTls: true + UseTlsInsecure: false + sendMetricsToOpsRamp: true proxy: protocol: "" server: "" @@ -38,222 +39,44 @@ config: password: "" logging: - log_format: "json" # Accepted values are one of ["logfmt", "json"] - log_output: "stdout" # Accepted values are one of["stdout", "stderr"] + logFormat: "json" # Accepted values are one of ["logfmt", "json"] + logOutput: "stdout" # Accepted values are one of["stdout", "stderr"] metrics: list: ['".*"'] # escape the " with ' while specifying values rules: |- - ############################ - ## Sampling Rules Config ## - ############################ - - # DryRun - If enabled, marks traces that would be dropped given current sampling rules, - # and sends all traces regardless - DryRun = true - - # DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to refinery_kept - DryRunFieldName = "fromProxy" - - # DeterministicSampler is a section of the config for manipulating the - # Deterministic Sampler implementation. This is the simplest sampling algorithm - # - it is a static sample rate, choosing traces randomly to either keep or send - # (at the appropriate rate). It is not influenced by the contents of the trace. - Sampler = "DeterministicSampler" - - # SampleRate is the rate at which to sample. It indicates a ratio, where one - # sample trace is kept for every n traces seen. For example, a SampleRate of 30 - # will keep 1 out of every 30 traces. 
The choice on whether to keep any specific - # trace is random, so the rate is approximate. - # Eligible for live reload. SampleRate = 1 [dataset1] - - # Note: If your dataset name contains a space, you will have to escape the dataset name - # using single quotes, such as ['dataset 1'] - - # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler - # implementation. This sampler collects the values of a number of fields from a - # trace and uses them to form a key. This key is handed to the standard dynamic - # sampler algorithm which generates a sample rate based on the frequency with - # which that key has appeared in the previous ClearFrequencySec seconds. See - # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics - # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from - # that package. Sampler = "DynamicSampler" - - # SampleRate is the goal rate at which to sample. It indicates a ratio, where - # one sample trace is kept for every n traces seen. For example, a SampleRate of - # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic - # sampler, who assigns a sample rate for each trace based on the fields selected - # from that trace. - # Eligible for live reload. SampleRate = 2 - - # FieldList is a list of all the field names to use to form the key that will be - # handed to the dynamic sampler. The cardinality of the combination of values - # from all of these keys should be reasonable in the face of the frequency of - # those keys. If the combination of fields in these keys essentially makes them - # unique, the dynamic sampler will do no sampling. If the keys have too few - # values, you won't get samples of the most interesting traces. A good key - # selection will have consistent values for high frequency boring traffic and - # unique values for outliers and interesting traffic. 
Including an error field - # (or something like HTTP status code) is an excellent choice. As an example, - # assuming 30 or so endpoints, a combination of HTTP endpoint and status code - # would be a good set of keys in order to let you see accurately use of all - # endpoints and call out when there is failing traffic to any endpoint. Field - # names may come from any span in the trace. - # Eligible for live reload. FieldList = ["request.method","response.status_code"] - - # UseTraceLength will add the number of spans in the trace in to the dynamic - # sampler as part of the key. The number of spans is exact, so if there are - # normally small variations in trace length you may want to leave this off. If - # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Opsramp, set this to true. - # Eligible for live reload. UseTraceLength = true - - # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field - # to the root span of the trace containing the key used by the sampler to decide - # the sample rate. This can be helpful in understanding why the sampler is - # making certain decisions about sample rate and help you understand how to - # better choose the sample rate key (aka the FieldList setting above) to use. AddSampleRateKeyToTrace = true - - # AddSampleRateKeyToTraceField is the name of the field the sampler will use - # when adding the sample rate key to the trace. This setting is only used when - # AddSampleRateKeyToTrace is true. AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - # ClearFrequencySec is the name of the field the sampler will use to determine - # the period over which it will calculate the sample rate. This setting defaults - # to 30. - # Eligible for live reload. ClearFrequencySec = 60 [dataset2] - - # EMADynamicSampler is a section of the config for manipulating the Exponential - # Moving Average (EMA) Dynamic Sampler implementation. 
Like the simple DynamicSampler, - # it attempts to average a given sample rate, weighting rare traffic and frequent - # traffic differently so as to end up with the correct average. - # - # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended - # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs - # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential - # Moving Average of counts seen per key, and adjusts this average at regular intervals. - # The weight applied to more recent intervals is defined by `weight`, a number between - # (0, 1) - larger values weight the average more toward recent observations. In other words, - # a larger weight will cause sample rates more quickly adapt to traffic patterns, - # while a smaller weight will result in sample rates that are less sensitive to bursts or drops - # in traffic and thus more consistent over time. - # - # Keys that are not found in the EMA will always have a sample - # rate of 1. Keys that occur more frequently will be sampled on a logarithmic - # curve. In other words, every key will be represented at least once in any - # given window and more frequent keys will have their sample rate - # increased proportionally to wind up with the goal sample rate. Sampler = "EMADynamicSampler" - - # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where - # one sample trace is kept for every n traces seen. For example, a SampleRate of - # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic - # sampler, who assigns a sample rate for each trace based on the fields selected - # from that trace. - # Eligible for live reload. GoalSampleRate = 2 - - # FieldList is a list of all the field names to use to form the key that will be - # handed to the dynamic sampler. 
The cardinality of the combination of values - # from all of these keys should be reasonable in the face of the frequency of - # those keys. If the combination of fields in these keys essentially makes them - # unique, the dynamic sampler will do no sampling. If the keys have too few - # values, you won't get samples of the most interesting traces. A good key - # selection will have consistent values for high frequency boring traffic and - # unique values for outliers and interesting traffic. Including an error field - # (or something like HTTP status code) is an excellent choice. As an example, - # assuming 30 or so endpoints, a combination of HTTP endpoint and status code - # would be a good set of keys in order to let you see accurately use of all - # endpoints and call out when there is failing traffic to any endpoint. Field - # names may come from any span in the trace. - # Eligible for live reload. FieldList = ["request.method","response.status_code"] - - # UseTraceLength will add the number of spans in the trace in to the dynamic - # sampler as part of the key. The number of spans is exact, so if there are - # normally small variations in trace length you may want to leave this off. If - # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Opsramp, set this to true. - # Eligible for live reload. UseTraceLength = true - - # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field - # to the root span of the trace containing the key used by the sampler to decide - # the sample rate. This can be helpful in understanding why the sampler is - # making certain decisions about sample rate and help you understand how to - # better choose the sample rate key (aka the FieldList setting above) to use. AddSampleRateKeyToTrace = true - - # AddSampleRateKeyToTraceField is the name of the field the sampler will use - # when adding the sample rate key to the trace. 
This setting is only used when - # AddSampleRateKeyToTrace is true. AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - # AdjustmentInterval defines how often (in seconds) we adjust the moving average from - # recent observations. Default 15s - # Eligible for live reload. AdjustmentInterval = 15 - - # Weight is a value between (0, 1) indicating the weighting factor used to adjust - # the EMA. With larger values, newer data will influence the average more, and older - # values will be factored out more quickly. In mathematical literature concerning EMA, - # this is referred to as the `alpha` constant. - # Default is 0.5 - # Eligible for live reload. Weight = 0.5 - - # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. - # Once MaxKeys is reached, new keys will not be included in the sample rate map, but - # existing keys will continue to be be counted. You can use this to keep the sample rate - # map size under control. - # Eligible for live reload MaxKeys = 0 - - # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key - # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to - # decide what constitutes "zero". Keys with averages below this threshold will be removed - # from the EMA. Default is the same as Weight, as this prevents a key with the smallest - # integer value (1) from being aged out immediately. This value should generally be <= Weight, - # unless you have very specific reasons to set it higher. - # Eligible for live reload AgeOutValue = 0.5 - - # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define - # the burst detection threshold. If total counts observed for a given interval exceed the threshold - # EMA is updated immediately, rather than waiting on the AdjustmentInterval. - # Defaults to 2; negative value disables. 
With a default of 2, if your traffic suddenly doubles, - # burst detection will kick in. - # Eligible for live reload BurstMultiple = 2.0 - - # BurstDetectionDelay indicates the number of intervals to run after Start is called before - # burst detection kicks in. - # Defaults to 3 - # Eligible for live reload BurstDetectionDelay = 3 [dataset3] - Sampler = "DeterministicSampler" SampleRate = 10 [dataset4] - Sampler = "RulesBasedSampler" - # Optional, if set to true then the rules will also check nested json fields, in the format of parent.child CheckNestedFields = false [[dataset4.rule]] @@ -288,10 +111,6 @@ rules: |- FieldList = ["request.method", "request.route"] AddSampleRateKeyToTrace = true AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - # Note that Refinery comparisons are type-dependent. If you are operating in an environment where different - # telemetry may send the same field with different types (for example, some systems send status codes as "200" - # instead of 200), you may need to create additional rules to cover these cases. [[dataset4.rule]] name = "dynamically sample 200 string responses" [[dataset4.rule.condition]] @@ -307,14 +126,6 @@ rules: |- [[dataset4.rule]] name = "sample traces originating from a service" - # if scope is set to "span", a single span in the trace must match - # *all* of the conditions associated with this rule for the rule to - # apply to the trace. - # - # this is especially helpful when sampling a dataset written to - # by multiple services that call one another in normal operation - - # you can set Scope to 'span' to attribute traces to an origin - # service in a way that would be difficult without it. 
Scope = "span" SampleRate = 5 [[dataset4.rule.condition]] From 829129d83af88c1858a971d7d13c7d3af53a0fbd Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Tue, 17 Jan 2023 11:07:26 +0530 Subject: [PATCH 277/351] helm chart bug fixes --- build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml | 2 +- build/opsramp-tracing-proxy/values.yaml | 2 +- config_complete.toml | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml b/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml index 4475e4a36e..bfca4ee767 100644 --- a/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml +++ b/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml @@ -22,7 +22,7 @@ data: # something like nginx in front to do the decryption. # Should be of the form 0.0.0.0:9090 # Not eligible for live reload. - GRPCListenAddr = "0.0.0.0:4317" + GRPCListenAddr = "0.0.0.0:{{- include "servicePort" . | trim }}" # PeerListenAddr is the IP and port on which to listen for traffic being # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL diff --git a/build/opsramp-tracing-proxy/values.yaml b/build/opsramp-tracing-proxy/values.yaml index 9a8913c6be..119dbc1349 100644 --- a/build/opsramp-tracing-proxy/values.yaml +++ b/build/opsramp-tracing-proxy/values.yaml @@ -23,7 +23,7 @@ nodeSelector: {} # Trace Proxy config file values config: - api: "https://int.opsramp.net/" + api: "" key: "" secret: "" tenantId: "" diff --git a/config_complete.toml b/config_complete.toml index 837cdd91d8..b2ae653260 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -368,23 +368,23 @@ MetricsListenAddr = "localhost:2112" # OpsRampMetricsAPI is the URL for the upstream OpsRamp API. # Not Eligible for live reload. -OpsRampMetricsAPI = "https://int.opsramp.net" +OpsRampMetricsAPI = "" # OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. # Not Eligible for live reload. 
-OpsRampTenantID = "3748c67e-bec1-4cad-bd8b-8f2f8ea840f3" +OpsRampTenantID = "" # OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. # This is separate from the APIKeys used to authenticate regular # traffic. # Not Eligible for live reload. -OpsRampMetricsAPIKey = "***REMOVED***" +OpsRampMetricsAPIKey = "" # OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. # This is separate from the APISecret used to authenticate regular # traffic. # Not Eligible for live reload. -OpsRampMetricsAPISecret = "***REMOVED***" +OpsRampMetricsAPISecret = "" # OpsRampMetricsReportingInterval is frequency specified in seconds at which # the metrics are collected and sent to OpsRamp From 726fcd8a7787f70dddb4d2580d054c8290938288 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 25 Jan 2023 10:45:39 +0530 Subject: [PATCH 278/351] removing prints --- cmd/tracing-proxy/main.go | 2 -- go.mod | 4 ++-- go.sum | 8 ++++---- route/otlp_trace.go | 38 +++++++++++++------------------------- 4 files changed, 19 insertions(+), 33 deletions(-) diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index 863d312ee3..4a6b9b190a 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -162,8 +162,6 @@ func main() { os.Exit(1) } - fmt.Println("upstream client created..") - peerClient, err := libtrace.NewClient(libtrace.ClientConfig{ Transmission: &transmission.Opsramptraceproxy{ MaxBatchSize: c.GetMaxBatchSize(), diff --git a/go.mod b/go.mod index 9ba7016b18..fc5cb7192b 100644 --- a/go.mod +++ b/go.mod @@ -19,8 +19,8 @@ require ( github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 - github.com/opsramp/husky v0.0.0-20230106175229-05ae58a2f113 - github.com/opsramp/libtrace-go v0.0.0-20230105172035-b8e891da9e4f + github.com/opsramp/husky v0.0.0-20230125043214-9eaefd645d5b + 
github.com/opsramp/libtrace-go v0.0.0-20230125043249-3c7090a38201 github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index ed28ad3fb8..848fdbea17 100644 --- a/go.sum +++ b/go.sum @@ -585,10 +585,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/opsramp/husky v0.0.0-20230106175229-05ae58a2f113 h1:T1akIY4QpFHE+0ff9Yc9RAhtQRHTC0zu2jyZPf7U6/Y= -github.com/opsramp/husky v0.0.0-20230106175229-05ae58a2f113/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230105172035-b8e891da9e4f h1:6o+Egn2XdbibnEyd6qeAQOwL77m/rumnDVXzSMsBH0Y= -github.com/opsramp/libtrace-go v0.0.0-20230105172035-b8e891da9e4f/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/husky v0.0.0-20230125043214-9eaefd645d5b h1:UCPQdTGlfqS/Cs4BO7C8lRppKy23Ok8OyQlz5c6Nt+U= +github.com/opsramp/husky v0.0.0-20230125043214-9eaefd645d5b/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= +github.com/opsramp/libtrace-go v0.0.0-20230125043249-3c7090a38201 h1:x+lUyWYj+gOmne8XVwZBOS1YRv+2ekezCGwh+XN5WeI= +github.com/opsramp/libtrace-go v0.0.0-20230125043249-3c7090a38201/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod 
h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= diff --git a/route/otlp_trace.go b/route/otlp_trace.go index c956f194c2..732d84e8b2 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -16,36 +16,27 @@ import ( collectortrace "github.com/opsramp/husky/proto/otlp/collector/trace/v1" ) -func (router *Router) postOTLP(w http.ResponseWriter, req *http.Request) { +func (r *Router) postOTLP(w http.ResponseWriter, req *http.Request) { ri := huskyotlp.GetRequestInfoFromHttpHeaders(req.Header) - //if err := ri.ValidateTracesHeaders(); err != nil { - // if errors.Is(err, huskyotlp.ErrInvalidContentType) { - // router.handlerReturnWithError(w, ErrInvalidContentType, err) - // } else { - // router.handlerReturnWithError(w, ErrAuthNeeded, err) - // } - // return - //} - result, err := huskyotlp.TranslateTraceRequestFromReader(req.Body, ri) if err != nil { - router.handlerReturnWithError(w, ErrUpstreamFailed, err) + r.handlerReturnWithError(w, ErrUpstreamFailed, err) return } token := ri.ApiToken tenantId := ri.ApiTenantId - if err := processTraceRequest(req.Context(), router, result.Batches, ri.Dataset, token, tenantId); err != nil { - router.handlerReturnWithError(w, ErrUpstreamFailed, err) + if err := processTraceRequest(req.Context(), r, result.Batches, ri.Dataset, token, tenantId); err != nil { + r.handlerReturnWithError(w, ErrUpstreamFailed, err) } } -func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) { +func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) { ri := huskyotlp.GetRequestInfoFromGrpcMetadata(ctx) - router.Metrics.Increment(router.incomingOrPeer + "_router_batch") - fmt.Println("Translating Trace Req ..") + r.Metrics.Increment(r.incomingOrPeer + 
"_router_batch") + result, err := huskyotlp.TranslateTraceRequest(req, ri) if err != nil { return nil, huskyotlp.AsGRPCError(err) @@ -53,19 +44,19 @@ func (router *Router) Export(ctx context.Context, req *collectortrace.ExportTrac token := ri.ApiToken tenantId := ri.ApiTenantId if len(tenantId) == 0 { - OpsrampTenantId, _ := router.Config.GetTenantId() + OpsrampTenantId, _ := r.Config.GetTenantId() tenantId = OpsrampTenantId } if len(ri.Dataset) == 0 { - dataset, _ := router.Config.GetDataset() + dataset, _ := r.Config.GetDataset() ri.Dataset = dataset } - fmt.Println("TenantId:", tenantId) - fmt.Println("dataset:", ri.Dataset) + r.Logger.Debug().Logf("TenantId: %s", tenantId) + r.Logger.Debug().Logf("dataset:", ri.Dataset) - if err := processTraceRequest(ctx, router, result.Batches, ri.Dataset, token, tenantId); err != nil { + if err := processTraceRequest(ctx, r, result.Batches, ri.Dataset, token, tenantId); err != nil { return nil, huskyotlp.AsGRPCError(err) } @@ -111,7 +102,7 @@ func processTraceRequest( func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTraceProxyServiceRequest) (*proxypb.ExportTraceProxyServiceResponse, error) { - fmt.Println("Received Trace data from peer \n") + r.Logger.Debug().Logf("Received Trace data from peer") r.Metrics.Increment(r.incomingOrPeer + "_router_batch") var token, tenantId, datasetName string @@ -126,7 +117,6 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get request metadata", Status: "Failed"}, nil } else { authorization := md.Get("Authorization") - fmt.Println("authorization is ", authorization) if len(authorization) == 0 { return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get Authorization", Status: "Failed"}, nil } else { @@ -139,8 +129,6 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr datasetName = md.Get("dataset")[0] } } - 
log.Printf("\nauthorization:%v", token) - log.Printf("\nTenantId:%v", tenantId) } var requestID types.RequestIDContextKey From 39408cdb7e05b4afe165b5ccd9330192392614c9 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 1 Feb 2023 10:04:03 +0530 Subject: [PATCH 279/351] update libtrace-go --- go.mod | 2 +- go.sum | 4 ++-- route/otlp_trace.go | 31 +++++++++++++++++++++++++------ 3 files changed, 28 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index fc5cb7192b..898286334f 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 github.com/opsramp/husky v0.0.0-20230125043214-9eaefd645d5b - github.com/opsramp/libtrace-go v0.0.0-20230125043249-3c7090a38201 + github.com/opsramp/libtrace-go v0.0.0-20230201042643-641737bf3d05 github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 848fdbea17..aea53f9b7b 100644 --- a/go.sum +++ b/go.sum @@ -587,8 +587,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/opsramp/husky v0.0.0-20230125043214-9eaefd645d5b h1:UCPQdTGlfqS/Cs4BO7C8lRppKy23Ok8OyQlz5c6Nt+U= github.com/opsramp/husky v0.0.0-20230125043214-9eaefd645d5b/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230125043249-3c7090a38201 h1:x+lUyWYj+gOmne8XVwZBOS1YRv+2ekezCGwh+XN5WeI= -github.com/opsramp/libtrace-go v0.0.0-20230125043249-3c7090a38201/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/libtrace-go v0.0.0-20230201042643-641737bf3d05 
h1:yiAd0O0Mdx4/fQF7hTJ0dMkfCywlLF/ZgJyQn3QbJyI= +github.com/opsramp/libtrace-go v0.0.0-20230201042643-641737bf3d05/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 732d84e8b2..0199c87e1e 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -6,8 +6,8 @@ import ( "fmt" proxypb "github.com/opsramp/libtrace-go/proto/proxypb" "google.golang.org/grpc/metadata" - "log" "net/http" + "strings" "time" huskyotlp "github.com/opsramp/husky/otlp" @@ -113,7 +113,6 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr } md, ok := metadata.FromIncomingContext(ctx) if !ok { - log.Println("Failed to get metadata") return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get request metadata", Status: "Failed"}, nil } else { authorization := md.Get("Authorization") @@ -123,12 +122,20 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr token = authorization[0] recvdTenantId := md.Get("tenantId") if len(recvdTenantId) == 0 { - return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get TenantId", Status: "Failed"}, nil + tenantId = strings.TrimSpace(in.TenantId) + if tenantId == "" { + return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get TenantId", Status: "Failed"}, nil + } } else { tenantId = recvdTenantId[0] - datasetName = md.Get("dataset")[0] } } + + if dataSets := md.Get("dataset"); len(dataSets) > 0 { + datasetName = dataSets[0] + } else { + return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get dataset", Status: "Failed"}, nil + } } var 
requestID types.RequestIDContextKey @@ -136,10 +143,22 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr for _, item := range in.Items { layout := "2006-01-02 15:04:05.000000000 +0000 UTC" timestamp, err := time.Parse(layout, item.Timestamp) + if err != nil { + r.Logger.Error().Logf("failed to parse timestamp: %v", err) + continue + } var data map[string]interface{} - inrec, _ := json.Marshal(item.Data) - json.Unmarshal(inrec, &data) + inrec, err := json.Marshal(item.Data) + if err != nil { + r.Logger.Error().Logf("failed to marshal: %v", err) + continue + } + err = json.Unmarshal(inrec, &data) + if err != nil { + r.Logger.Error().Logf("failed to unmarshal: %v", err) + continue + } //Translate ResourceAttributes , SpanAttributes, EventAttributes from proto format to interface{} attributes := make(map[string]interface{}) From b533b3d45d23df05c0d65041f4a85aa3f745601e Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 1 Feb 2023 12:29:03 +0530 Subject: [PATCH 280/351] trace-proxy helm chart for cluster --- .../templates/_helpers.tpl | 76 +-- .../templates/deployment-redis.yaml | 49 ++ .../templates/deployment.yaml | 26 +- .../templates/k8s-config-cm.yaml | 515 +---------------- .../templates/k8s-rules-cm.yaml | 8 +- .../templates/service-redis.yaml | 18 + .../templates/service.yaml | 19 +- build/opsramp-tracing-proxy/values.yaml | 521 +++++++++++++----- 8 files changed, 503 insertions(+), 729 deletions(-) create mode 100644 build/opsramp-tracing-proxy/templates/deployment-redis.yaml create mode 100644 build/opsramp-tracing-proxy/templates/service-redis.yaml diff --git a/build/opsramp-tracing-proxy/templates/_helpers.tpl b/build/opsramp-tracing-proxy/templates/_helpers.tpl index bd9dc6a52b..9280a8fcf7 100644 --- a/build/opsramp-tracing-proxy/templates/_helpers.tpl +++ b/build/opsramp-tracing-proxy/templates/_helpers.tpl @@ -50,16 +50,6 @@ app.kubernetes.io/name: {{ include "opsramp-tracing-proxy.name" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} -{{/* -Service Defaults -*/}} -{{- define "serviceType" -}} -{{ if .Values.service }} {{ default "ClusterIP" .Values.service.type | quote }} {{ else }} "ClusterIP" {{ end }} -{{- end }} -{{- define "servicePort" -}} -{{ if .Values.service }} {{ default 9090 .Values.service.port }} {{ else }} 9090 {{ end }} -{{- end }} - {{/* Image Defaults */}} @@ -67,62 +57,22 @@ Image Defaults {{ if .Values.image }} {{ default "Always" .Values.image.pullPolicy | quote }} {{ else }} "Always" {{ end }} {{- end }} -{{/* -Config Defautls -*/}} -{{- define "opsrampApiServer" -}} -{{ if .Values.config }} {{ default "" .Values.config.api | quote }} {{ else }} "" {{ end }} -{{- end }} -{{- define "opsrampKey" -}} -{{ if .Values.config }} {{ default "" .Values.config.key | quote }} {{ else }} "" {{ end }} -{{- end }} -{{- define "opsrampSecret" -}} -{{ if .Values.config }} {{ default "" .Values.config.secret | quote }} {{ else }} "" {{ end }} -{{- end }} -{{- define "opsrampTenantId" -}} -{{ if .Values.config }} {{ default "" .Values.config.tenantId | quote }} {{ else }} "" {{ end }} -{{- end }} -{{- define "dataset" -}} -{{ if .Values.config }} {{ default "ds" .Values.config.dataset | quote }} {{ else }} "ds" {{ end }} -{{- end }} -{{- define "useTLS" -}} -{{ if .Values.config }} {{ default true .Values.config.useTls }} {{ else }} true {{ end }} -{{- end }} -{{- define "useTlsInsecure" -}} -{{ if .Values.config }} {{ default false .Values.config.useTlsInsecure }} {{ else }} false {{ end }} -{{- end }} -{{- define "sendMetricsToOpsRamp" -}} -{{ if .Values.config }} {{ default true .Values.config.sendMetricsToOpsRamp }} {{ else }} true {{ end }} -{{- end }} -{{- define "proxyProtocol" -}} -{{ if and .Values.config .Values.config.proxy }} {{ default "" $.Values.config.proxy.protocol | quote }} {{ else }} "" {{ end }} -{{- end }} -{{- define "proxyServer" -}} -{{ if and .Values.config .Values.config.proxy }} {{ default "" 
$.Values.config.proxy.server | quote }} {{ else }} "" {{ end }} -{{- end }} -{{- define "proxyPort" -}} -{{ if and .Values.config .Values.config.proxy }} {{ default 3128 $.Values.config.proxy.port }} {{ else }} 3128 {{ end }} -{{- end }} -{{- define "proxyUsername" -}} -{{ if and .Values.config .Values.config.proxy }} {{ default "" $.Values.config.proxy.username | quote }} {{ else }} "" {{ end }} -{{- end }} -{{- define "proxyPassword" -}} -{{ if and .Values.config .Values.config.proxy }} {{ default "" $.Values.config.proxy.password | quote }} {{ else }} "" {{ end }} -{{- end }} {{/* -Logging Defautls +Redis Defaults */}} -{{- define "logFormat" -}} -{{ if .Values.logging }} {{ default "json" .Values.logging.logFormat | quote }} {{ else }} "json" {{ end }} +{{- define "opsramp-tracing-proxy.redis.fullname" -}} +{{ include "opsramp-tracing-proxy.fullname" . }}-redis +{{- end }} +{{- define "opsramp-tracing-proxy.redis.labels" -}} +helm.sh/chart: {{ include "opsramp-tracing-proxy.chart" . }} +{{ include "opsramp-tracing-proxy.redis.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} -{{- define "logOutput" -}} -{{ if .Values.logging }} {{ default "stdout" .Values.logging.logOutput | quote }} {{ else }} "stdout" {{ end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} - -{{/* -Metrics Defaults -*/}} -{{- define "metricsList" -}} -{{ if .Values.metrics }} {{ default `[".*"]` .Values.metrics.list }} {{ else }} [".*"] {{ end }} +{{- define "opsramp-tracing-proxy.redis.selectorLabels" -}} +app.kubernetes.io/name: {{ include "opsramp-tracing-proxy.name" . 
}}-redis +app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} \ No newline at end of file diff --git a/build/opsramp-tracing-proxy/templates/deployment-redis.yaml b/build/opsramp-tracing-proxy/templates/deployment-redis.yaml new file mode 100644 index 0000000000..925f537abd --- /dev/null +++ b/build/opsramp-tracing-proxy/templates/deployment-redis.yaml @@ -0,0 +1,49 @@ +{{- if .Values.redis.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "opsramp-tracing-proxy.redis.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "opsramp-tracing-proxy.redis.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "opsramp-tracing-proxy.redis.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "opsramp-tracing-proxy.redis.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: redis + image: "{{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }}" + imagePullPolicy: {{ .Values.redis.image.pullPolicy }} + ports: + - name: redis + containerPort: 6379 + protocol: TCP + {{- with .Values.redis.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.redis.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.redis.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- end }} \ No newline at end of file diff --git a/build/opsramp-tracing-proxy/templates/deployment.yaml b/build/opsramp-tracing-proxy/templates/deployment.yaml index f03dedf22d..99b1ec7200 100644 --- a/build/opsramp-tracing-proxy/templates/deployment.yaml +++ b/build/opsramp-tracing-proxy/templates/deployment.yaml @@ -26,21 +26,33 @@ spec: - name: {{ .Chart.Name }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{- include "imagePullPolicy" . }} + command: + - "/usr/bin/tracing-proxy" + - "-c" + - "/etc/tracing-proxy/config.yaml" + - "-r" + - "/etc/tracing-proxy/rules.yaml" ports: - - containerPort: {{- include "servicePort" . }} - name: tp-http + - name: data + containerPort: {{ .Values.service.port }} + protocol: TCP + - name: peer + containerPort: {{ .Values.service.peerPort }} + protocol: TCP + - containerPort: {{ .Values.service.grpcPort }} + name: grpc {{- with .Values.resources }} resources: - {{- toYaml .Values.resources | nindent 12 }} + {{- toYaml . | nindent 12 }} {{- end }} volumeMounts: - name: {{ include "opsramp-tracing-proxy.fullname" . }}-rules - mountPath: /etc/tracing-proxy/rules.toml - subPath: rules.toml + mountPath: /etc/tracing-proxy/rules.yaml + subPath: rules.yaml readOnly: true - name: {{ include "opsramp-tracing-proxy.fullname" . }}-config - mountPath: /etc/tracing-proxy/config.toml - subPath: config.toml + mountPath: /etc/tracing-proxy/config.yaml + subPath: config.yaml readOnly: true volumes: - configMap: diff --git a/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml b/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml index bfca4ee767..865274a11f 100644 --- a/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml +++ b/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml @@ -5,516 +5,5 @@ metadata: labels: {{- include "opsramp-tracing-proxy.labels" . 
| nindent 4 }} data: - config.toml: |- - ##################### - ## Refinery Config ## - ##################### - - # ListenAddr is the IP and port on which to listen for incoming events. Incoming - # traffic is expected to be HTTP, so if using SSL put something like nginx in - # front to do the decryption. - # Should be of the form 0.0.0.0:8080 - # Not eligible for live reload. - ListenAddr = "0.0.0.0:8080" - - # GRPCListenAddr is the IP and port on which to listen for incoming events over - # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put - # something like nginx in front to do the decryption. - # Should be of the form 0.0.0.0:9090 - # Not eligible for live reload. - GRPCListenAddr = "0.0.0.0:{{- include "servicePort" . | trim }}" - - # PeerListenAddr is the IP and port on which to listen for traffic being - # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL - # put something like nginx in front to do the decryption. Must be different from - # ListenAddr - # Should be of the form 0.0.0.0:8081 - # Not eligible for live reload. - PeerListenAddr = "0.0.0.0:8083" - GRPCPeerListenAddr = "0.0.0.0:8084" - - # ProxyProtocol accepts http and https - # Not Eligible for live reload. - ProxyProtocol = {{- include "proxyProtocol" . }} - - # ProxyServer takes the proxy server address - # Not Eligible for live reload. - ProxyServer = {{- include "proxyServer" . }} - - # ProxyPort takes the proxy server port - # Not Eligible for live reload. - ProxyPort = {{- include "proxyPort" . }} - - # ProxyUserName takes the proxy username - # Not Eligible for live reload. - ProxyUserName = {{- include "proxyUsername" . }} - - # ProxyPassword takes the proxy password - # Not Eligible for live reload. - ProxyPassword = {{- include "proxyPassword" . }} - - # CompressPeerCommunication determines whether refinery will compress span data - # it forwards to peers. If it costs money to transmit data between refinery - # instances (e.g. 
they're spread across AWS availability zones), then you - # almost certainly want compression enabled to reduce your bill. The option to - # disable it is provided as an escape hatch for deployments that value lower CPU - # utilization over data transfer costs. - CompressPeerCommunication = true - - # APIKeys is a list of Opsramp API keys that the proxy will accept. This list - # only applies to events - other Opsramp API actions will fall through to the - # upstream API directly. - # Adding keys here causes events arriving with API keys not in this list to be - # rejected with an HTTP 401 error If an API key that is a literal '*' is in the - # list, all API keys are accepted. - # Eligible for live reload. - APIKeys = [ - # "replace-me", - # "more-optional-keys", - "*", # wildcard accept all keys - ] - - # OpsrampAPI is the URL for the upstream Opsramp API. - # Eligible for live reload. - OpsrampAPI = {{- include "opsrampApiServer" . }} - - # OpsrampKey is used to get the OauthToken - OpsrampKey = {{- include "opsrampKey" . }} - - # OpsrampSecret is used to get the OauthToken - OpsrampSecret = {{- include "opsrampSecret" . }} - - # Traces are send to the client with given tenantid - TenantId = {{- include "opsrampTenantId" . }} - - # Dataset you want to use for sampling - Dataset = {{- include "dataset" . }} - - #Tls Options - UseTls = {{- include "useTLS" . }} - UseTlsInsecure = {{- include "useTlsInsecure" . }} - - # SendDelay is a short timer that will be triggered when a trace is complete. - # Refinery will wait this duration before actually sending the trace. The - # reason for this short delay is to allow for small network delays or clock - # jitters to elapse and any final spans to arrive before actually sending the - # trace. This supports duration strings with supplied units. Set to 0 for - # immediate sends. - # Eligible for live reload. - SendDelay = "2s" - - # BatchTimeout dictates how frequently to send unfulfilled batches. 
By default - # this will use the DefaultBatchTimeout in libhoney as its value, which is 100ms. - # Eligible for live reload. - BatchTimeout = "1s" - - # TraceTimeout is a long timer; it represents the outside boundary of how long - # to wait before sending an incomplete trace. Normally traces are sent when the - # root span arrives. Sometimes the root span never arrives (due to crashes or - # whatever), and this timer will send a trace even without having received the - # root span. If you have particularly long-lived traces you should increase this - # timer. This supports duration strings with supplied units. - # Eligible for live reload. - TraceTimeout = "60s" - - # MaxBatchSize is the number of events to be included in the batch for sending - MaxBatchSize = 500 - - # SendTicker is a short timer; it determines the duration to use to check for traces to send - SendTicker = "100ms" - - # LoggingLevel is the level above which we should log. Debug is very verbose, - # and should only be used in pre-production environments. Info is the - # recommended level. Valid options are "debug", "info", "error", and - # "panic" - # Not eligible for live reload. - LoggingLevel = "debug" - - # UpstreamBufferSize and PeerBufferSize control how large of an event queue to use - # when buffering events that will be forwarded to peers or the upstream API. - UpstreamBufferSize = 10000 - PeerBufferSize = 10000 - - # DebugServiceAddr sets the IP and port the debug service will run on - # The debug service will only run if the command line flag -d is specified - # The debug service runs on the first open port between localhost:6060 and :6069 by default - # DebugServiceAddr = "localhost:8085" - - # AddHostMetadataToTrace determines whether or not to add information about - # the host that Refinery is running on to the spans that it processes. - # If enabled, information about the host will be added to each span with the - # prefix `meta.refinery.`. 
- # Currently the only value added is 'meta.refinery.local_hostname'. - # Not eligible for live reload - AddHostMetadataToTrace = false - - # EnvironmentCacheTTL is the amount of time a cache entry will live that associates - # an API key with an environment name. - # Cache misses lookup the environment name using HoneycombAPI config value. - # Default is 1 hour ("1h"). - # Not eligible for live reload. - EnvironmentCacheTTL = "1h" - - # QueryAuthToken, if specified, provides a token that must be specified with - # the header "X-Honeycomb-Refinery-Query" in order for a /query request to succeed. - # These /query requests are intended for debugging refinery installations and - # are not typically needed in normal operation. - # Can be specified in the environment as REFINERY_QUERY_AUTH_TOKEN. - # If left unspecified, the /query endpoints are inaccessible. - # Not eligible for live reload. - # QueryAuthToken = "some-random-value" - - # AddRuleReasonToTrace causes traces that are sent to Honeycomb to include the field `meta.refinery.reason`. - # This field contains text indicating which rule was evaluated that caused the trace to be included. - # Eligible for live reload. - # AddRuleReasonToTrace = true - - # AdditionalErrorFields should be a list of span fields that should be included when logging - # errors that happen during ingestion of events (for example, the span too large error). - # This is primarily useful in trying to track down misbehaving senders in a large installation. - # The fields `dataset`, `apihost`, and `environment` are always included. - # If a field is not present in the span, it will not be present in the error log. - # Default is ["trace.span_id"]. - # Eligible for live reload. - AdditionalErrorFields = [ - "trace.span_id" - ] - - # AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate - # the number of child spans on the trace at the time the sampling decision was made. 
- # This value is available to the rules-based sampler, making it possible to write rules that - # are dependent upon the number of spans in the trace. - # Default is false. - # Eligible for live reload. - # AddSpanCountToRoot = true - - # CacheOverrunStrategy controls the cache management behavior under memory pressure. - # "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, - # which is generally not helpful unless it occurs because of a permanent change in traffic patterns. - # In the "impact" strategy, the items having the most impact on the cache size are - # ejected from the cache earlier than normal but the cache is not resized. - # In all cases, it only applies if MaxAlloc is nonzero. - # Default is "resize" for compatibility but "impact" is recommended for most installations. - # Eligible for live reload. - # CacheOverrunStrategy = "impact" - - # Metrics are sent to OpsRamp (The collection happens based on configuration specifie - # in OpsRampMetrics and only works when the Metrics is set to "prometheus") - SendMetricsToOpsRamp = {{- include "sendMetricsToOpsRamp" . }} - - ############################ - ## Implementation Choices ## - ############################ - - # Each of the config options below chooses an implementation of a Refinery - # component to use. Depending on the choice there may be more configuration - # required below in the section for that choice. Changing implementation choices - # requires a process restart; these changes will not be picked up by a live - # config reload. (Individual config options for a given implementation may be - # eligible for live reload). - - # Collector describes which collector to use for collecting traces. The only - # current valid option is "InMemCollector".. More can be added by adding - # implementations of the Collector interface. 
- Collector = "InMemCollector" - - ######################### - ## Peer Management ## - ######################### - - [PeerManagement] - Type = "file" - # Peers is the list of all servers participating in this proxy cluster. Events - # will be sharded evenly across all peers based on the Trace ID. Values here - # should be the base URL used to access the peer, and should include scheme, - # hostname (or ip address) and port. All servers in the cluster should be in - # this list, including this host. - Peers = [ - "http://127.0.0.1:8084", #only grpc peer listener used - # "http://127.0.0.1:8083", - # "http://10.1.2.3.4:8080", - # "http://refinery-1231:8080", - # "http://peer-3.fqdn" // assumes port 80 - ] - - # [PeerManagement] - # Type = "redis" - # RedisHost is is used to connect to redis for peer cluster membership management. - # Further, if the environment variable 'REFINERY_REDIS_HOST' is set it takes - # precedence and this value is ignored. - # Not eligible for live reload. - # RedisHost = "localhost:6379" - - # RedisUsername is the username used to connect to redis for peer cluster membership management. - # If the environment variable 'REFINERY_REDIS_USERNAME' is set it takes - # precedence and this value is ignored. - # Not eligible for live reload. - # RedisUsername = "" - - # RedisPassword is the password used to connect to redis for peer cluster membership management. - # If the environment variable 'REFINERY_REDIS_PASSWORD' is set it takes - # precedence and this value is ignored. - # Not eligible for live reload. - # RedisPassword = "" - - # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. - # Not eligible for live reload. - # UseTLS = false - - # UseTLSInsecure disables certificate checks - # Not eligible for live reload. - # UseTLSInsecure = false - - # IdentifierInterfaceName is optional. 
By default, when using RedisHost, Refinery will use - # the local hostname to identify itself to other peers in Redis. If your environment - # requires that you use IPs as identifiers (for example, if peers can't resolve eachother - # by name), you can specify the network interface that Refinery is listening on here. - # Refinery will use the first unicast address that it finds on the specified network - # interface as its identifier. - # Not eligible for live reload. - # IdentifierInterfaceName = "eth0" - - # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Refinery will default to the first - # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use - # the first IPV6 unicast address found. - # UseIPV6Identifier = false - - # RedisIdentifier is optional. By default, when using RedisHost, Refinery will use - # the local hostname to id788714cd-a17a-4d7e-9bac-c35131f4bcc2entify itself to other peers in Redis. If your environment - # requires that you use IPs as identifiers (for example, if peers can't resolve eachother - # by name), you can specify the exact identifier (IP address, etc) to use here. - # Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set. - # RedisIdentifier = "192.168.1.1" - - # Timeout is optional. By default, when using RedisHost, Refinery will timeout - # after 5s when communicating with Redis. - # Timeout = "5s" - - # Strategy controls the way that traces are assigned to refinery nodes. - # The "legacy" strategy uses a simple algorithm that unfortunately causes - # 1/2 of the in-flight traces to be assigned to a different node whenever the - # number of nodes changes. - # The legacy strategy is deprecated and is intended to be removed in a future release. - # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the - # number of nodes) are disrupted when the node count changes. - # Not eligible for live reload. 
- # Strategy = "hash" - - ######################### - ## In-Memory Collector ## - ######################### - - # InMemCollector brings together all the settings that are relevant to - # collecting spans together to make traces. - [InMemCollector] - - # The collection cache is used to collect all spans into a trace as well as - # remember the sampling decision for any spans that might come in after the - # trace has been marked "complete" (either by timing out or seeing the root - # span). The number of traces in the cache should be many multiples (100x to - # 1000x) of the total number of concurrently active traces (trace throughput * - # trace duration). - # Eligible for live reload. Growing the cache capacity with a live config reload - # is fine. Avoid shrinking it with a live reload (you can, but it may cause - # temporary odd sampling decisions). - CacheCapacity = 1000 - - # MaxAlloc is optional. If set, it must be an integer >= 0. 64-bit values are - # supported. - # If set to a non-zero value, once per tick (see SendTicker) the collector - # will compare total allocated bytes to this value. If allocation is too - # high, cache capacity will be adjusted according to the setting for - # CacheOverrunStrategy. - # Useful values for this setting are generally in the range of 75%-90% of - # available system memory. - MaxAlloc = 0 - - ################### - ## Logrus Logger ## - ################### - - # LogrusLogger is a section of the config only used if you are using the - # LogrusLogger to send all logs to STDOUT using the logrus package. If you are - # using a different logger (eg Opsramp logger) you can leave all this - # commented out. - [LogrusLogger] - - # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] - LogFormatter = {{- include "logFormat" . }} - - # LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr", "file"] - LogOutput = {{- include "logOutput" . 
}} - - ## LogrusLogger.File - specifies configs for logs when LogOutput is set to "file" - [LogrusLogger.File] - - # FileName specifies the location where the logs are supposed be stored - FileName = "/var/log/opsramp/tracing-proxy.log" - - # MaxSize is the maximum size in megabytes of the log file before it gets rotated. - MaxSize = 1 - - # MaxBackups is the maximum number of old log files to retain. - MaxBackups = 3 - - # Compress determines if the rotated log files should be compressed - # using gzip. - Compress = true - - - ####################### - ## Prometheus Metrics ## - ####################### - - [OpsRampMetrics] - # MetricsListenAddr determines the interface and port on which Prometheus will - # listen for requests for /metrics. Must be different from the main Refinery - # listener. - # Not eligible for live reload. - MetricsListenAddr = "localhost:2112" - - # OpsRampMetricsAPI is the URL for the upstream OpsRamp API. - # Not Eligible for live reload. - OpsRampMetricsAPI = {{- include "opsrampApiServer" . }} - - # OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. - # Not Eligible for live reload. - OpsRampTenantID = {{- include "opsrampTenantId" . }} - - # OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. - # This is separate from the APIKeys used to authenticate regular - # traffic. - # Not Eligible for live reload. - OpsRampMetricsAPIKey = {{- include "opsrampKey" . }} - - # OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. - # This is separate from the APISecret used to authenticate regular - # traffic. - # Not Eligible for live reload. - OpsRampMetricsAPISecret = {{- include "opsrampSecret" . }} - - # OpsRampMetricsReportingInterval is frequency specified in seconds at which - # the metrics are collected and sent to OpsRamp - # Not Eligible for live reload. 
- OpsRampMetricsReportingInterval = 10 - - # OpsRampMetricsRetryCount is the number of times we retry incase the send fails - # Not Eligible for live reload. - OpsRampMetricsRetryCount = 2 - - # ProxyProtocol accepts http and https - # Not Eligible for live reload. - ProxyProtocol = {{- include "proxyProtocol" . }} - - # ProxyServer takes the proxy server address - # Not Eligible for live reload. - ProxyServer = {{- include "proxyServer" . }} - - # ProxyPort takes the proxy server port - # Not Eligible for live reload. - ProxyPort = {{- include "proxyPort" . }} - - # ProxyUserName takes the proxy username - # Not Eligible for live reload. - ProxyUserName = {{- include "proxyUsername" . }} - - # ProxyPassword takes the proxy password - # Not Eligible for live reload. - ProxyPassword = {{- include "proxyPassword" . }} - - # OpsRampMetricsList is a list of regular expressions which match the metric - # names. Keep the list as small as possible since too many regular expressions can lead to bad performance. - # Internally all the regex in the list are concatinated using '|' to make the computation little faster. - # Not Eligible for live reload - OpsRampMetricsList = {{- include "metricsList" . }} - - - [GRPCServerParameters] - - # MaxConnectionIdle is a duration for the amount of time after which an - # idle connection would be closed by sending a GoAway. Idleness duration is - # defined since the most recent time the number of outstanding RPCs became - # zero or the connection establishment. - # 0s sets duration to infinity which is the default: - # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219 - # Not eligible for live reload. - # MaxConnectionIdle = "1m" - - # MaxConnectionAge is a duration for the maximum amount of time a - # connection may exist before it will be closed by sending a GoAway. 
A - # random jitter of +/-10% will be added to MaxConnectionAge to spread out - # connection storms. - # 0s sets duration to infinity which is the default: - # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 - # Not eligible for live reload. - # MaxConnectionAge = "0s" - - # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after - # which the connection will be forcibly closed. - # 0s sets duration to infinity which is the default: - # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 - # Not eligible for live reload. - # MaxConnectionAgeGrace = "0s" - - # After a duration of this time if the server doesn't see any activity it - # pings the client to see if the transport is still alive. - # If set below 1s, a minimum value of 1s will be used instead. - # 0s sets duration to 2 hours which is the default: - # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 - # Not eligible for live reload. - # Time = "10s" - - # After having pinged for keepalive check, the server waits for a duration - # of Timeout and if no activity is seen even after that the connection is - # closed. - # 0s sets duration to 20 seconds which is the default: - # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 - # Not eligible for live reload. - # Timeout = "2s" - - - - ################################ - ## Sample Cache Configuration ## - ################################ - - # Sample Cache Configuration controls the sample cache used to retain information about trace - # status after the sampling decision has been made. - - [SampleCacheConfig] - - # Type controls the type of sample cache used. 
- # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is - # 5x the size of the trace cache. This is Refinery's original sample cache strategy. - # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember - # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. - # It is also more configurable. The cuckoo filter is recommended for most installations. - # Default is "legacy". - # Not eligible for live reload (you cannot change the type of cache with reload). - # Type = "cuckoo" - - # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. - # Refinery keeps a record of each trace that was kept and sent to Honeycomb, along with some - # statistical information. This is most useful in cases where the trace was sent before sending - # the root span, so that the root span can be decorated with accurate metadata. - # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). - # Does not apply to the "legacy" type of cache. - # Eligible for live reload. - # KeptSize = 10_000 - - # DroppedSize controls the size of the cuckoo dropped traces cache. - # This cache consumes 4-6 bytes per trace at a scale of millions of traces. - # Changing its size with live reload sets a future limit, but does not have an immediate effect. - # Default is 1_000_000 traces. - # Does not apply to the "legacy" type of cache. - # Eligible for live reload. - # DroppedSize = 1_000_000 - - # SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates - # the remaining capacity of its dropped traces cache and possibly cycles it. - # This cache is quite resilient so it doesn't need to happen very often, but the - # operation is also inexpensive. - # Default is 10 seconds. - # Does not apply to the "legacy" type of cache. - # Eligible for live reload. 
- # SizeCheckInterval = "10s" \ No newline at end of file + config.yaml: |- + {{- tpl (toYaml .Values.config) . | nindent 4 }} \ No newline at end of file diff --git a/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml b/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml index a5ad1d7294..584cf7b035 100644 --- a/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml +++ b/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml @@ -5,9 +5,5 @@ metadata: labels: {{- include "opsramp-tracing-proxy.labels" . | nindent 4 }} data: - rules.toml: |- - {{- with .Values.rules }} - {{ . | nindent 4 }} - {{ else }} - {{- include "rulesTOML" | nindent 4}} - {{- end }} + rules.yaml: |- + {{- toYaml .Values.rules | nindent 4 }} \ No newline at end of file diff --git a/build/opsramp-tracing-proxy/templates/service-redis.yaml b/build/opsramp-tracing-proxy/templates/service-redis.yaml new file mode 100644 index 0000000000..835a3bd372 --- /dev/null +++ b/build/opsramp-tracing-proxy/templates/service-redis.yaml @@ -0,0 +1,18 @@ +{{- if .Values.redis.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "opsramp-tracing-proxy.redis.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "opsramp-tracing-proxy.redis.labels" . | nindent 4 }} +spec: + type: ClusterIP + ports: + - name: tcp-redis + port: 6379 + protocol: TCP + targetPort: redis + selector: + {{- include "opsramp-tracing-proxy.redis.selectorLabels" . | nindent 4 }} +{{- end}} \ No newline at end of file diff --git a/build/opsramp-tracing-proxy/templates/service.yaml b/build/opsramp-tracing-proxy/templates/service.yaml index 0c901e1ed3..04de2144d6 100644 --- a/build/opsramp-tracing-proxy/templates/service.yaml +++ b/build/opsramp-tracing-proxy/templates/service.yaml @@ -2,14 +2,23 @@ apiVersion: v1 kind: Service metadata: name: {{ include "opsramp-tracing-proxy.fullname" . }} + namespace: {{ .Release.Namespace }} labels: {{- include "opsramp-tracing-proxy.labels" . 
| nindent 4 }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} spec: - type: {{- include "serviceType" . }} + type: {{ .Values.service.type }} ports: - - port: {{- include "servicePort" . }} - targetPort: {{- include "servicePort" . }} + - port: {{ .Values.service.grpcPort }} + targetPort: grpc protocol: TCP - name: tp-http + name: grpc + - port: {{ .Values.service.port }} + targetPort: data + protocol: TCP + name: data selector: - {{- include "opsramp-tracing-proxy.selectorLabels" . | nindent 4 }} + {{- include "opsramp-tracing-proxy.selectorLabels" . | nindent 4 }} \ No newline at end of file diff --git a/build/opsramp-tracing-proxy/values.yaml b/build/opsramp-tracing-proxy/values.yaml index 119dbc1349..ecde9b21d4 100644 --- a/build/opsramp-tracing-proxy/values.yaml +++ b/build/opsramp-tracing-proxy/values.yaml @@ -1,147 +1,398 @@ -replicaCount: 1 -podAnnotations: {} -imagePullSecrets: [] +# use replicaCount to specify the size of the trace-proxy cluster +replicaCount: 3 + +# configure the cpu and memory limits for each node in the cluster +#resources: +# limits: +# cpu: "2000m" +# memory: "4Gi" +# requests: +# cpu: "500m" +# memory: "1Gi" + image: repository: us-docker.pkg.dev/opsramp-registry/agent-images/trace-proxy pullPolicy: Always # use "IfNotPresent" to avoid pulling the image every time tag: "latest" # if empty, then defaults to the chart appVersion. + +podAnnotations: { } +imagePullSecrets: [ ] +nameOverride: "" +fullnameOverride: "" + service: type: ClusterIP - port: 9090 + port: 80 + grpcPort: 9090 + peerPort: 8081 + annotations: { } -# resources: -# limits: -# cpu: "4" -# memory: "8096Mi" -# requests: -# cpu: "2" -# memory: "2048Mi" +config: + # ListenAddr is the IP and port on which to listen for incoming events. + ListenAddr: 0.0.0.0:8080 -nodeSelector: {} + # GRPCListenAddr is the IP and port on which to listen for incoming events over gRPC. 
+ GRPCListenAddr: 0.0.0.0:9090 -# Trace Proxy config file values -config: - api: "" - key: "" - secret: "" - tenantId: "" - dataset: "ds" - useTls: true + # PeerListenAddr is the IP and port on which to listen for traffic being rerouted from a peer. + PeerListenAddr: 0.0.0.0:8081 + + GRPCPeerListenAddr: 0.0.0.0:8084 + + # ProxyProtocol accepts http and https + # Not Eligible for live reload. + ProxyProtocol: "" + # ProxyServer takes the proxy server address + # Not Eligible for live reload. + ProxyServer: "" + # ProxyPort takes the proxy server port + # Not Eligible for live reload. + ProxyPort: 3128 + # ProxyUserName takes the proxy username + # Not Eligible for live reload. + ProxyUserName: "" + # ProxyPassword takes the proxy password + # Not Eligible for live reload. + ProxyPassword: "" + + # CompressPeerCommunication determines whether trace-proxy will compress span data + # it forwards to peers. If it costs money to transmit data between refinery + # instances (e.g. they're spread across AWS availability zones), then you + # almost certainly want compression enabled to reduce your bill. The option to + # disable it is provided as an escape hatch for deployments that value lower CPU + # utilization over data transfer costs. + CompressPeerCommunication: true + + # APIKeys is a list of OpsRamp API keys that the proxy will accept. This list + # only applies to events - other OpsRamp API actions will fall through to the + # upstream API directly. + # Adding keys here causes events arriving with API keys not in this list to be + # rejected with an HTTP 401 error If an API key that is a literal '*' is in the + # list, all API keys are accepted. + # Eligible for live reload. + APIKeys: [ "*" ]# wildcard accepts all keys + + # OpsrampAPI is the URL for the upstream Opsramp API. + # Eligible for live reload. 
+ OpsrampAPI: "" + # OpsrampKey is used to get the OauthToken + OpsrampKey: "" + # OpsrampSecret is used to get the OauthToken + OpsrampSecret: "" + # Traces are sent to the client with the given tenantId + TenantId: "" + # Dataset you want to use for sampling + Dataset: "ds" + #Tls Options + UseTls: true UseTlsInsecure: false - sendMetricsToOpsRamp: true - proxy: - protocol: "" - server: "" - port: "" - username: "" - password: "" - -logging: - logFormat: "json" # Accepted values are one of ["logfmt", "json"] - logOutput: "stdout" # Accepted values are one of["stdout", "stderr"] - -metrics: - list: ['".*"'] # escape the " with ' while specifying values - -rules: |- - SampleRate = 1 - - [dataset1] - Sampler = "DynamicSampler" - SampleRate = 2 - FieldList = ["request.method","response.status_code"] - UseTraceLength = true - AddSampleRateKeyToTrace = true - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - ClearFrequencySec = 60 - - [dataset2] - Sampler = "EMADynamicSampler" - GoalSampleRate = 2 - FieldList = ["request.method","response.status_code"] - UseTraceLength = true - AddSampleRateKeyToTrace = true - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - AdjustmentInterval = 15 - Weight = 0.5 - MaxKeys = 0 - AgeOutValue = 0.5 - BurstMultiple = 2.0 - BurstDetectionDelay = 3 - - [dataset3] - Sampler = "DeterministicSampler" - SampleRate = 10 - - [dataset4] - Sampler = "RulesBasedSampler" - CheckNestedFields = false - - [[dataset4.rule]] - name = "drop healthchecks" - drop = true - [[dataset4.rule.condition]] - field = "http.route" - operator = "=" - value = "/health-check" - - [[dataset4.rule]] - name = "keep slow 500 errors" - SampleRate = 1 - [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = 500 - [[dataset4.rule.condition]] - field = "duration_ms" - operator = ">=" - value = 1000.789 - - [[dataset4.rule]] - name = "dynamically sample 200 responses" - [[dataset4.rule.condition]] - field = "status_code" - 
operator = "=" - value = 200 - [dataset4.rule.sampler.EMADynamicSampler] - Sampler = "EMADynamicSampler" - GoalSampleRate = 15 - FieldList = ["request.method", "request.route"] - AddSampleRateKeyToTrace = true - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - [[dataset4.rule]] - name = "dynamically sample 200 string responses" - [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = "200" - [dataset4.rule.sampler.EMADynamicSampler] - Sampler = "EMADynamicSampler" - GoalSampleRate = 15 - FieldList = ["request.method", "request.route"] - AddSampleRateKeyToTrace = true - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - [[dataset4.rule]] - name = "sample traces originating from a service" - Scope = "span" - SampleRate = 5 - [[dataset4.rule.condition]] - field = "service name" - operator = "=" - value = "users" - [[dataset4.rule.condition]] - field = "meta.span_type" - operator = "=" - value = "root" - - [[dataset4.rule]] - SampleRate = 10 # default when no rules match, if missing defaults to 10 - - [dataset5] - - Sampler = "TotalThroughputSampler" - GoalThroughputPerSec = 100 - FieldList = "[request.method]" + + # LoggingLevel valid options are "debug", "info", "error", and "panic". + LoggingLevel: error + + # SendDelay is a short timer that will be triggered when a trace is complete. + # Trace Proxy will wait for this duration before actually sending the trace. The + # reason for this short delay is to allow for small network delays or clock + # jitters to elapse and any final spans to arrive before actually sending the + # trace. This supports duration strings with supplied units. Set to 0 for + # immediate sends. + SendDelay: 2s + + # BatchTimeout dictates how frequently to send unfulfilled batches. By default + # this will use the DefaultBatchTimeout in libtrace as its value, which is 100ms. + # Eligible for live reload. 
+ BatchTimeout: 1s + + # TraceTimeout is a long timer; it represents the outside boundary of how long + # to wait before sending an incomplete trace. Normally traces are sent when the + # root span arrives. Sometimes the root span never arrives (due to crashes or + # whatever), and this timer will send a trace even without having received the + # root span. If you have particularly long-lived traces you should increase this + # timer. This supports duration strings with supplied units. + TraceTimeout: 60s + + # MaxBatchSize is the number of events to be included in the batch for sending + MaxBatchSize: 500 + + # SendTicker is a short timer; it determines the duration to use to check for traces to send + SendTicker: 100ms + + # UpstreamBufferSize and PeerBufferSize control how large of an event queue to use + # when buffering events that will be forwarded to peers or the upstream API. + UpstreamBufferSize: 1000 + PeerBufferSize: 1000 + + # EnvironmentCacheTTL is the amount of time a cache entry will live that associates + # an API key with an environment name. + # Cache misses lookup the environment name using HoneycombAPI config value. + # Default is 1 hour ("1h"). + # Not eligible for live reload. + EnvironmentCacheTTL: "1h" + + # QueryAuthToken, if specified, provides a token that must be specified with + # the header "X-Honeycomb-Refinery-Query" in order for a /query request to succeed. + # These /query requests are intended for debugging refinery installations and + # are not typically needed in normal operation. + # Can be specified in the environment as REFINERY_QUERY_AUTH_TOKEN. + # If left unspecified, the /query endpoints are inaccessible. + # Not eligible for live reload. + # QueryAuthToken: "some-random-value" + + # AddRuleReasonToTrace causes traces that are sent to Honeycomb to include the field `meta.refinery.reason`. + # This field contains text indicating which rule was evaluated that caused the trace to be included. + # Eligible for live reload. 
+ AddRuleReasonToTrace: true + + # AdditionalErrorFields should be a list of span fields that should be included when logging + # errors that happen during ingestion of events (for example, the span too large error). + # This is primarily useful in trying to track down misbehaving senders in a large installation. + # The fields `dataset`, `apihost`, and `environment` are always included. + # If a field is not present in the span, it will not be present in the error log. + # Default is ["trace.span_id"]. + # Eligible for live reload. + AdditionalErrorFields: + - trace.span_id + + # AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate + # the number of child spans on the trace at the time the sampling decision was made. + # This value is available to the rules-based sampler, making it possible to write rules that + # are dependent upon the number of spans in the trace. + # Default is false. + # Eligible for live reload. + AddSpanCountToRoot: false + + # CacheOverrunStrategy controls the cache management behavior under memory pressure. + # "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, + # which is generally not helpful unless it occurs because of a permanent change in traffic patterns. + # In the "impact" strategy, the items having the most impact on the cache size are + # ejected from the cache earlier than normal but the cache is not resized. + # In all cases, it only applies if MaxAlloc is nonzero. + # Default is "resize" for compatibility but "impact" is recommended for most installations. + # Eligible for live reload. 
+ CacheOverrunStrategy: "impact" + + # Metrics are sent to OpsRamp (The collection happens based on configuration specifie + # in OpsRampMetrics and only works when the Metrics is set to "prometheus") + SendMetricsToOpsRamp: false + + # Configure how Refinery peers are discovered and managed + PeerManagement: + Strategy: "hash" # Always use hash for balanced distribution of traces + + # The type should always be redis when deployed to Kubernetes environments + Type: "redis" + + # RedisHost is used to connect to redis for peer cluster membership management. + # Further, if the environment variable 'REFINERY_REDIS_HOST' is set it takes + # precedence and this value is ignored. + # Not eligible for live reload. + # RedisHost will default to the name used for the release or name overrides depending on what is used, + # but can be overriden to a specific value. + RedisHost: '{{include "opsramp-tracing-proxy.redis.fullname" .}}:6379' + + # RedisUsername is the username used to connect to redis for peer cluster membership management. + # If the environment variable 'REFINERY_REDIS_USERNAME' is set it takes + # precedence and this value is ignored. + # Not eligible for live reload. + RedisUsername: "" + + # RedisPassword is the password used to connect to redis for peer cluster membership management. + # If the environment variable 'REFINERY_REDIS_PASSWORD' is set it takes + # precedence and this value is ignored. + # Not eligible for live reload. + RedisPassword: "" + + # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. + # Not eligible for live reload. + UseTLS: false + + # UseTLSInsecure disables certificate checks + # Not eligible for live reload. + UseTLSInsecure: false + + # IdentifierInterfaceName is optional. + # Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. 
+ # When configured the pod's IP will be used in the peer list + IdentifierInterfaceName: eth0 + + # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first + # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use + # the first IPV6 unicast address found. + UseIPV6Identifier: false + + ############################ + ## Implementation Choices ## + ############################ + # Each of the config options below chooses an implementation of a Refinery + # component to use. Depending on the choice there may be more configuration + # required below in the section for that choice. Changing implementation choices + # requires a process restart; these changes will not be picked up by a live + # config reload. (Individual config options for a given implementation may be + # eligible for live reload). + # Collector describes which collector to use for collecting traces. The only + # current valid option is "InMemCollector".. More can be added by adding + # implementations of the Collector interface. + Collector: "InMemCollector" + + # InMemCollector brings together all the settings that are relevant to + # collecting spans together to make traces. + InMemCollector: + + # The collection cache is used to collect all spans into a trace as well as + # remember the sampling decision for any spans that might come in after the + # trace has been marked "complete" (either by timing out or seeing the root + # span). The number of traces in the cache should be many multiples (100x to + # 1000x) of the total number of concurrently active traces (trace throughput * + # trace duration). + CacheCapacity: 1000 + + # MaxAlloc is optional. If set, it must be an integer >= 0. + # If set to a non-zero value, once per tick (see SendTicker) the collector + # will compare total allocated bytes to this value. If allocation is too + # high, cache capacity will be reduced and an error will be logged. 
+ # Useful values for this setting are generally in the range of 75%-90% of + # available system memory. Using 80% is the recommended. + # This value should be set in according to the resources.limits.memory + # By default that setting is 4GB, and this is set to 85% of that limit + # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,837 + # MaxAlloc: 3435973836 + MaxAlloc: 0 + + # LogrusLogger is a section of the config only used if you are using the + # LogrusLogger to send all logs to STDOUT using the logrus package. + LogrusLogger: + # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] + LogFormatter: 'json' + # LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr"] + LogOutput: 'stdout' + + OpsRampMetrics: + # MetricsListenAddr determines the interface and port on which Prometheus will + # listen for requests for /metrics. Must be different from the main Refinery + # listener. + # Not eligible for live reload. + MetricsListenAddr: 'localhost:2112' + + # OpsRampMetricsAPI is the URL for the upstream OpsRamp API. + # Not Eligible for live reload. + OpsRampMetricsAPI: '' + + # OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. + OpsRampTenantID: '' + + # OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. + # This is separate from the APIKeys used to authenticate regular + # traffic. + # Not Eligible for live reload. + OpsRampMetricsAPIKey: '' + + # OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. + # This is separate from the APISecret used to authenticate regular + # traffic. + # Not Eligible for live reload. + OpsRampMetricsAPISecret: '' + + # OpsRampMetricsReportingInterval is frequency specified in seconds at which + # the metrics are collected and sent to OpsRamp + # Not Eligible for live reload. 
+ OpsRampMetricsReportingInterval: 10 + + # OpsRampMetricsRetryCount is the number of times we retry incase the send fails + # Not Eligible for live reload. + OpsRampMetricsRetryCount: 2 + + # ProxyProtocol accepts http and https + # Not Eligible for live reload. + ProxyProtocol: '' + + # ProxyServer takes the proxy server address + # Not Eligible for live reload. + ProxyServer: '' + + # ProxyPort takes the proxy server port + # Not Eligible for live reload. + ProxyPort: 3128 + + # ProxyUserName takes the proxy username + # Not Eligible for live reload. + ProxyUserName: '' + + # ProxyPassword takes the proxy password + # Not Eligible for live reload. + ProxyPassword: '' + + # OpsRampMetricsList is a list of regular expressions which match the metric + # names. Keep the list as small as possible since too many regular expressions can lead to bad performance. + # Internally all the regex in the list are concatinated using '|' to make the computation little faster. + # Not Eligible for live reload + OpsRampMetricsList: [ ".*" ] + + +rules: + # DryRun - If enabled, marks traces that would be dropped given current sampling rules, + # and sends all traces regardless + DryRun: true + # lb: + # This is the default sampler used. + # Any traces received that are not for a defined dataset will use this sampler. + # Deterministic Sampler implementation. This is the simplest sampling algorithm + # - it is a static sample rate, choosing traces randomly to either keep or send + # (at the appropriate rate). It is not influenced by the contents of the trace. + Sampler: DeterministicSampler + + # SampleRate is the rate at which to sample. It indicates a ratio, where one + # sample trace is kept for every n traces seen. For example, a SampleRate of 30 + # will keep 1 out of every 30 traces. 
+ SampleRate: 1 + + ## Dataset sampling rules ## + # Specify dataset rules by creating an object for each dataset + # Note: If your dataset name contains a space, you will have to escape the dataset name + # using single quotes, such as "dataset 1": + # + # This example creates a sampling definition for a dataset called: test-dataset + # test-dataset: + # Sampler: EMADynamicSampler + # GoalSampleRate: 5 + # FieldList: + # - request.method + # - response.status_code + + # LiveReload - If disabled, triggers a rolling restart of the cluster whenever + # the Rules configmap changes + LiveReload: true + + +# Redis configuration +redis: + # To install a simple single pod Redis deployment set this to true. + # If false, you must specify a value for existingHost + # For production, it is recommended to set this to false and provide + # a highly available Redis configuration using redis.existingHost + enabled: true + + # If redis.enabled is false this needs to be specified. + # This needs to be the name:port of a Redis configuration + # existingHost: + + # If redis.enabled is true, this the image that will be used to create + # the Redis deployment + image: + repository: redis + tag: 6.2.5 + pullPolicy: IfNotPresent + + # Node selector specific to installed Redis configuration. Requires redis.enabled to be true + nodeSelector: { } + + # Tolerations specific to installed Redis configuration. Requires redis.enabled to be true + tolerations: [ ] + + # Affinity specific to installed Redis configuration. 
Requires redis.enabled to be true + affinity: { } + + +nodeSelector: { } + From 435b928a32d50b6e9da92b8ae9d7a7720cf4fec1 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 1 Feb 2023 12:48:00 +0530 Subject: [PATCH 281/351] setting a standard time format of RFC3339Nano --- go.mod | 2 +- go.sum | 4 ++-- route/otlp_trace.go | 3 +-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 898286334f..18cac3b8cd 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 github.com/opsramp/husky v0.0.0-20230125043214-9eaefd645d5b - github.com/opsramp/libtrace-go v0.0.0-20230201042643-641737bf3d05 + github.com/opsramp/libtrace-go v0.0.0-20230201071021-120f60f5e6db github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index aea53f9b7b..8e8c6d00c3 100644 --- a/go.sum +++ b/go.sum @@ -587,8 +587,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/opsramp/husky v0.0.0-20230125043214-9eaefd645d5b h1:UCPQdTGlfqS/Cs4BO7C8lRppKy23Ok8OyQlz5c6Nt+U= github.com/opsramp/husky v0.0.0-20230125043214-9eaefd645d5b/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230201042643-641737bf3d05 h1:yiAd0O0Mdx4/fQF7hTJ0dMkfCywlLF/ZgJyQn3QbJyI= -github.com/opsramp/libtrace-go v0.0.0-20230201042643-641737bf3d05/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/libtrace-go v0.0.0-20230201071021-120f60f5e6db h1:Mr6PCQUs/pwLWT59h7jFyGS6S4mLvSgHw/EO86ZPk7Y= +github.com/opsramp/libtrace-go 
v0.0.0-20230201071021-120f60f5e6db/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 0199c87e1e..a8ce8b21fe 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -141,8 +141,7 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr var requestID types.RequestIDContextKey for _, item := range in.Items { - layout := "2006-01-02 15:04:05.000000000 +0000 UTC" - timestamp, err := time.Parse(layout, item.Timestamp) + timestamp, err := time.Parse(time.RFC3339Nano, item.Timestamp) if err != nil { r.Logger.Error().Logf("failed to parse timestamp: %v", err) continue From 46d2c7d68caa2ad97500bfca49d2e6b67795daf0 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 1 Feb 2023 13:07:35 +0530 Subject: [PATCH 282/351] parsing url to check if its proper before returning --- cmd/tracing-proxy/main.go | 5 ++++- config/file_config.go | 8 +++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index 4a6b9b190a..f39e67b4d7 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -136,7 +136,10 @@ func main() { opsrampkey, _ := c.GetOpsrampKey() opsrampsecret, _ := c.GetOpsrampSecret() - opsrampapi, _ := c.GetOpsrampAPI() + opsrampapi, err := c.GetOpsrampAPI() + if err != nil { + logrusLogger.Fatal(err) + } userAgentAddition := "tracing-proxy/" + version upstreamClient, err := libtrace.NewClient(libtrace.ClientConfig{ diff --git a/config/file_config.go b/config/file_config.go index 3cfbefb52a..2162f977eb 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -7,6 +7,7 @@ import ( "fmt" 
"io" "net" + "net/url" "os" "strings" "sync" @@ -566,7 +567,12 @@ func (f *fileConfig) GetOpsrampAPI() (string, error) { f.mux.RLock() defer f.mux.RUnlock() - return f.conf.OpsrampAPI, nil + u, err := url.Parse(f.conf.OpsrampAPI) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s://%s", u.Scheme, u.Hostname()), nil } func (f *fileConfig) GetOpsrampKey() (string, error) { From 9f288ccad12d13f298b494a64c8baedfa89a751d Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 1 Feb 2023 17:11:04 +0530 Subject: [PATCH 283/351] logrus caller fix --- logger/logrus.go | 61 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/logger/logrus.go b/logger/logrus.go index 173151b1a0..67faeaf996 100644 --- a/logger/logrus.go +++ b/logger/logrus.go @@ -1,10 +1,15 @@ package logger import ( + "fmt" "github.com/opsramp/tracing-proxy/config" "github.com/sirupsen/logrus" "gopkg.in/natefinch/lumberjack.v2" "os" + "path" + "runtime" + "strings" + "sync" ) // LogrusLogger is a Logger implementation that sends all logs to stdout using @@ -23,7 +28,8 @@ type LogrusEntry struct { func (l *LogrusLogger) Start() error { l.logger.SetLevel(l.level) - l.logger.SetReportCaller(true) + l.logger.SetReportCaller(false) // using a hook to do the same, so avoiding additional processing here + l.logger.AddHook(&CallerHook{}) logrusConfig, err := l.Config.GetLogrusConfig() if err != nil { @@ -208,3 +214,56 @@ func (l *LogrusEntry) Logf(f string, args ...interface{}) { l.entry.Errorf(f, args...) 
} } + +var ( + callerInitOnce sync.Once + presentProjectRoot string +) + +type CallerHook struct { +} + +func (h *CallerHook) Fire(entry *logrus.Entry) error { + functionName, fileName := h.caller() + if fileName != "" { + entry.Data[logrus.FieldKeyFile] = fileName + } + if functionName != "" { + entry.Data[logrus.FieldKeyFunc] = functionName + } + + return nil +} + +func (h *CallerHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} + +func (h *CallerHook) caller() (function string, file string) { + callerInitOnce.Do(func() { + presentProjectRoot, _ = os.Getwd() + presentProjectRoot = path.Join(presentProjectRoot, "../") + }) + + pcs := make([]uintptr, 25) + _ = runtime.Callers(0, pcs) + frames := runtime.CallersFrames(pcs) + + for next, again := frames.Next(); again; next, again = frames.Next() { + if !strings.Contains(next.File, "/usr/local/go/") && + !strings.Contains(next.File, "logger") && + !strings.Contains(next.File, "logrus") && + strings.HasPrefix(next.File, presentProjectRoot) { + return next.Function, fmt.Sprintf("%s:%d", strings.TrimPrefix(next.File, presentProjectRoot), next.Line) + } + } + + return +} From 8225ee12137fd6b66ecda06130f4bfd57575f8d2 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Thu, 2 Feb 2023 09:54:20 +0530 Subject: [PATCH 284/351] updating husky and libtrace-go deps --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 18cac3b8cd..d303090a66 100644 --- a/go.mod +++ b/go.mod @@ -19,8 +19,8 @@ require ( github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 - github.com/opsramp/husky v0.0.0-20230125043214-9eaefd645d5b - github.com/opsramp/libtrace-go v0.0.0-20230201071021-120f60f5e6db + 
github.com/opsramp/husky v0.0.0-20230202041143-752f307df09e + github.com/opsramp/libtrace-go v0.0.0-20230202041014-73d98b9c4b7c github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 8e8c6d00c3..b13f48f459 100644 --- a/go.sum +++ b/go.sum @@ -585,10 +585,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/opsramp/husky v0.0.0-20230125043214-9eaefd645d5b h1:UCPQdTGlfqS/Cs4BO7C8lRppKy23Ok8OyQlz5c6Nt+U= -github.com/opsramp/husky v0.0.0-20230125043214-9eaefd645d5b/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230201071021-120f60f5e6db h1:Mr6PCQUs/pwLWT59h7jFyGS6S4mLvSgHw/EO86ZPk7Y= -github.com/opsramp/libtrace-go v0.0.0-20230201071021-120f60f5e6db/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/husky v0.0.0-20230202041143-752f307df09e h1:o4MyRmiFnLsWPfynxvl21bKofcaMxKpLsvwriAlnZKk= +github.com/opsramp/husky v0.0.0-20230202041143-752f307df09e/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= +github.com/opsramp/libtrace-go v0.0.0-20230202041014-73d98b9c4b7c h1:Mmydh9H0k2SEhHKCbwKXNfKyRCX37u2oOqgb5WV0weU= +github.com/opsramp/libtrace-go v0.0.0-20230202041014-73d98b9c4b7c/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= 
github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= From f56eea8371e4b249eba3707aa43ef95eb43a3802 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Mon, 20 Feb 2023 14:12:18 +0530 Subject: [PATCH 285/351] adding README.md for packaging helm chart --- build/README.md | 82 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 build/README.md diff --git a/build/README.md b/build/README.md new file mode 100644 index 0000000000..20a71e90e6 --- /dev/null +++ b/build/README.md @@ -0,0 +1,82 @@ +# Publishing Helm Chart + +## Packaging the Chart + +```shell +$ helm package CHART-PATH +``` + +Replace CHART-PATH with the path to the directory that contains your Chart.yaml file. + +Helm uses the chart name and version for the archive file name. In case of opsramp-tracing-proxy it would be similar to +opsramp-tracing-proxy-0.1.0.tgz + +## Pushing the Chart to Google Artifact Repository + +### Install and Initialize Google Cloud CLI + +**Link:** https://cloud.google.com/sdk/docs/install-sdk + +### Configure Docker Config for Push + +```shell +$ gcloud auth configure-docker REPO-LOCATION +``` + +REPO-LOCATION can be found [location](https://cloud.google.com/artifact-registry/docs/repositories/repo-locations) + +### Pushing the Chart + +```shell +$ helm push opsramp-tracing-proxy-0.1.0.tgz oci://LOCATION-docker.pkg.dev/PROJECT/REPOSITORY +``` + +Replace the following values: + +**LOCATION** is the regional or +multi-regional [location](https://cloud.google.com/artifact-registry/docs/repositories/repo-locations) of the +repository. + +**PROJECT** is your Google +Cloud [project ID](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects). If +your project ID contains a colon (:), see Domain-scoped projects. 
+ +**REPOSITORY** is the name of the repository. + +### Verify that the push operation was successful + +```shell +$ gcloud artifacts docker images list LOCATION-docker.pkg.dev/PROJECT/REPOSITORY +``` + +## Installing the Helm Chart + +### Installing the Chart + +```shell +$ helm pull oci://LOCATION-docker.pkg.dev/PROJECT/REPOSITORY/IMAGE \ + --version VERSION \ + --untar + +$ helm create ns NAMESPACE + +$ cd opsramp-tracing-proxy +$ helm install opsramp-tracing-proxy -n opsramp-tracing-proxy . +``` + +Replace the following values: + +**LOCATION** is the regional or +multi-regional [location](https://cloud.google.com/artifact-registry/docs/repositories/repo-locations) of the +repository. + +**PROJECT** is your Google Cloud project ID. If +your [project ID](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) +contains a colon (:), see [Domain-scoped](https://cloud.google.com/artifact-registry/docs/docker/names#domain) projects. + +**REPOSITORY** is the name of the repository where the image is stored. + +**IMAGE** is the name of the image in the repository. + +**VERSION** is semantic version of the chart. This flag is required. Helm does not support pulling a chart using a tag. 
+ From 0114dfacc5a2663645f78068e36ffbe921d7ff8f Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Mon, 27 Feb 2023 15:17:12 +0530 Subject: [PATCH 286/351] changing version of helm chart to 1.0.0 --- build/README.md | 5 ++--- build/opsramp-tracing-proxy/Chart.yaml | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/build/README.md b/build/README.md index 20a71e90e6..a7ad4b4923 100644 --- a/build/README.md +++ b/build/README.md @@ -57,10 +57,9 @@ $ gcloud artifacts docker images list LOCATION-docker.pkg.dev/PROJECT/REPOSITORY $ helm pull oci://LOCATION-docker.pkg.dev/PROJECT/REPOSITORY/IMAGE \ --version VERSION \ --untar - -$ helm create ns NAMESPACE - $ cd opsramp-tracing-proxy + +$ kubectl create ns NAMESPACE $ helm install opsramp-tracing-proxy -n opsramp-tracing-proxy . ``` diff --git a/build/opsramp-tracing-proxy/Chart.yaml b/build/opsramp-tracing-proxy/Chart.yaml index 5fd699bfc7..b2fe70359b 100644 --- a/build/opsramp-tracing-proxy/Chart.yaml +++ b/build/opsramp-tracing-proxy/Chart.yaml @@ -7,7 +7,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 1.0.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to From ff2f8d2e9f543ebee3aad61074d94c187d3de575 Mon Sep 17 00:00:00 2001 From: saikalyan-bhagavathula <103252261+saikalyan-bhagavathula@users.noreply.github.com> Date: Mon, 27 Feb 2023 15:46:05 +0530 Subject: [PATCH 287/351] Kalyan develop (#2) * removing usage of github.com/davecgh/go-spew/spew * adding configure in opt directory * creating bin and conf folders in tracing/opt/opsramp/tracing-proxy * copying latest binary to the package --- build/tracing-deb/script.sh | 6 ++- build/tracing-rpm/script.sh | 5 +++ cmd/test_redimem/main.go | 87 ++++++++++++++++++------------------- go.mod | 3 +- go.sum | 2 + 5 files changed, 57 insertions(+), 46 deletions(-) diff --git a/build/tracing-deb/script.sh b/build/tracing-deb/script.sh index 5645b05e0d..4d9b8c7f21 100644 --- a/build/tracing-deb/script.sh +++ b/build/tracing-deb/script.sh @@ -11,10 +11,14 @@ fi sed -i "/^Architecture/s/:.*$/: ${architecture}/g" tracing/DEBIAN/control # Updating the files +mkdir -p tracing/opt/opsramp/tracing-proxy/bin +mkdir -p tracing/opt/opsramp/tracing-proxy/conf cp ../../config_complete.toml tracing/opt/opsramp/tracing-proxy/conf/config_complete.toml cp ../../rules_complete.toml tracing/opt/opsramp/tracing-proxy/conf/rules_complete.toml -go build ../cmd/tracing-proxy/main.go +go build ../../cmd/tracing-proxy/main.go cp ../../cmd/tracing-proxy/main tracing/opt/opsramp/tracing-proxy/bin/tracing-proxy +go build configure.go +cp configure tracing/opt/opsramp/tracing-proxy/bin/configure dpkg -b tracing diff --git a/build/tracing-rpm/script.sh b/build/tracing-rpm/script.sh index cac0ec9b12..ec474f63fb 100644 --- a/build/tracing-rpm/script.sh +++ b/build/tracing-rpm/script.sh @@ -9,10 +9,15 @@ Version=$1 sed -i "/^\%define version/s/^.*$/\%define version ${Version}/g" tracing-proxy.spec # Updating the files +mkdir -p opt/opsramp/tracing-proxy/conf +mkdir -p opt/opsramp/tracing-proxy/bin cp ../../config_complete.toml 
opt/opsramp/tracing-proxy/conf/config_complete.toml cp ../../rules_complete.toml opt/opsramp/tracing-proxy/conf/rules_complete.toml go build ../../cmd/tracing-proxy/main.go +go build configure.go cp ../../cmd/tracing-proxy/main opt/opsramp/tracing-proxy/bin/tracing-proxy +cp configure opt/opsramp/tracing-proxy/bin/configure + mkdir tracing-proxy-$1 diff --git a/cmd/test_redimem/main.go b/cmd/test_redimem/main.go index a9732fce13..d889125036 100644 --- a/cmd/test_redimem/main.go +++ b/cmd/test_redimem/main.go @@ -9,7 +9,6 @@ import ( "sync" "time" - "github.com/davecgh/go-spew/spew" "github.com/gomodule/redigo/redis" "github.com/sirupsen/logrus" @@ -234,49 +233,49 @@ func singleTestRandomLength(limit, registerDurLimitSec int, rm *redimem.RedisMem // adds two entries with various sleeps and verifies they're there at the // expected times -func linearTest(rm *redimem.RedisMembership) { - ctx := context.Background() - logrus.Infoln("about to register one for 3sec") - rm.Register(ctx, "one", 3*time.Second) - - logrus.Infoln("about to sleep for 2sec") - time.Sleep(2 * time.Second) - - logrus.Infoln("checking for one") - list, _ := rm.GetMembers(ctx) - spew.Dump(list) - - logrus.Infoln("about to register two for 3sec") - rm.Register(ctx, "two", 3*time.Second) - - logrus.Infoln("checking for one and two") - list, _ = rm.GetMembers(ctx) - spew.Dump(list) - - logrus.Infoln("about to sleep for 1.5sec") - time.Sleep(1500 * time.Millisecond) - - logrus.Infoln("checking list; one should be missing, two should be there") - list, _ = rm.GetMembers(ctx) - spew.Dump(list) - - logrus.Infoln("about to re-register two for 3sec") - rm.Register(ctx, "two", 3*time.Second) - - logrus.Infoln("about to sleep for 2sec") - time.Sleep(2 * time.Second) - - logrus.Infoln("checking list; one should be missing, two should be there") - list, _ = rm.GetMembers(ctx) - spew.Dump(list) - - logrus.Infoln("about to sleep for 1.5sec") - time.Sleep(1500 * 
time.Millisecond) - - logrus.Infoln("checking list; both should be missing") - list, _ = rm.GetMembers(ctx) - spew.Dump(list) -} +//func linearTest(rm *redimem.RedisMembership) { +// ctx := context.Background() +// logrus.Infoln("about to register one for 3sec") +// rm.Register(ctx, "one", 3*time.Second) +// +// logrus.Infoln("about to sleep for 2sec") +// time.Sleep(2 * time.Second) +// +// logrus.Infoln("checking for one") +// list, _ := rm.GetMembers(ctx) +// spew.Dump(list) +// +// logrus.Infoln("about to register two for 3sec") +// rm.Register(ctx, "two", 3*time.Second) +// +// logrus.Infoln("checking for one and two") +// list, _ = rm.GetMembers(ctx) +// spew.Dump(list) +// +// logrus.Infoln("about to sleep for 1.5sec") +// time.Sleep(1500 * time.Millisecond) +// +// logrus.Infoln("checking list; one should be missing, two should be there") +// list, _ = rm.GetMembers(ctx) +// spew.Dump(list) +// +// logrus.Infoln("about to re-register two for 3sec") +// rm.Register(ctx, "two", 3*time.Second) +// +// logrus.Infoln("about to sleep for 2sec") +// time.Sleep(2 * time.Second) +// +// logrus.Infoln("checking list; one should be missing, two should be there") +// list, _ = rm.GetMembers(ctx) +// spew.Dump(list) +// +// logrus.Infoln("about to sleep for 1.5sec") +// time.Sleep(1500 * time.Millisecond) +// +// logrus.Infoln("checking list; both should be missing") +// list, _ = rm.GetMembers(ctx) +// spew.Dump(list) +//} const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" diff --git a/go.mod b/go.mod index d303090a66..addb162d36 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/opsramp/tracing-proxy go 1.19 require ( - github.com/davecgh/go-spew v1.1.1 github.com/dgryski/go-wyhash v0.0.0-20191203203029-c4841ae36371 github.com/facebookgo/inject v0.0.0-20180706035515-f23751cae28b github.com/facebookgo/startstop v0.0.0-20161013234910-bc158412526d @@ -42,6 
+41,7 @@ require ( require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 // indirect github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 // indirect @@ -71,6 +71,7 @@ require ( github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + golang.org/x/crypto v0.1.0 // indirect golang.org/x/net v0.4.0 // indirect golang.org/x/sys v0.3.0 // indirect golang.org/x/text v0.5.0 // indirect diff --git a/go.sum b/go.sum index b13f48f459..2712316920 100644 --- a/go.sum +++ b/go.sum @@ -676,6 +676,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= From e7abffedb2eed48f636fdd8271bfe371fa72a604 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Wed, 8 Mar 2023 12:36:18 +0530 Subject: [PATCH 288/351] 
updating default ports in trace-proxy helm --- Dockerfile | 2 +- .../templates/_helpers.tpl | 17 + .../templates/deployment.yaml | 10 +- .../opsramp-tracing-proxy/templates/rules.tpl | 295 ------------------ .../templates/service.yaml | 19 +- build/opsramp-tracing-proxy/values.yaml | 15 +- 6 files changed, 45 insertions(+), 313 deletions(-) delete mode 100644 build/opsramp-tracing-proxy/templates/rules.tpl diff --git a/Dockerfile b/Dockerfile index f49116842c..d45f7cd878 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM golang:alpine as builder RUN apk update && apk add --no-cache git bash ca-certificates && update-ca-certificates -ARG BUILD_ID=dev +ARG BUILD_ID="1.0.0" WORKDIR /app diff --git a/build/opsramp-tracing-proxy/templates/_helpers.tpl b/build/opsramp-tracing-proxy/templates/_helpers.tpl index 9280a8fcf7..37cab6a7ba 100644 --- a/build/opsramp-tracing-proxy/templates/_helpers.tpl +++ b/build/opsramp-tracing-proxy/templates/_helpers.tpl @@ -50,6 +50,23 @@ app.kubernetes.io/name: {{ include "opsramp-tracing-proxy.name" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} +{{/* +Service Ports +*/}} +{{- define "httpPort" -}} +{{ if .Values.service }} {{ default 8082 .Values.service.http }} {{ else }} 8082 {{ end }} +{{- end }} +{{- define "grpcPort" -}} +{{ if .Values.service }} {{ default 9090 .Values.service.grpc }} {{ else }} 9090 {{ end }} +{{- end }} +{{- define "httpPeerPort" -}} +{{ if .Values.service }} {{ default 8081 .Values.service.peer }} {{ else }} 8081 {{ end }} +{{- end }} +{{- define "grpcPeerPort" -}} +{{ if .Values.service }} {{ default 8084 .Values.service.grpcPeer }} {{ else }} 8084 {{ end }} +{{- end }} + + {{/* Image Defaults */}} diff --git a/build/opsramp-tracing-proxy/templates/deployment.yaml b/build/opsramp-tracing-proxy/templates/deployment.yaml index 99b1ec7200..4abe0ea96f 100644 --- a/build/opsramp-tracing-proxy/templates/deployment.yaml +++ b/build/opsramp-tracing-proxy/templates/deployment.yaml @@ -33,14 +33,16 @@ spec: - "-r" - "/etc/tracing-proxy/rules.yaml" ports: - - name: data - containerPort: {{ .Values.service.port }} + - name: http + containerPort: {{include "httpPort" . | trim }} protocol: TCP - name: peer - containerPort: {{ .Values.service.peerPort }} + containerPort: {{include "httpPeerPort" . | trim }} protocol: TCP - - containerPort: {{ .Values.service.grpcPort }} + - containerPort: {{include "grpcPort" . | trim }} name: grpc + - containerPort: {{include "grpcPeerPort" . | trim }} + name: grpc-peer {{- with .Values.resources }} resources: {{- toYaml . 
| nindent 12 }} diff --git a/build/opsramp-tracing-proxy/templates/rules.tpl b/build/opsramp-tracing-proxy/templates/rules.tpl deleted file mode 100644 index d64590d2ce..0000000000 --- a/build/opsramp-tracing-proxy/templates/rules.tpl +++ /dev/null @@ -1,295 +0,0 @@ -{{/* -Complete Rules TOML -*/}} - -{{- define "rulesTOML" -}} -############################ -## Sampling Rules Config ## -############################ - -# DryRun - If enabled, marks traces that would be dropped given current sampling rules, -# and sends all traces regardless -DryRun = true - -# DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to refinery_kept -DryRunFieldName = "fromProxy" - -# DeterministicSampler is a section of the config for manipulating the -# Deterministic Sampler implementation. This is the simplest sampling algorithm -# - it is a static sample rate, choosing traces randomly to either keep or send -# (at the appropriate rate). It is not influenced by the contents of the trace. -Sampler = "DeterministicSampler" - -# SampleRate is the rate at which to sample. It indicates a ratio, where one -# sample trace is kept for every n traces seen. For example, a SampleRate of 30 -# will keep 1 out of every 30 traces. The choice on whether to keep any specific -# trace is random, so the rate is approximate. -# Eligible for live reload. -SampleRate = 1 - -[dataset1] - - # Note: If your dataset name contains a space, you will have to escape the dataset name - # using single quotes, such as ['dataset 1'] - - # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler - # implementation. This sampler collects the values of a number of fields from a - # trace and uses them to form a key. This key is handed to the standard dynamic - # sampler algorithm which generates a sample rate based on the frequency with - # which that key has appeared in the previous ClearFrequencySec seconds. 
See - # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics - # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from - # that package. - Sampler = "DynamicSampler" - - # SampleRate is the goal rate at which to sample. It indicates a ratio, where - # one sample trace is kept for every n traces seen. For example, a SampleRate of - # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic - # sampler, who assigns a sample rate for each trace based on the fields selected - # from that trace. - # Eligible for live reload. - SampleRate = 2 - - # FieldList is a list of all the field names to use to form the key that will be - # handed to the dynamic sampler. The cardinality of the combination of values - # from all of these keys should be reasonable in the face of the frequency of - # those keys. If the combination of fields in these keys essentially makes them - # unique, the dynamic sampler will do no sampling. If the keys have too few - # values, you won't get samples of the most interesting traces. A good key - # selection will have consistent values for high frequency boring traffic and - # unique values for outliers and interesting traffic. Including an error field - # (or something like HTTP status code) is an excellent choice. As an example, - # assuming 30 or so endpoints, a combination of HTTP endpoint and status code - # would be a good set of keys in order to let you see accurately use of all - # endpoints and call out when there is failing traffic to any endpoint. Field - # names may come from any span in the trace. - # Eligible for live reload. - FieldList = ["request.method","response.status_code"] - - # UseTraceLength will add the number of spans in the trace in to the dynamic - # sampler as part of the key. The number of spans is exact, so if there are - # normally small variations in trace length you may want to leave this off. 
If - # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Opsramp, set this to true. - # Eligible for live reload. - UseTraceLength = true - - # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field - # to the root span of the trace containing the key used by the sampler to decide - # the sample rate. This can be helpful in understanding why the sampler is - # making certain decisions about sample rate and help you understand how to - # better choose the sample rate key (aka the FieldList setting above) to use. - AddSampleRateKeyToTrace = true - - # AddSampleRateKeyToTraceField is the name of the field the sampler will use - # when adding the sample rate key to the trace. This setting is only used when - # AddSampleRateKeyToTrace is true. - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - # ClearFrequencySec is the name of the field the sampler will use to determine - # the period over which it will calculate the sample rate. This setting defaults - # to 30. - # Eligible for live reload. - ClearFrequencySec = 60 - -[dataset2] - - # EMADynamicSampler is a section of the config for manipulating the Exponential - # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, - # it attempts to average a given sample rate, weighting rare traffic and frequent - # traffic differently so as to end up with the correct average. - # - # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended - # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs - # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential - # Moving Average of counts seen per key, and adjusts this average at regular intervals. 
- # The weight applied to more recent intervals is defined by `weight`, a number between - # (0, 1) - larger values weight the average more toward recent observations. In other words, - # a larger weight will cause sample rates more quickly adapt to traffic patterns, - # while a smaller weight will result in sample rates that are less sensitive to bursts or drops - # in traffic and thus more consistent over time. - # - # Keys that are not found in the EMA will always have a sample - # rate of 1. Keys that occur more frequently will be sampled on a logarithmic - # curve. In other words, every key will be represented at least once in any - # given window and more frequent keys will have their sample rate - # increased proportionally to wind up with the goal sample rate. - Sampler = "EMADynamicSampler" - - # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where - # one sample trace is kept for every n traces seen. For example, a SampleRate of - # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic - # sampler, who assigns a sample rate for each trace based on the fields selected - # from that trace. - # Eligible for live reload. - GoalSampleRate = 2 - - # FieldList is a list of all the field names to use to form the key that will be - # handed to the dynamic sampler. The cardinality of the combination of values - # from all of these keys should be reasonable in the face of the frequency of - # those keys. If the combination of fields in these keys essentially makes them - # unique, the dynamic sampler will do no sampling. If the keys have too few - # values, you won't get samples of the most interesting traces. A good key - # selection will have consistent values for high frequency boring traffic and - # unique values for outliers and interesting traffic. Including an error field - # (or something like HTTP status code) is an excellent choice. 
As an example, - # assuming 30 or so endpoints, a combination of HTTP endpoint and status code - # would be a good set of keys in order to let you see accurately use of all - # endpoints and call out when there is failing traffic to any endpoint. Field - # names may come from any span in the trace. - # Eligible for live reload. - FieldList = ["request.method","response.status_code"] - - # UseTraceLength will add the number of spans in the trace in to the dynamic - # sampler as part of the key. The number of spans is exact, so if there are - # normally small variations in trace length you may want to leave this off. If - # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Opsramp, set this to true. - # Eligible for live reload. - UseTraceLength = true - - # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field - # to the root span of the trace containing the key used by the sampler to decide - # the sample rate. This can be helpful in understanding why the sampler is - # making certain decisions about sample rate and help you understand how to - # better choose the sample rate key (aka the FieldList setting above) to use. - AddSampleRateKeyToTrace = true - - # AddSampleRateKeyToTraceField is the name of the field the sampler will use - # when adding the sample rate key to the trace. This setting is only used when - # AddSampleRateKeyToTrace is true. - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - # AdjustmentInterval defines how often (in seconds) we adjust the moving average from - # recent observations. Default 15s - # Eligible for live reload. - AdjustmentInterval = 15 - - # Weight is a value between (0, 1) indicating the weighting factor used to adjust - # the EMA. With larger values, newer data will influence the average more, and older - # values will be factored out more quickly. 
In mathematical literature concerning EMA, - # this is referred to as the `alpha` constant. - # Default is 0.5 - # Eligible for live reload. - Weight = 0.5 - - # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. - # Once MaxKeys is reached, new keys will not be included in the sample rate map, but - # existing keys will continue to be be counted. You can use this to keep the sample rate - # map size under control. - # Eligible for live reload - MaxKeys = 0 - - # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key - # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to - # decide what constitutes "zero". Keys with averages below this threshold will be removed - # from the EMA. Default is the same as Weight, as this prevents a key with the smallest - # integer value (1) from being aged out immediately. This value should generally be <= Weight, - # unless you have very specific reasons to set it higher. - # Eligible for live reload - AgeOutValue = 0.5 - - # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define - # the burst detection threshold. If total counts observed for a given interval exceed the threshold - # EMA is updated immediately, rather than waiting on the AdjustmentInterval. - # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, - # burst detection will kick in. - # Eligible for live reload - BurstMultiple = 2.0 - - # BurstDetectionDelay indicates the number of intervals to run after Start is called before - # burst detection kicks in. 
- # Defaults to 3 - # Eligible for live reload - BurstDetectionDelay = 3 - -[dataset3] - - Sampler = "DeterministicSampler" - SampleRate = 10 - -[dataset4] - - Sampler = "RulesBasedSampler" - # Optional, if set to true then the rules will also check nested json fields, in the format of parent.child - CheckNestedFields = false - - [[dataset4.rule]] - name = "drop healthchecks" - drop = true - [[dataset4.rule.condition]] - field = "http.route" - operator = "=" - value = "/health-check" - - [[dataset4.rule]] - name = "keep slow 500 errors" - SampleRate = 1 - [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = 500 - [[dataset4.rule.condition]] - field = "duration_ms" - operator = ">=" - value = 1000.789 - - [[dataset4.rule]] - name = "dynamically sample 200 responses" - [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = 200 - [dataset4.rule.sampler.EMADynamicSampler] - Sampler = "EMADynamicSampler" - GoalSampleRate = 15 - FieldList = ["request.method", "request.route"] - AddSampleRateKeyToTrace = true - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - # Note that Refinery comparisons are type-dependent. If you are operating in an environment where different - # telemetry may send the same field with different types (for example, some systems send status codes as "200" - # instead of 200), you may need to create additional rules to cover these cases. 
- [[dataset4.rule]] - name = "dynamically sample 200 string responses" - [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = "200" - [dataset4.rule.sampler.EMADynamicSampler] - Sampler = "EMADynamicSampler" - GoalSampleRate = 15 - FieldList = ["request.method", "request.route"] - AddSampleRateKeyToTrace = true - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - [[dataset4.rule]] - name = "sample traces originating from a service" - # if scope is set to "span", a single span in the trace must match - # *all* of the conditions associated with this rule for the rule to - # apply to the trace. - # - # this is especially helpful when sampling a dataset written to - # by multiple services that call one another in normal operation – - # you can set Scope to 'span' to attribute traces to an origin - # service in a way that would be difficult without it. - Scope = "span" - SampleRate = 5 - [[dataset4.rule.condition]] - field = "service name" - operator = "=" - value = "users" - [[dataset4.rule.condition]] - field = "meta.span_type" - operator = "=" - value = "root" - - [[dataset4.rule]] - SampleRate = 10 # default when no rules match, if missing defaults to 10 - -[dataset5] - - Sampler = "TotalThroughputSampler" - GoalThroughputPerSec = 100 - FieldList = "[request.method]" -{{- end }} \ No newline at end of file diff --git a/build/opsramp-tracing-proxy/templates/service.yaml b/build/opsramp-tracing-proxy/templates/service.yaml index 04de2144d6..64b557ee66 100644 --- a/build/opsramp-tracing-proxy/templates/service.yaml +++ b/build/opsramp-tracing-proxy/templates/service.yaml @@ -12,13 +12,20 @@ metadata: spec: type: {{ .Values.service.type }} ports: - - port: {{ .Values.service.grpcPort }} - targetPort: grpc + - port: {{include "httpPort" . | trim }} + targetPort: http protocol: TCP - name: grpc - - port: {{ .Values.service.port }} - targetPort: data + name: http + - port: {{include "httpPeerPort" . 
| trim }} + targetPort: peer protocol: TCP - name: data + name: peer + - port: {{include "grpcPort" . | trim }} + targetPort: grpc + name: grpc + - port: {{include "grpcPeerPort" . | trim }} + targetPort: grpc-peer + name: grpc-peer + selector: {{- include "opsramp-tracing-proxy.selectorLabels" . | nindent 4 }} \ No newline at end of file diff --git a/build/opsramp-tracing-proxy/values.yaml b/build/opsramp-tracing-proxy/values.yaml index ecde9b21d4..3eb75e3f9e 100644 --- a/build/opsramp-tracing-proxy/values.yaml +++ b/build/opsramp-tracing-proxy/values.yaml @@ -24,22 +24,23 @@ fullnameOverride: "" service: type: ClusterIP - port: 80 - grpcPort: 9090 - peerPort: 8081 + http: 8082 + peer: 8081 + grpc: 9090 + grpcPeer: 8084 annotations: { } config: # ListenAddr is the IP and port on which to listen for incoming events. - ListenAddr: 0.0.0.0:8080 + ListenAddr: 0.0.0.0:{{include "httpPort" . | trim }} # GRPCListenAddr is the IP and port on which to listen for incoming events over gRPC. - GRPCListenAddr: 0.0.0.0:9090 + GRPCListenAddr: 0.0.0.0:{{include "grpcPort" . | trim }} # PeerListenAddr is the IP and port on which to listen for traffic being rerouted from a peer. - PeerListenAddr: 0.0.0.0:8081 + PeerListenAddr: 0.0.0.0:{{include "httpPeerPort" . | trim }} - GRPCPeerListenAddr: 0.0.0.0:8084 + GRPCPeerListenAddr: 0.0.0.0:{{include "grpcPeerPort" . | trim }} # ProxyProtocol accepts http and https # Not Eligible for live reload. 
From e42736a9b8b16b50e4d8abc9102a4f8f1a46b510 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Wed, 8 Mar 2023 13:32:44 +0530 Subject: [PATCH 289/351] updating comments in helm values.yaml --- build/opsramp-tracing-proxy/values.yaml | 26 ++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/build/opsramp-tracing-proxy/values.yaml b/build/opsramp-tracing-proxy/values.yaml index 3eb75e3f9e..c19e8e686e 100644 --- a/build/opsramp-tracing-proxy/values.yaml +++ b/build/opsramp-tracing-proxy/values.yaml @@ -59,7 +59,7 @@ config: ProxyPassword: "" # CompressPeerCommunication determines whether trace-proxy will compress span data - # it forwards to peers. If it costs money to transmit data between refinery + # it forwards to peers. If it costs money to transmit data between OpsRamp-Tracing-Proxy # instances (e.g. they're spread across AWS availability zones), then you # almost certainly want compression enabled to reduce your bill. The option to # disable it is provided as an escape hatch for deployments that value lower CPU @@ -127,22 +127,22 @@ config: # EnvironmentCacheTTL is the amount of time a cache entry will live that associates # an API key with an environment name. - # Cache misses lookup the environment name using HoneycombAPI config value. + # Cache misses lookup the environment name using OpsRampAPI config value. # Default is 1 hour ("1h"). # Not eligible for live reload. EnvironmentCacheTTL: "1h" # QueryAuthToken, if specified, provides a token that must be specified with - # the header "X-Honeycomb-Refinery-Query" in order for a /query request to succeed. - # These /query requests are intended for debugging refinery installations and + # the header "X-OpsRamp-Tracing-Proxy-Query" in order for a /query request to succeed. + # These /query requests are intended for debugging OpsRamp-Tracing-Proxy installations and # are not typically needed in normal operation. 
- # Can be specified in the environment as REFINERY_QUERY_AUTH_TOKEN. + # Can be specified in the environment as TRACING_PROXY_QUERY_AUTH_TOKEN. # If left unspecified, the /query endpoints are inaccessible. # Not eligible for live reload. # QueryAuthToken: "some-random-value" - # AddRuleReasonToTrace causes traces that are sent to Honeycomb to include the field `meta.refinery.reason`. - # This field contains text indicating which rule was evaluated that caused the trace to be included. + # AddRuleReasonToTrace causes traces that are sent to OpsRamp to include a field which + # contains text indicating which rule was evaluated that caused the trace to be included. # Eligible for live reload. AddRuleReasonToTrace: true @@ -178,7 +178,7 @@ config: # in OpsRampMetrics and only works when the Metrics is set to "prometheus") SendMetricsToOpsRamp: false - # Configure how Refinery peers are discovered and managed + # Configure how OpsRamp-Tracing-Proxy peers are discovered and managed PeerManagement: Strategy: "hash" # Always use hash for balanced distribution of traces @@ -186,7 +186,7 @@ config: Type: "redis" # RedisHost is used to connect to redis for peer cluster membership management. - # Further, if the environment variable 'REFINERY_REDIS_HOST' is set it takes + # Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes # precedence and this value is ignored. # Not eligible for live reload. # RedisHost will default to the name used for the release or name overrides depending on what is used, @@ -194,13 +194,13 @@ config: RedisHost: '{{include "opsramp-tracing-proxy.redis.fullname" .}}:6379' # RedisUsername is the username used to connect to redis for peer cluster membership management. - # If the environment variable 'REFINERY_REDIS_USERNAME' is set it takes + # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes # precedence and this value is ignored. # Not eligible for live reload. 
RedisUsername: "" # RedisPassword is the password used to connect to redis for peer cluster membership management. - # If the environment variable 'REFINERY_REDIS_PASSWORD' is set it takes + # If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes # precedence and this value is ignored. # Not eligible for live reload. RedisPassword: "" @@ -226,7 +226,7 @@ config: ############################ ## Implementation Choices ## ############################ - # Each of the config options below chooses an implementation of a Refinery + # Each of the config options below chooses an implementation of a OpsRamp-Tracing-Proxy # component to use. Depending on the choice there may be more configuration # required below in the section for that choice. Changing implementation choices # requires a process restart; these changes will not be picked up by a live @@ -271,7 +271,7 @@ config: OpsRampMetrics: # MetricsListenAddr determines the interface and port on which Prometheus will - # listen for requests for /metrics. Must be different from the main Refinery + # listen for requests for /metrics. Must be different from the main OpsRamp-Tracing-Proxy # listener. # Not eligible for live reload. 
MetricsListenAddr: 'localhost:2112' From 5d7b2c69d6a9f4a31561fa8385b0657631b9ef88 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Tue, 14 Mar 2023 12:20:25 +0530 Subject: [PATCH 290/351] removing refinery references --- CHANGELOG.md | 426 ---------------------------------------- RELEASING.md | 8 - collect/cache/cuckoo.go | 2 +- go.mod | 7 +- go.sum | 13 +- 5 files changed, 6 insertions(+), 450 deletions(-) delete mode 100644 CHANGELOG.md delete mode 100644 RELEASING.md diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 6baf821c38..0000000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,426 +0,0 @@ -# tracing-proxy Changelog - -## 1.19.0 2022-11-09 - -Adds new query command to retrieve configuration metadata, and also allows for a new (optional) cache management strategy that should be more effective at preventing OOM crashes in situations where memory is under pressure. - -### Enhancements - -- Add command to query config metadata (#556) | [@kentquirk](https://github.com/kentquirk) -- New cache management strategy (#547) | [@kentquirk](https://github.com/kentquirk) - -### Fixes - -- Set content-type on marshalToFormat (#548) | [@kentquirk](https://github.com/kentquirk) - -### Maintenance - -- Bump google.golang.org/grpc from 1.50.0 to 1.50.1 (#553) -- Bump github.com/fsnotify/fsnotify from 1.5.4 to 1.6.0 (#552) -- Bump github.com/stretchr/testify from 1.8.0 to 1.8.1 (#551) -- Bump github.com/honeycombio/libhoney-go from 1.16.0 to 1.18.0 (#550) -- Bump github.com/klauspost/compress from 1.15.11 to 1.15.12 (#549) - -## 1.18.0 2022-10-12 - -### Enhancements - -- Track span count and optionally add it to root (#532) | [@kentquirk](https://github.com/kentquirk) -- Add support for metrics api key env var (#535) | [@TylerHelmuth](https://github.com/TylerHelmuth) - -### Fixes - -- RedisIdentifier now operates properly in more 
circumstances (#521) | [@Baliedge](https://github.com/Baliedge) -- Properly set metadata to values that will work. (#523) | [@kentquirk](https://github.com/kentquirk) - -### Maintenance - -- maint: add new project workflow (#537) | [@vreynolds](https://github.com/vreynolds) -- Bump go version to 1.19 (#534) | [@TylerHelmuth](https://github.com/TylerHelmuth) -- Bump github.com/klauspost/compress from 1.15.9 to 1.15.11 (#531) -- Bump github.com/honeycombio/husky from 0.15.0 to 0.16.1 (#529) -- Bump github.com/prometheus/client_golang from 1.12.2 to 1.13.0 (#528) -- Bump github.com/spf13/viper from 1.12.0 to 1.13.0 (#527) -- Bump Husky to v0.17.0 (#538) | [@kentquirk](https://github.com/kentquirk) - -### New Contributors - -- @Baliedge made their first contribution in https://github.com/honeycombio/refinery/pull/521 -- @TylerHelmuth made their first contribution in https://github.com/honeycombio/refinery/pull/534 - -**Full Changelog**: https://github.com/honeycombio/refinery/compare/v1.17.0...v1.18.0 - -## 1.17.0 2022-09-16 - -### Enhancements - -- Allow adding extra fields to error logs (#514) | [@kentquirk](https://github.com/kentquirk) -- Allow BatchTimeout to be overriden on the libhoney Transmission (#509) | [@leviwilson](https://github.com/leviwilson) - -### Fixes - -- Consolidate honeycomb metrics to use single lock & fix concurrent read/write (#511)| [@MikeGoldsmith](https://github.com/MikeGoldsmith) -- Fix variable shadowing bug (#519)| [@kentquirk](https://github.com/kentquirk) - -## 1.16.0 2022-09-09 - -This release contains a number of small new features to assist in running refinery more effectively: - -- Adds new endpoints to help in debugging refinery rules (see README.md) -- Fixes issues with SampleRate -- Adds some new 
configuration parameters (see the *_complete.toml files for more) -- Conforms to the GRPC standard for health probes -- Accepts OTLP/JSON traces and conforms to the most recent OTLP trace specification - -### Enhancements - -- Add /query endpoints to help debug refinery rules (#500, #502) | [kentquirk](https://github.com/kentquirk) -- Implement grpc-health-probe (#498) | [abatilo](https://github.com/abatilo) -- Make gRPC ServerParameters configurable (#499) | [abatilo](https://github.com/abatilo) -- Fix sample rate for late spans (#504) | [kentquirk](https://github.com/kentquirk) -- Optionally record why a sample decision was made (#503) | [kentquirk](https://github.com/kentquirk) -- Added PeerManagement.Timeout config option (#491) | [thrawn01](https://github.com/thrawn01) -- Add 'meta.refinery.original_sample_rate' (#508) | [epvanhouten](https://github.com/epvanhouten) - -### Maintenance - -- maint: improvements to GitHub operation (#474, #477, #478) | [JamieDanielson](https://github.com/JamieDanielson), [vreynolds](https://github.com/vreynolds) - -### Dependencies - -- Bump github.com/stretchr/testify from 1.7.2 to 1.8.0 (#472) | [dependabot](https://github.com/dependabot) -- Bump github.com/sirupsen/logrus from 1.8.1 to 1.9.0 (#484) | [dependabot](https://github.com/dependabot) -- Bump google.golang.org/grpc from 1.46.2 to 1.49.0 (#485, 494) | [dependabot](https://github.com/dependabot) -- Bump github.com/honeycombio/libhoney-go from 1.15.8 to 1.16.0 (#487) | [dependabot](https://github.com/dependabot) -- Bump github.com/gomodule/redigo from 1.8.8 to 1.8.9 (#488) | [dependabot](https://github.com/dependabot) -- Bump github.com/klauspost/compress from 1.15.7 to 1.15.9 (#495) | 
[dependabot](https://github.com/dependabot) -- Bump github.com/tidwall/gjson from 1.14.1 to 1.14.3 (#497) | [dependabot](https://github.com/dependabot) -- Update github.com/honeycombio/husky to latest and fix breaking changes (#505) | [kentquirk](https://github.com/kentquirk) -- Go mod tidy (#507) | [kentquirk](https://github.com/kentquirk) - -## New Contributors - -- @abatilo made their first contribution in https://github.com/honeycombio/refinery/pull/498 -- @thrawn01 made their first contribution in https://github.com/honeycombio/refinery/pull/491 -- @epvanhouten made their first contribution in https://github.com/honeycombio/refinery/pull/508 - -**Full Changelog**: https://github.com/honeycombio/refinery/compare/v1.15.0...v1.16.0 - -## 1.15.0 2022-07-01 - -### Enhancements - -- Add rule Scope configuration option to rules-based sampler (#440) | [isnotajoke](https://github.com/isnotajoke) -- Replace hand-rolled binary.BigEndian.Uint32 with the real deal (#459) | [toshok](https://github.com/toshok) -- Validate successful span scoped rules test (#465) | [MikeGoldsmith](https://github.com/MikeGoldsmith) -- Create helm-chart issue on release (#458) | [MikeGoldsmith](https://github.com/MikeGoldsmith) -- github_token needs underscore not hyphen (#464) | [@JamieDanielson](https://github.com/JamieDanielson) - -### Maintenance - -- Replace legacy with classic in readme (#457) | [MikeGoldsmith](https://github.com/MikeGoldsmith) - -### Dependencies - -- Bump github.com/spf13/viper from 1.10.1 to 1.12.0 (#461) -- Bump github.com/stretchr/testify from 1.7.1 to 1.7.2 (#467) -- Bump github.com/honeycombio/husky from 0.10.5 to 0.10.6 (#460) -- Bump github.com/klauspost/compress from 1.15.4 to 1.15.6 
(#466) -- Bump github.com/prometheus/client_golang from 1.12.1 to 1.12.2 (#463) - -## 1.14.1 2022-05-16 - -### Fixes - -- Fix crash bug related to sharding (#455) | [@kentquirk](https://github.com/kentquirk) - -### Maintenance - -- bump husky to 0.10.5 (#450) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) -- Bump github.com/klauspost/compress from 1.15.2 to 1.15.4 (#451) | dependabot -- Bump github.com/tidwall/gjson from 1.14.0 to 1.14.1 (#444) | dependabot -- Bump github.com/fsnotify/fsnotify from 1.5.1 to 1.5.4 (#441) | dependabot - -### Documentation - -- add a note about reloading the configuration when running within docker (#448) | [@leviwilson](https://github.com/leviwilson) -- README: remove incorrect mention of sending SIGUSR1 to trigger a configuration reload (#447) | [@jharley](https://github.com/jharley) - -## 1.14.0 2022-05-03 - -### Enhancements - -- Add support for environment and dataset rules with same names (#438) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) - -### Maintenance - -- Update otlp to v0.11.0 (#437) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) -- Update go to 1.18 (#430) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) - -**Note**: The docker image used to create the binaries has been updated to a version that does not suffer a [OpenSSL CVE](https://mta.openssl.org/pipermail/openssl-announce/2022-March/000219.html). 
- -## 1.13.0 2022-04-08 - -### Enhancements - -- Add parsing for nested json fields in the rules sampler (#418) | [@ecobrien29](https://github.com/ecobrien29) - -### Maintenance - -- Update husky to v0.10.3 (#431) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) -- Bump google.golang.org/grpc from 1.43.0 to 1.45.0 (#428) -- Bump github.com/klauspost/compress from 1.13.6 to 1.15.1 (#427) -- Bump github.com/stretchr/testify from 1.7.0 to 1.7.1 (#426) -- Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1 (#390) - -## 1.12.1 2022-03-28 - -### Fixes - -- fix: error log event metadata (#422) | [@vreynolds](https://github.com/vreynolds) - -### Maintenance - -- Create checksums when building binaries (#423) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) -- Cache google ko deps between workflows (#424) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) - -## 1.12.0 2022-02-24 - -### Enhancements - -- feat: add support for env name from auth (#410) | [@JamieDanielson](https://github.com/JamieDanielson) - -### Maintenance - -- update aws-client orb to latest (#409) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) - -## 1.11.0 2022-02-17 - -### Enhancements - -**Note: Environment & Services Support requires v1.12.0 and higher** - -Do **not** use this version with Environment & Services. 
- -- Add Environment & Services support (#403) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) - -### Maintenance - -- docs: add helm charts step to releasing (#400) | [@vreynolds](https://github.com/vreynolds) - -## 1.10.0 2022-02-10 - -### Enhancements - -- added username in config for redis auth (#397) | [@ecobrien29](https://github.com/ecobrien29) -- build: add ARM64 (aarch64) RPM artifact (#395) | [@jharley](https://github.com/jharley) - -### Fixes - -- fix: deadlock when reloading configs (#398) | [@vreynolds](https://github.com/vreynolds) -- Fixed "honeeycomb" typo in log output when reloading config (#394) | [@looneym](https://github.com/looneym) - -## 1.9.0 2022-02-01 - -### Enhancements - -- Honor env. variable to set gRPC listener address (#386) | [@seh](https://github.com/seh) -- Add retries when connecting to redis during init (#382) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) - -### Fixes - -- Properly set meta.refinery.local_hostname field (#387) | [@jharley](https://github.com/jharley) - -### Maintenance - -- docs: update rules example (#378) | [@vreynolds](https://github.com/vreynolds) -- Bump github.com/gomodule/redigo from 1.8.5 to 1.8.8 (#374) -- Bump github.com/spf13/viper from 1.9.0 to 1.10.1 (#375) -- Bump google.golang.org/grpc from 1.42.0 to 1.43.0 (#372) - -## 1.8.1 2022-01-06 - -### Maintenance - -- Add re-triage workflow (#368) | [@vreynolds](https://github.com/vreynolds) -- Bump libtrace & golang (#373) | [@lizthegrey](https://github.com/lizthegrey) -- Bump github.com/honeycombio/husky from 0.5.0 to 0.6.0 (#370) -- Bump github.com/prometheus/client_golang from 0.9.4 to 1.11.0 (#357) - -## 1.8.0 2021-12-08 - -### Enhancements - -- Make MaxBatchSize configurable (#365) | 
[@JamieDanielson](https://github.com/JamieDanielson) - -### Maintenance - -- Bump husky to v0.5.0 (#366) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) -- Bump husky to v0.4.0 (#361) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) - -## 1.7.0 2021-11-29 - -### Enhancements - -- Replace internal duplicated code with Husky (#341) [@MikeGoldsmith](https://github.com/MikeGoldsmith) - - Also fixes segfaults caused by nil appearing in OTLP data as described in (#358) -- Improves histogram buckets over the default set (#355) [@bdarfler](https://github.com/bdarfler) - -### Maintenance - -- Update dependabot to monthly (#356) [@vreynolds](https://github.com/vreynolds) - -## 1.6.1 2021-11-10 - -- Revert "Use alpine as base image (#343)" (#352) - -## 1.6.0 2021-11-04 - -- Add an --interface-names flag (#342) | [@ismith](https://github.com/ismith) - -### Fixes - -- bump libtrace-go to v1.15.6 -- empower apply-labels action to apply labels (#344) -- Bump github.com/opsramp/libtrace-go from 1.15.4 to 1.15.5 (#327) -- Re-add missing docker login when publishing (#338) - -## 1.5.2 2021-10-13 - -### Fixes - -- Build multi-arch docker images during publish CI step (#336) [@MikeGoldsmith](https://github.com/MikeGoldsmith) - -## 1.5.1 - -### Fixes - -- Fix for race condition in prometheus metrics (#324) [@estheruary](https://github.com/estheruary) -- Update race condition fix to use RWLock instead of Lock (#331) [@MikeGoldsmith](https://github.com/MikeGoldsmith) & [@robbkidd](https://github.com/robbkidd) - -### Maintenance - -- Build docker images on all builds and publish only on tag (#328) [@MikeGoldsmith](https://github.com/MikeGoldsmith) - -## 1.5.0 - -### Enhancements - -- Add dynamic sampler support to rules based samplers (#317) [@puckpuck](https://github.com/puckpuck) -- 
Publish arm64 Docker images (#323) [@MikeGoldsmith](https://github.com/MikeGoldsmith) - -### Maintenance - -- Adds Stalebot (#321) [@JamieDanielson](https://github.com/JamieDanielson) -- Switch licecycle terminology to maintained (#315) [cartermp](https://github.com/cartermp) -- Add NOTICE (#314) [cartermp](https://github.com/cartermp) -- Add issue and PR templates (#307) [@vreynolds](https://github.com/vreynolds) -- Add OSS lifecycle badge (#304) [@vreynolds](https://github.com/vreynolds) -- Add community health files (#303) [@vreynolds](https://github.com/vreynolds) -- Bump github.com/spf13/viper from 1.8.1 to 1.9.0 (#320) [dependabot[bot]] -- Bump github.com/json-iterator/go from 1.1.11 to 1.1.12 (#316) [dependabot[bot]] -- Bump github.com/klauspost/compress from 1.13.4 to 1.13.6 (#319) [dependabot[bot]] -- Bump github.com/fsnotify/fsnotify from 1.5.0 to 1.5.1 (#311) [dependabot[bot]] -- Bump google.golang.org/grpc from 1.39.1 to 1.40.0 (#305) [dependabot[bot]] -- Bump github.com/fsnotify/fsnotify from 1.4.9 to 1.5.0 (#308) [dependabot[bot]] -- Bump github.com/klauspost/compress from 1.13.3 to 1.13.4 (#306) [dependabot[bot]] - -## 1.4.1 - -### Fixes - -- Add span.kind when ingesting OTLP (#299) - -### Maintenance - -- Bump google.golang.org/grpc from 1.39.0 to 1.39.1 (#300) -- Bump github.com/klauspost/compress from 1.13.2 to 1.13.3 (#301) -- Bump github.com/opsramp/libtrace-go from 1.12.4 to 1.15.4 (#295) -- Bump github.com/klauspost/compress from 1.10.3 to 1.13.2 (#297) - -## 1.4.0 - -### Added - -- Add support for OTLP over HTTP/protobuf [#279](https://github.com/jirs5/tracing-proxy/pull/279) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) - -### Maintenance - -- Bump 
github.com/sirupsen/logrus from 1.2.0 to 1.8.1 (#290) -- Bump google.golang.org/grpc from 1.37.1 to 1.39.0 (#288) -- Bump github.com/gomodule/redigo from 1.8.4 to 1.8.5 (#287) -- Bump github.com/spf13/viper from 1.7.0 to 1.8.1 (#274) -- Bump github.com/gogo/protobuf from 1.3.1 to 1.3.2 (#242) -- Bump github.com/golang/protobuf from 1.4.3 to 1.5.2 (#252) -- Bump github.com/grpc-ecosystem/grpc-gateway from 1.12.1 to 1.16.0 (#233) - -## 1.3.0 - -### Added - -- Add support to "does-not-contain" operator on RulesBasedSampler [#267](https://github.com/jirs5/tracing-proxy/pull/267) | [@tr-fteixeira](https://github.com/tr-fteixeira) - -### Fixes - -- Ensure span links and events generate events and get resource attrs [#264](https://github.com/jirs5/tracing-proxy/pull/264) | [@MikeGoldsmith](https://github.com/MikeGoldsmith) - -## 1.2.1 - -### Fixes - -- OTLP span events are now supported, they were being dropped on the floor previously (#261) | [@dstrelau](https://github.com/dstrelau) - -## 1.2.0 - -### Added - -- Add `UseTLSInsecure` config option to skip TLS verification with Redis (#254) | [@beanieboi](https://github.com/beanieboi) -- Add `AddHostMetadataToTrace` config option to add tracing-proxy hostname information to spans (#250) | [@jharley](https://github.com/jharley) -- Additional config validation: verify that sample rate trace field key is specified, if needed (#248) | [@paulosman](https://github.com/paulosman) - -### Changed - -- Remove redundant peer/api suffix from response error metrics (#247) | [@vreynolds](https://github.com/vreynolds) - - `api_response_errors_api`, `api_response_errors_peer`, `peer_response_errors_api`, `peer_response_errors_peer` - - replaced by `api_response_errors`, `peer_response_errors` -- Fix rules sampler to emit correct 
metric (#236) | [@isnotajoke](https://github.com/isnotajoke) - - Previously `dynsampler_num_dropped` was emitted, now `rulessampler_num_dropped` will be emitted - -### Maintenance - -- Update README content (#239) | [@jjziv](https://github.com/jjziv) -- Move from garyburd Redigo to supported redigo (#249) | [@verajohne](https://github.com/verajohne) -- Bump google.golang.org/grpc from 1.32.0 to 1.37.1 (#253) -- Bump github.com/prometheus/client_golang from 0.9.3 to 0.9.4 (#240) -- Bump github.com/pkg/errors from 0.8.1 to 0.9.1 (#232) -- Bump github.com/stretchr/testify from 1.5.1 to 1.7.0 (#231) -- Bump github.com/jessevdk/go-flags from 1.4.0 to 1.5.0 (#230) -- Bump github.com/hashicorp/golang-lru from 0.5.1 to 0.5.4 (#229) - -## 1.1.1 - -### Fixes - -- tracing-proxy startup issues in v1.1.0 - -## 1.1.0 - -### Improvements - -- Add support environment variables for API keys (#221) -- Removes whitelist terminology (#222) -- Log sampler config and validation errors (#228) - -### Fixes - -- Pass along upstream and peer metrics configs to libtrace (#227) -- Guard against nil pointer dereference when processing OTLP span.Status (#223) -- Fix YAML config parsing (#220) - -### Maintenance - -- Add test for OTLP handler, including spans with no status (#225) - -## 1.0.0 - -Initial GA release of tracing-proxy diff --git a/RELEASING.md b/RELEASING.md deleted file mode 100644 index 3339caea7b..0000000000 --- a/RELEASING.md +++ /dev/null @@ -1,8 +0,0 @@ -# Release Process - -1. Add release entry to [changelog](./CHANGELOG.md) -2. Open a PR with the above, and merge that into main -3. Create new tag on merged commit with the new version (e.g. `v1.4.1`) -4. Push the tag upstream (this will kick off the release pipeline in CI) -5. Copy change log entry for newest version into draft GitHub release created as part of CI publish steps -6. 
Update the `appVersion` and any relevant chart changes in [helm-charts](https://github.com/honeycombio/helm-charts/tree/main/charts/refinery) diff --git a/collect/cache/cuckoo.go b/collect/cache/cuckoo.go index 4735c7a66b..5d5aff0eb7 100644 --- a/collect/cache/cuckoo.go +++ b/collect/cache/cuckoo.go @@ -3,7 +3,7 @@ package cache import ( "sync" - "github.com/honeycombio/refinery/metrics" + "github.com/opsramp/tracing-proxy/metrics" cuckoo "github.com/panmari/cuckoofilter" ) diff --git a/go.mod b/go.mod index addb162d36..25c3a957d4 100644 --- a/go.mod +++ b/go.mod @@ -14,11 +14,10 @@ require ( github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru v0.5.4 github.com/honeycombio/dynsampler-go v0.2.1 - github.com/honeycombio/refinery v1.19.0 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 - github.com/opsramp/husky v0.0.0-20230202041143-752f307df09e + github.com/opsramp/husky v0.0.0-20230314064610-c5b7e46d200f github.com/opsramp/libtrace-go v0.0.0-20230202041014-73d98b9c4b7c github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 @@ -52,8 +51,6 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.14.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/honeycombio/husky v0.18.0 // indirect - github.com/honeycombio/libhoney-go v1.18.0 // indirect github.com/leodido/go-urn v1.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect @@ -71,13 +68,13 @@ require ( github.com/tidwall/match v1.1.1 
// indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - golang.org/x/crypto v0.1.0 // indirect golang.org/x/net v0.4.0 // indirect golang.org/x/sys v0.3.0 // indirect golang.org/x/text v0.5.0 // indirect google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/alexcesaro/statsd.v2 v2.0.0 // indirect + gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 2712316920..b3f14a993e 100644 --- a/go.sum +++ b/go.sum @@ -547,12 +547,6 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/honeycombio/dynsampler-go v0.2.1 h1:IbhjbdB0IbLSZn7xVYuk6jjk/ZDk/EO+DJ5OXFZliv8= github.com/honeycombio/dynsampler-go v0.2.1/go.mod h1:BOeTUPT6fCRH5X/+QqF6Kza3IyLp9uSq/rWgEtI4aZI= -github.com/honeycombio/husky v0.18.0 h1:1ajF1x047rGgxd9kh0bS51KXK8TIX6wwcpgwBp3n3d8= -github.com/honeycombio/husky v0.18.0/go.mod h1:E4aDSkjpiU4uHpE7Qz84ZG6mEspdPDXTIVwetv5NMeE= -github.com/honeycombio/libhoney-go v1.18.0 h1:OYHOP381r3Ea76BhUYeza8PUTMDp8MByoOxDn3qtEq8= -github.com/honeycombio/libhoney-go v1.18.0/go.mod h1:KwbcXkqUbH20x3MpfSt/kdvlog3FFdEnouqYD3XKXLY= -github.com/honeycombio/refinery v1.19.0 h1:KH8qg5cBK6seY8uTJQy7Va/fBPKn0uBuDCfQ+63fpUM= -github.com/honeycombio/refinery v1.19.0/go.mod h1:4rUkL6kBsbVO65kgre9+LOFGE/ZaVV7FNYugMSYmi9A= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= @@ -585,8 +579,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/opsramp/husky v0.0.0-20230202041143-752f307df09e h1:o4MyRmiFnLsWPfynxvl21bKofcaMxKpLsvwriAlnZKk= -github.com/opsramp/husky v0.0.0-20230202041143-752f307df09e/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= +github.com/opsramp/husky v0.0.0-20230314064610-c5b7e46d200f h1:tsDDGfYpI0/muPSQrKTC30/SmmsBq4Sj0XIdTPB0jKE= +github.com/opsramp/husky v0.0.0-20230314064610-c5b7e46d200f/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= github.com/opsramp/libtrace-go v0.0.0-20230202041014-73d98b9c4b7c h1:Mmydh9H0k2SEhHKCbwKXNfKyRCX37u2oOqgb5WV0weU= github.com/opsramp/libtrace-go v0.0.0-20230202041014-73d98b9c4b7c/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= @@ -676,8 +670,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= -golang.org/x/crypto 
v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1183,6 +1175,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= From 6d5475ae89a7bfa731b1fadaa9bcac62ffc17dbc Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Tue, 14 Mar 2023 12:28:15 +0530 Subject: [PATCH 291/351] updating libtrace-go --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 25c3a957d4..c1b613f11e 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 github.com/opsramp/husky v0.0.0-20230314064610-c5b7e46d200f - github.com/opsramp/libtrace-go v0.0.0-20230202041014-73d98b9c4b7c + github.com/opsramp/libtrace-go v0.0.0-20230314065618-8eadd7f71090 github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 
b3f14a993e..ee37fc696b 100644 --- a/go.sum +++ b/go.sum @@ -581,8 +581,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/opsramp/husky v0.0.0-20230314064610-c5b7e46d200f h1:tsDDGfYpI0/muPSQrKTC30/SmmsBq4Sj0XIdTPB0jKE= github.com/opsramp/husky v0.0.0-20230314064610-c5b7e46d200f/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230202041014-73d98b9c4b7c h1:Mmydh9H0k2SEhHKCbwKXNfKyRCX37u2oOqgb5WV0weU= -github.com/opsramp/libtrace-go v0.0.0-20230202041014-73d98b9c4b7c/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/libtrace-go v0.0.0-20230314065618-8eadd7f71090 h1:tamz5ez5Fei3glpqMrvLBldrG9Kf/+lkh/pKnBX99XA= +github.com/opsramp/libtrace-go v0.0.0-20230314065618-8eadd7f71090/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= From 8e81d18828f23928da836fb926039fc7db85dab3 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Tue, 14 Mar 2023 12:42:52 +0530 Subject: [PATCH 292/351] printing errors if upstream or peer clients failed --- cmd/tracing-proxy/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index f39e67b4d7..e1a4379e96 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -161,7 +161,7 @@ func main() { }, }) if err != nil { - fmt.Printf("unable to initialize upstream libtrace client") + fmt.Printf("unable to initialize upstream libtrace 
client: %v", err) os.Exit(1) } @@ -182,7 +182,7 @@ func main() { }, }) if err != nil { - fmt.Printf("unable to initialize upstream libtrace client") + fmt.Printf("unable to initialize upstream libtrace client: %v", err) os.Exit(1) } From b3ba572e522870f3f1dc42c9b727eccf258c22e4 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Wed, 29 Mar 2023 12:07:48 +0530 Subject: [PATCH 293/351] bug-fix for /v1/traces --- app/app.go | 7 - build/opsramp-tracing-proxy/values.yaml | 21 +- cmd/tracing-proxy/main.go | 2 +- collect/collect.go | 2 +- config.toml | 39 - config/config.go | 10 +- config/file_config.go | 29 +- config/sampler_config.go | 324 +++++++ config_complete.toml | 83 +- docker-compose.yml | 44 + go.mod | 2 +- go.sum | 4 +- internal/peer/redis.go | 4 +- metrics/opsramp.go | 134 +-- route/middleware.go | 39 +- route/otlp_trace.go | 26 +- route/route.go | 18 - rules.toml | 6 - sample/rules.go | 9 + sample/rules_test.go | 1062 ----------------------- sharder/deterministic_test.go | 2 - types/event.go | 12 +- 22 files changed, 552 insertions(+), 1327 deletions(-) delete mode 100644 config.toml create mode 100644 docker-compose.yml delete mode 100644 rules.toml delete mode 100644 sample/rules_test.go diff --git a/app/app.go b/app/app.go index fb087bd5fa..231b5a260d 100644 --- a/app/app.go +++ b/app/app.go @@ -24,13 +24,6 @@ type App struct { Version string } -type OpsRampAuthTokenResponse struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - ExpiresIn int64 `json:"expires_in"` - Scope string `json:"scope"` -} - // Start on the App obect should block until the proxy is shutting down. After // Start exits, Stop will be called on all dependencies then on App then the // program will exit. 
diff --git a/build/opsramp-tracing-proxy/values.yaml b/build/opsramp-tracing-proxy/values.yaml index c19e8e686e..397f067a48 100644 --- a/build/opsramp-tracing-proxy/values.yaml +++ b/build/opsramp-tracing-proxy/values.yaml @@ -66,15 +66,6 @@ config: # utilization over data transfer costs. CompressPeerCommunication: true - # APIKeys is a list of OpsRamp API keys that the proxy will accept. This list - # only applies to events - other OpsRamp API actions will fall through to the - # upstream API directly. - # Adding keys here causes events arriving with API keys not in this list to be - # rejected with an HTTP 401 error If an API key that is a literal '*' is in the - # list, all API keys are accepted. - # Eligible for live reload. - APIKeys: [ "*" ]# wildcard accepts all keys - # OpsrampAPI is the URL for the upstream Opsramp API. # Eligible for live reload. OpsrampAPI: "" @@ -205,6 +196,18 @@ config: # Not eligible for live reload. RedisPassword: "" + # RedisPrefix is a string used as a prefix for the keys in redis while storing + # the peer membership. It might be useful to set this in any situation where + # multiple trace-proxy clusters or multiple applications want to share a single + # Redis instance. It may not be blank. + RedisPrefix: "tracing-proxy" + + # RedisDatabase is an integer from 0-15 indicating the database number to use + # for the Redis instance storing the peer membership. It might be useful to set + # this in any situation where multiple trace-proxy clusters or multiple + # applications want to share a single Redis instance. + RedisDatabase: 0 + # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. # Not eligible for live reload. 
UseTLS: false diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index e1a4379e96..3c0d2c85f4 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -113,7 +113,7 @@ func main() { os.Exit(1) } - // upstreamTransport is the http transport used to send things on to Honeycomb + // upstreamTransport is the http transport used to send things on to OpsRamp upstreamTransport := &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ diff --git a/collect/collect.go b/collect/collect.go index 20c30ed97c..367b0e6771 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -667,7 +667,7 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { if i.Config.GetIsDryRun() && !shouldSend { i.Logger.Info().WithFields(logFields).Logf("Trace would have been dropped, but dry run mode is enabled") } - i.Logger.Info().WithFields(logFields).Logf("Sending trace") + i.Logger.Debug().WithFields(logFields).Logf("Sending trace") for _, sp := range trace.GetSpans() { if i.Config.GetAddRuleReasonToTrace() { sp.Data["meta.refinery.reason"] = reason diff --git a/config.toml b/config.toml deleted file mode 100644 index ca5d708fa2..0000000000 --- a/config.toml +++ /dev/null @@ -1,39 +0,0 @@ -####################### -## Quickstart Config ## -####################### - -# InMemCollector brings together all the settings that are relevant to -# collecting spans together to make traces. -[InMemCollector] - -# The collection cache is used to collect all spans into a trace as well as -# remember the sampling decision for any spans that might come in after the -# trace has been marked "complete" (either by timing out or seeing the root -# span). The number of traces in the cache should be many multiples (100x to -# 1000x) of the total number of concurrently active traces (trace throughput * -# trace duration). -# Eligible for live reload. Growing the cache capacity with a live config reload -# is fine. 
Avoid shrinking it with a live reload (you can, but it may cause -# temporary odd sampling decisions). -CacheCapacity = 1000 - -[HoneycombMetrics] - -# MetricsOpsrampAPI is the URL for the upstream Honeycomb API. -# Eligible for live reload. -MetricsOpsrampAPI = "https://api.jirs5" - -# MetricsAPIKey is the API key to use to send log events to the Honeycomb logging -# dataset. This is separate from the APIKeys used to authenticate regular -# traffic. -# Eligible for live reload. -MetricsAPIKey = "abcd1234" - -# MetricsDataset is the name of the dataset to which to send tracing-proxy metrics -# Eligible for live reload. -MetricsDataset = "tracing-proxy Metrics" - -# MetricsReportingInterval is the frequency (in seconds) to send metric events -# to Honeycomb. Between 1 and 60 is recommended. -# Not eligible for live reload. -MetricsReportingInterval = 3 diff --git a/config/config.go b/config/config.go index e488d2fbc7..9ee23b2266 100644 --- a/config/config.go +++ b/config/config.go @@ -37,9 +37,6 @@ type Config interface { // incoming events over gRPC GetGRPCListenAddr() (string, error) - // GetAPIKeys returns a list of Honeycomb API keys - GetAPIKeys() ([]string, error) - // GetPeers returns a list of other servers participating in this proxy cluster GetPeers() ([]string, error) @@ -61,6 +58,13 @@ type Config interface { // management. GetRedisPassword() (string, error) + // GetRedisPrefix returns the prefix string used in the keys for peer + // management. + GetRedisPrefix() string + + // GetRedisDatabase returns the ID of the Redis database to use for peer management. + GetRedisDatabase() int + // GetUseTLS returns true when TLS must be enabled to dial the Redis instance to // use for peer management. 
GetUseTLS() (bool, error) diff --git a/config/file_config.go b/config/file_config.go index 2162f977eb..c8b829582d 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -36,8 +36,7 @@ type configContents struct { CompressPeerCommunication bool GRPCListenAddr string GRPCPeerListenAddr string - APIKeys []string `validate:"required"` - OpsrampAPI string `validate:"required,url"` + OpsrampAPI string `validate:"required,url"` OpsrampKey string OpsrampSecret string TenantId string @@ -453,13 +452,6 @@ func (f *fileConfig) GetGRPCPeerListenAddr() (string, error) { return f.conf.GRPCPeerListenAddr, nil } -func (f *fileConfig) GetAPIKeys() ([]string, error) { - f.mux.RLock() - defer f.mux.RUnlock() - - return f.conf.APIKeys, nil -} - func (f *fileConfig) GetPeerManagementType() (string, error) { f.mux.RLock() defer f.mux.RUnlock() @@ -502,6 +494,25 @@ func (f *fileConfig) GetRedisPassword() (string, error) { return f.config.GetString("PeerManagement.RedisPassword"), nil } +func (f *fileConfig) GetRedisPrefix() string { + f.mux.RLock() + defer f.mux.RUnlock() + + prefix := f.config.GetString("PeerManagement.RedisPrefix") + if prefix == "" { + prefix = "tracing-proxy" + } + + return prefix +} + +func (f *fileConfig) GetRedisDatabase() int { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.config.GetInt("PeerManagement.RedisDatabase") +} + func (f *fileConfig) GetProxyProtocol() (string, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config/sampler_config.go b/config/sampler_config.go index b35965c9c2..afb2ca2ca4 100644 --- a/config/sampler_config.go +++ b/config/sampler_config.go @@ -2,6 +2,8 @@ package config import ( "fmt" + "strconv" + "strings" ) type DeterministicSamplerConfig struct { @@ -45,12 +47,334 @@ type RulesBasedSamplerCondition struct { Field string Operator string Value interface{} + Datatype string + Matches func(value any, exists bool) bool +} + +func (r *RulesBasedSamplerCondition) Init() error { + return r.setMatchesFunction() } 
func (r *RulesBasedSamplerCondition) String() string { return fmt.Sprintf("%+v", *r) } +func (r *RulesBasedSamplerCondition) setMatchesFunction() error { + switch r.Operator { + case "exists": + r.Matches = func(value any, exists bool) bool { + return exists + } + return nil + case "not-exists": + r.Matches = func(value any, exists bool) bool { + return !exists + } + return nil + case "!=", "=", ">", "<", "<=", ">=": + return setCompareOperators(r, r.Operator) + case "starts-with", "contains", "does-not-contain": + err := setMatchStringBasedOperators(r, r.Operator) + if err != nil { + return err + } + default: + return fmt.Errorf("unknown operator '%s'", r.Operator) + } + return nil +} + +func tryConvertToInt(v any) (int, bool) { + switch value := v.(type) { + case int: + return value, true + case int64: + return int(value), true + case float64: + return int(value), true + case bool: + return 0, false + case string: + n, err := strconv.Atoi(value) + if err == nil { + return n, true + } + return 0, false + default: + return 0, false + } +} + +func tryConvertToFloat(v any) (float64, bool) { + switch value := v.(type) { + case float64: + return value, true + case int: + return float64(value), true + case int64: + return float64(value), true + case bool: + return 0, false + case string: + n, err := strconv.ParseFloat(value, 64) + return n, err == nil + default: + return 0, false + } +} + +// In the case of strings, we want to stringize everything we get through a +// "standard" format, which we are defining as whatever Go does with the %v +// operator to sprintf. This will make sure that no matter how people encode +// their values, they compare on an equal footing. 
+func tryConvertToString(v any) (string, bool) { + return fmt.Sprintf("%v", v), true +} + +func tryConvertToBool(v any) bool { + value, ok := tryConvertToString(v) + if !ok { + return false + } + str, err := strconv.ParseBool(value) + if err != nil { + return false + } + if str { + return true + } else { + return false + } +} + +func setCompareOperators(r *RulesBasedSamplerCondition, condition string) error { + switch r.Datatype { + case "string": + conditionValue, ok := tryConvertToString(r.Value) + if !ok { + return fmt.Errorf("could not convert %v to string", r.Value) + } + + // check if conditionValue and spanValue are not equal + switch condition { + case "!=": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToString(spanValue); exists && ok { + return n != conditionValue + } + return false + } + return nil + case "=": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToString(spanValue); exists && ok { + return n == conditionValue + } + return false + } + return nil + case ">": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToString(spanValue); exists && ok { + return n > conditionValue + } + return false + } + return nil + case "<": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToString(spanValue); exists && ok { + return n < conditionValue + } + return false + } + return nil + case "<=": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToString(spanValue); exists && ok { + return n <= conditionValue + } + return false + } + return nil + } + case "int": + // check if conditionValue and spanValue are not equal + conditionValue, ok := tryConvertToInt(r.Value) + if !ok { + return fmt.Errorf("could not convert %v to string", r.Value) + } + switch condition { + case "!=": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToInt(spanValue); exists && ok { + return n != conditionValue + } + 
return false + } + return nil + case "=": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToInt(spanValue); exists && ok { + return n == conditionValue + } + return false + } + return nil + case ">": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToInt(spanValue); exists && ok { + return n > conditionValue + } + return false + } + return nil + case ">=": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToInt(spanValue); exists && ok { + return n >= conditionValue + } + return false + } + return nil + case "<": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToInt(spanValue); exists && ok { + return n < conditionValue + } + return false + } + return nil + case "<=": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToInt(spanValue); exists && ok { + return n <= conditionValue + } + return false + } + return nil + } + case "float": + conditionValue, ok := tryConvertToFloat(r.Value) + if !ok { + return fmt.Errorf("could not convert %v to string", r.Value) + } + // check if conditionValue and spanValue are not equal + switch condition { + case "!=": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToFloat(spanValue); exists && ok { + return n != conditionValue + } + return false + } + return nil + case "=": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToFloat(spanValue); exists && ok { + return n == conditionValue + } + return false + } + return nil + case ">": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToFloat(spanValue); exists && ok { + return n > conditionValue + } + return false + } + return nil + case ">=": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToFloat(spanValue); exists && ok { + return n >= conditionValue + } + return false + } + return nil + case "<": + r.Matches = func(spanValue 
any, exists bool) bool { + if n, ok := tryConvertToFloat(spanValue); exists && ok { + return n < conditionValue + } + return false + } + return nil + case "<=": + r.Matches = func(spanValue any, exists bool) bool { + if n, ok := tryConvertToFloat(spanValue); exists && ok { + return n <= conditionValue + } + return false + } + return nil + } + case "bool": + conditionValue := tryConvertToBool(r.Value) + + switch condition { + case "!=": + r.Matches = func(spanValue any, exists bool) bool { + if n := tryConvertToBool(spanValue); exists && n { + return n != conditionValue + } + return false + } + return nil + case "=": + r.Matches = func(spanValue any, exists bool) bool { + if n := tryConvertToBool(spanValue); exists && n { + return n == conditionValue + } + return false + } + return nil + } + case "": + // user did not specify dataype, so do not specify matches function + default: + return fmt.Errorf("%s must be either string, int, float or bool", r.Datatype) + } + return nil +} + +func setMatchStringBasedOperators(r *RulesBasedSamplerCondition, condition string) error { + conditionValue, ok := tryConvertToString(r.Value) + if !ok { + return fmt.Errorf("%s value must be a string, but was '%s'", condition, r.Value) + } + + switch condition { + case "starts-with": + r.Matches = func(spanValue any, exists bool) bool { + s, ok := tryConvertToString(spanValue) + if ok { + return strings.HasPrefix(s, conditionValue) + } + return false + } + case "contains": + r.Matches = func(spanValue any, exists bool) bool { + s, ok := tryConvertToString(spanValue) + if ok { + return strings.Contains(s, conditionValue) + } + return false + } + case "does-not-contain": + r.Matches = func(spanValue any, exists bool) bool { + s, ok := tryConvertToString(spanValue) + if ok { + return !strings.Contains(s, conditionValue) + } + return false + } + } + + return nil +} + type RulesBasedDownstreamSampler struct { DynamicSampler *DynamicSamplerConfig EMADynamicSampler *EMADynamicSamplerConfig diff 
--git a/config_complete.toml b/config_complete.toml index b2ae653260..731fc6578e 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -5,9 +5,9 @@ # ListenAddr is the IP and port on which to listen for incoming events. Incoming # traffic is expected to be HTTP, so if using SSL put something like nginx in # front to do the decryption. -# Should be of the form 0.0.0.0:8080 +# Should be of the form 0.0.0.0:8082 # Not eligible for live reload. -ListenAddr = "0.0.0.0:8080" +ListenAddr = "0.0.0.0:8082" # GRPCListenAddr is the IP and port on which to listen for incoming events over # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put @@ -23,7 +23,7 @@ GRPCListenAddr = "0.0.0.0:4317" # Should be of the form 0.0.0.0:8081 # Not eligible for live reload. PeerListenAddr = "0.0.0.0:8083" -GRPCPeerListenAddr = "0.0.0.0:8084" +GRPCPeerListenAddr = "tracing-proxy:8084" # ProxyProtocol accepts http and https # Not Eligible for live reload. @@ -31,11 +31,11 @@ ProxyProtocol = "" # ProxyServer takes the proxy server address # Not Eligible for live reload. -ProxyServer= "" +ProxyServer = "" # ProxyPort takes the proxy server port # Not Eligible for live reload. -ProxyPort=0 +ProxyPort = 0 # ProxyUserName takes the proxy username # Not Eligible for live reload. @@ -53,32 +53,19 @@ ProxyPassword = "" # utilization over data transfer costs. CompressPeerCommunication = true -# APIKeys is a list of Opsramp API keys that the proxy will accept. This list -# only applies to events - other Opsramp API actions will fall through to the -# upstream API directly. -# Adding keys here causes events arriving with API keys not in this list to be -# rejected with an HTTP 401 error If an API key that is a literal '*' is in the -# list, all API keys are accepted. -# Eligible for live reload. -APIKeys = [ - # "replace-me", - # "more-optional-keys", - "*", # wildcard accept all keys -] - # OpsrampAPI is the URL for the upstream Opsramp API. # Eligible for live reload. 
#OpsrampAPI = "localhost:50052" -OpsrampAPI = "https://int.opsramp.net" +OpsrampAPI = "" # OpsrampKey is used to get the OauthToken -OpsrampKey = "***REMOVED***" +OpsrampKey = "" # OpsrampSecret is used to get the OauthToken -OpsrampSecret = "***REMOVED***" +OpsrampSecret = "" # Traces are send to the client with given tenantid -TenantId = "3748c67e-bec1-4cad-bd8b-8f2f8ea840f3" +TenantId = "" # Dataset you want to use for sampling Dataset = "ds" @@ -170,7 +157,7 @@ EnvironmentCacheTTL = "1h" # Default is ["trace.span_id"]. # Eligible for live reload. AdditionalErrorFields = [ - "trace.span_id" + "trace.span_id" ] # AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate @@ -193,7 +180,7 @@ AdditionalErrorFields = [ # Metrics are sent to OpsRamp (The collection happens based on configuration specifie # in OpsRampMetrics and only works when the Metrics is set to "prometheus") -SendMetricsToOpsRamp = true +SendMetricsToOpsRamp = false ############################ ## Implementation Choices ## @@ -215,28 +202,28 @@ Collector = "InMemCollector" ## Peer Management ## ######################### -[PeerManagement] -Type = "file" -# Peers is the list of all servers participating in this proxy cluster. Events -# will be sharded evenly across all peers based on the Trace ID. Values here -# should be the base URL used to access the peer, and should include scheme, -# hostname (or ip address) and port. All servers in the cluster should be in -# this list, including this host. -Peers = [ - "http://127.0.0.1:8084", #only grpc peer listener used - # "http://127.0.0.1:8083", - # "http://10.1.2.3.4:8080", - # "http://refinery-1231:8080", - # "http://peer-3.fqdn" // assumes port 80 -] +#[PeerManagement] +#Type = "file" +## Peers is the list of all servers participating in this proxy cluster. Events +## will be sharded evenly across all peers based on the Trace ID. 
Values here +## should be the base URL used to access the peer, and should include scheme, +## hostname (or ip address) and port. All servers in the cluster should be in +## this list, including this host. +#Peers = [ +# "http://127.0.0.1:8084", #only grpc peer listener used +# # "http://127.0.0.1:8083", +# # "http://10.1.2.3.4:8080", +# # "http://refinery-1231:8080", +# # "http://peer-3.fqdn" // assumes port 80 +#] -# [PeerManagement] -# Type = "redis" +[PeerManagement] +Type = "redis" # RedisHost is is used to connect to redis for peer cluster membership management. # Further, if the environment variable 'REFINERY_REDIS_HOST' is set it takes # precedence and this value is ignored. # Not eligible for live reload. -# RedisHost = "localhost:6379" +RedisHost = "redis:22122" # RedisUsername is the username used to connect to redis for peer cluster membership management. # If the environment variable 'REFINERY_REDIS_USERNAME' is set it takes @@ -250,6 +237,18 @@ Peers = [ # Not eligible for live reload. # RedisPassword = "" +# RedisPrefix is a string used as a prefix for the keys in redis while storing +# the peer membership. It might be useful to set this in any situation where +# multiple tracing-proxy clusters or multiple applications want to share a single +# Redis instance. If not set then "tracing-proxy" is used as prefix +# RedisPrefix = "customPrefix" + +# RedisDatabase is an integer from 0-15 indicating the database number to use +# for the Redis instance storing the peer membership. It might be useful to set +# this in any situation where multiple trace-proxy clusters or multiple +# applications want to share a single Redis instance. if not set Default = 0 +# RedisDatabase = 1 + # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. # Not eligible for live reload. 
# UseTLS = false @@ -291,7 +290,7 @@ Peers = [ # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the # number of nodes) are disrupted when the node count changes. # Not eligible for live reload. -# Strategy = "hash" +Strategy = "hash" ######################### ## In-Memory Collector ## diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000000..c0f1033c6e --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,44 @@ +version: '3.8' + +services: + redis: + hostname: redis + image: dynomitedb/redis + expose: + - "22122" + profiles: + - redis + - all + + redis-commander: + image: rediscommander/redis-commander:latest + restart: always + environment: + - REDIS_HOSTS=local:redis:22122 + ports: + - "4042:8081" + depends_on: + - redis + profiles: + - redis + - all + + tracing-proxy: + build: + context: . + dockerfile: Dockerfile + deploy: + mode: replicated + replicas: 3 + endpoint_mode: vip + expose: + - "4317" + - "8084" + - "8082" + ports: + - "8082-8084:8082" + depends_on: + - redis + profiles: + - tracing-proxy + - all \ No newline at end of file diff --git a/go.mod b/go.mod index c1b613f11e..94b6699575 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 - github.com/opsramp/husky v0.0.0-20230314064610-c5b7e46d200f + github.com/opsramp/husky v0.0.0-20230329062823-ef17d98937d7 github.com/opsramp/libtrace-go v0.0.0-20230314065618-8eadd7f71090 github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 diff --git a/go.sum b/go.sum index ee37fc696b..47542c9993 100644 --- a/go.sum +++ b/go.sum @@ -579,8 +579,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/opsramp/husky v0.0.0-20230314064610-c5b7e46d200f h1:tsDDGfYpI0/muPSQrKTC30/SmmsBq4Sj0XIdTPB0jKE= -github.com/opsramp/husky v0.0.0-20230314064610-c5b7e46d200f/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= +github.com/opsramp/husky v0.0.0-20230329062823-ef17d98937d7 h1:xP2qFwbG494966JCq3Qjx24SP45IrbqUVqinp1hT4X0= +github.com/opsramp/husky v0.0.0-20230329062823-ef17d98937d7/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= github.com/opsramp/libtrace-go v0.0.0-20230314065618-8eadd7f71090 h1:tamz5ez5Fei3glpqMrvLBldrG9Kf/+lkh/pKnBX99XA= github.com/opsramp/libtrace-go v0.0.0-20230314065618-8eadd7f71090/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= diff --git a/internal/peer/redis.go b/internal/peer/redis.go index 97dc7e1fd3..a530c0650b 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -91,7 +91,7 @@ func newRedisPeers(ctx context.Context, c config.Config, done chan struct{}) (Pe peers := &redisPeers{ store: &redimem.RedisMembership{ - Prefix: "tracing-proxy", + Prefix: c.GetRedisPrefix(), Pool: pool, }, peers: make([]string, 1), @@ -225,7 +225,7 @@ func buildOptions(c config.Config) []redis.DialOption { options := []redis.DialOption{ redis.DialReadTimeout(1 * time.Second), redis.DialConnectTimeout(1 * time.Second), - redis.DialDatabase(0), // TODO enable multiple databases for multiple samproxies + redis.DialDatabase(c.GetRedisDatabase()), } username, _ := c.GetRedisUsername() diff --git a/metrics/opsramp.go b/metrics/opsramp.go index 
182e5ec119..a804199794 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -5,10 +5,12 @@ import ( "encoding/json" "fmt" "github.com/golang/snappy" + "github.com/gorilla/mux" + "github.com/prometheus/client_golang/prometheus/promhttp" io_prometheus_client "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/prompb" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -18,18 +20,18 @@ import ( "time" "github.com/gogo/protobuf/proto" - "github.com/gorilla/mux" "github.com/opsramp/tracing-proxy/config" "github.com/opsramp/tracing-proxy/logger" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/client_golang/prometheus/promhttp" ) +var metricsServer sync.Once + type OpsRampMetrics struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` - // metrics keeps a record of all the registered metrics so we can increment + // metrics keeps a record of all the registered metrics so that we can increment // them by name metrics map[string]interface{} lock sync.RWMutex @@ -55,34 +57,41 @@ func (p *OpsRampMetrics) Start() error { return err } - if p.prefix == "" && p.Config.GetSendMetricsToOpsRamp() { + if p.Config.GetSendMetricsToOpsRamp() { go func() { metricsTicker := time.NewTicker(time.Duration(metricsConfig.OpsRampMetricsReportingInterval) * time.Second) defer metricsTicker.Stop() - p.PopulateOpsRampMetrics(metricsConfig) + p.Populate(metricsConfig) // populating the oAuth Token Initially - err := p.RenewOpsRampOAuthToken() + err := p.RenewOAuthToken() if err != nil { p.Logger.Error().Logf("error while initializing oAuth Token Err: %v", err) } - for _ = range metricsTicker.C { - statusCode, err := p.PushMetricsToOpsRamp() + for range 
metricsTicker.C { + statusCode, err := p.PushMetrics() if err != nil { p.Logger.Error().Logf("error while pushing metrics with statusCode: %d and Error: %v", statusCode, err) } } }() - } p.metrics = make(map[string]interface{}) - muxxer := mux.NewRouter() + metricsServer.Do(func() { + muxer := mux.NewRouter() + muxer.Handle("/metrics", promhttp.Handler()) + + go func() { + err := http.ListenAndServe(metricsConfig.MetricsListenAddr, muxer) + if err != nil { + p.Logger.Error().Logf("failed to create /metrics server Error: %v", err) + } + }() + }) - muxxer.Handle("/metrics", promhttp.Handler()) - go http.ListenAndServe(metricsConfig.MetricsListenAddr, muxxer) return nil } @@ -92,48 +101,47 @@ func (p *OpsRampMetrics) Register(name string, metricType string) { p.lock.Lock() defer p.lock.Unlock() - newmet, exists := p.metrics[name] + newMetric, exists := p.metrics[name] // don't attempt to add the metric again as this will cause a panic if exists { return } - hostMap := make(map[string]string) + constantLabels := make(map[string]string) if hostname, err := os.Hostname(); err == nil && hostname != "" { - - hostMap["hostname"] = hostname + constantLabels["hostname"] = hostname } switch metricType { case "counter": - newmet = promauto.NewCounter(prometheus.CounterOpts{ + newMetric = promauto.NewCounter(prometheus.CounterOpts{ Name: name, Namespace: p.prefix, Help: name, - ConstLabels: hostMap, + ConstLabels: constantLabels, }) case "gauge": - newmet = promauto.NewGauge(prometheus.GaugeOpts{ + newMetric = promauto.NewGauge(prometheus.GaugeOpts{ Name: name, Namespace: p.prefix, Help: name, - ConstLabels: hostMap, + ConstLabels: constantLabels, }) case "histogram": - newmet = promauto.NewHistogram(prometheus.HistogramOpts{ + newMetric = promauto.NewHistogram(prometheus.HistogramOpts{ Name: name, Namespace: p.prefix, Help: name, // This is an attempt at a usable set of buckets for a wide range of metrics // 16 buckets, first upper bound of 1, each following upper bound is 4x 
the previous Buckets: prometheus.ExponentialBuckets(1, 4, 16), - ConstLabels: hostMap, + ConstLabels: constantLabels, }) } - p.metrics[name] = newmet + p.metrics[name] = newMetric } // RegisterWithDescriptionLabels takes a name, a metric type, description, labels. The type should be one of "counter", @@ -142,7 +150,7 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s p.lock.Lock() defer p.lock.Unlock() - newmet, exists := p.metrics[name] + newMetric, exists := p.metrics[name] // don't attempt to add the metric again as this will cause a panic if exists { @@ -156,14 +164,14 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s switch metricType { case "counter": - newmet = promauto.NewCounterVec(prometheus.CounterOpts{ + newMetric = promauto.NewCounterVec(prometheus.CounterOpts{ Name: name, Namespace: p.prefix, Help: desc, ConstLabels: hostMap, }, labels) case "gauge": - newmet = promauto.NewGaugeVec( + newMetric = promauto.NewGaugeVec( prometheus.GaugeOpts{ Name: name, Namespace: p.prefix, @@ -172,7 +180,7 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s }, labels) case "histogram": - newmet = promauto.NewHistogramVec(prometheus.HistogramOpts{ + newMetric = promauto.NewHistogramVec(prometheus.HistogramOpts{ Name: name, Namespace: p.prefix, Help: desc, @@ -184,15 +192,15 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s } - p.metrics[name] = newmet + p.metrics[name] = newMetric } func (p *OpsRampMetrics) Increment(name string) { p.lock.RLock() defer p.lock.RUnlock() - if counterIface, ok := p.metrics[name]; ok { - if counter, ok := counterIface.(prometheus.Counter); ok { + if counterInterface, ok := p.metrics[name]; ok { + if counter, ok := counterInterface.(prometheus.Counter); ok { counter.Inc() } } @@ -201,8 +209,8 @@ func (p *OpsRampMetrics) Count(name string, n interface{}) { p.lock.RLock() defer p.lock.RUnlock() - if counterIface, 
ok := p.metrics[name]; ok { - if counter, ok := counterIface.(prometheus.Counter); ok { + if counterInterface, ok := p.metrics[name]; ok { + if counter, ok := counterInterface.(prometheus.Counter); ok { counter.Add(ConvertNumeric(n)) } } @@ -211,8 +219,8 @@ func (p *OpsRampMetrics) Gauge(name string, val interface{}) { p.lock.RLock() defer p.lock.RUnlock() - if gaugeIface, ok := p.metrics[name]; ok { - if gauge, ok := gaugeIface.(prometheus.Gauge); ok { + if gaugeInterface, ok := p.metrics[name]; ok { + if gauge, ok := gaugeInterface.(prometheus.Gauge); ok { gauge.Set(ConvertNumeric(val)) } } @@ -221,8 +229,8 @@ func (p *OpsRampMetrics) Histogram(name string, obs interface{}) { p.lock.RLock() defer p.lock.RUnlock() - if histIface, ok := p.metrics[name]; ok { - if hist, ok := histIface.(prometheus.Histogram); ok { + if histInterface, ok := p.metrics[name]; ok { + if hist, ok := histInterface.(prometheus.Histogram); ok { hist.Observe(ConvertNumeric(obs)) } } @@ -232,8 +240,8 @@ func (p *OpsRampMetrics) GaugeWithLabels(name string, labels map[string]string, p.lock.RLock() defer p.lock.RUnlock() - if gaugeIface, ok := p.metrics[name]; ok { - if gaugeVec, ok := gaugeIface.(*prometheus.GaugeVec); ok { + if gaugeInterface, ok := p.metrics[name]; ok { + if gaugeVec, ok := gaugeInterface.(*prometheus.GaugeVec); ok { gaugeVec.With(labels).Set(value) } } @@ -243,8 +251,8 @@ func (p *OpsRampMetrics) IncrementWithLabels(name string, labels map[string]stri p.lock.RLock() defer p.lock.RUnlock() - if gaugeIface, ok := p.metrics[name]; ok { - if gaugeVec, ok := gaugeIface.(*prometheus.CounterVec); ok { + if gaugeInterface, ok := p.metrics[name]; ok { + if gaugeVec, ok := gaugeInterface.(*prometheus.CounterVec); ok { gaugeVec.With(labels).Inc() } } @@ -257,16 +265,15 @@ type OpsRampAuthTokenResponse struct { Scope string `json:"scope"` } -func (p *OpsRampMetrics) PopulateOpsRampMetrics(metricsConfig *config.OpsRampMetricsConfig) { - +func (p *OpsRampMetrics) Populate(metricsConfig 
*config.OpsRampMetricsConfig) { p.apiEndpoint = metricsConfig.OpsRampMetricsAPI p.apiKey = metricsConfig.OpsRampMetricsAPIKey p.apiSecret = metricsConfig.OpsRampMetricsAPISecret p.tenantID = metricsConfig.OpsRampTenantID p.retryCount = metricsConfig.OpsRampMetricsRetryCount - // Creating Regex for list of metrics - regexString := ".*" // default value is to take everything + // Creating Regex for a list of metrics + regexString := ".*" // the default value is to take everything if len(metricsConfig.OpsRampMetricsList) >= 1 { regexString = metricsConfig.OpsRampMetricsList[0] for index := 0; index < len(metricsConfig.OpsRampMetricsList); index++ { @@ -286,7 +293,7 @@ func (p *OpsRampMetrics) PopulateOpsRampMetrics(metricsConfig *config.OpsRampMet p.Client = http.Client{ Transport: &http.Transport{Proxy: http.ProxyFromEnvironment}, - Timeout: time.Duration(10) * time.Second, + Timeout: time.Duration(240) * time.Second, } if proxyUrl != "" { proxyURL, err := url.Parse(proxyUrl) @@ -295,13 +302,13 @@ func (p *OpsRampMetrics) PopulateOpsRampMetrics(metricsConfig *config.OpsRampMet } else { p.Client = http.Client{ Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)}, - Timeout: time.Duration(10) * time.Second, + Timeout: time.Duration(240) * time.Second, } } } } -func (p *OpsRampMetrics) PushMetricsToOpsRamp() (int, error) { +func (p *OpsRampMetrics) PushMetrics() (int, error) { metricFamilySlice, err := prometheus.DefaultGatherer.Gather() if err != nil { return -1, err @@ -309,15 +316,14 @@ func (p *OpsRampMetrics) PushMetricsToOpsRamp() (int, error) { presentTime := time.Now().UnixMilli() - timeSeries := []prompb.TimeSeries{} + var timeSeries []prompb.TimeSeries for _, metricFamily := range metricFamilySlice { - if !p.re.MatchString(metricFamily.GetName()) { continue } for _, metric := range metricFamily.GetMetric() { - labels := []prompb.Label{} + var labels []prompb.Label for _, label := range metric.GetLabel() { labels = append(labels, prompb.Label{ Name: 
label.GetName(), @@ -468,13 +474,9 @@ func (p *OpsRampMetrics) PushMetricsToOpsRamp() (int, error) { } req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") - req.Header.Set("Connection", "close") + //req.Header.Set("Connection", "close") req.Header.Set("Content-Encoding", "snappy") req.Header.Set("Content-Type", "application/x-protobuf") - - if !strings.Contains(p.oAuthToken.Scope, "metrics:write") { - return -1, fmt.Errorf("auth token provided not not have metrics:write scope") - } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", p.oAuthToken.AccessToken)) resp, err := p.SendWithRetry(req) @@ -482,8 +484,8 @@ func (p *OpsRampMetrics) PushMetricsToOpsRamp() (int, error) { return -1, err } defer resp.Body.Close() - // Depending on version and configuration of the PGW, StatusOK or StatusAccepted may be returned. - body, err := ioutil.ReadAll(resp.Body) + // Depending on the version and configuration of the PGW, StatusOK or StatusAccepted may be returned. + body, err := io.ReadAll(resp.Body) if err != nil { p.Logger.Error().Logf("failed to parse response body Err: %v", err) } @@ -495,15 +497,14 @@ func (p *OpsRampMetrics) PushMetricsToOpsRamp() (int, error) { return resp.StatusCode, nil } -func (p *OpsRampMetrics) RenewOpsRampOAuthToken() error { - +func (p *OpsRampMetrics) RenewOAuthToken() error { p.oAuthToken = new(OpsRampAuthTokenResponse) - url := fmt.Sprintf("%s/auth/oauth/token", strings.TrimRight(p.apiEndpoint, "/")) + endpoint := fmt.Sprintf("%s/auth/oauth/token", strings.TrimRight(p.apiEndpoint, "/")) requestBody := strings.NewReader("client_id=" + p.apiKey + "&client_secret=" + p.apiSecret + "&grant_type=client_credentials") - req, err := http.NewRequest(http.MethodPost, url, requestBody) + req, err := http.NewRequest(http.MethodPost, endpoint, requestBody) if err != nil { return err } @@ -516,7 +517,7 @@ func (p *OpsRampMetrics) RenewOpsRampOAuthToken() error { return err } - respBody, err := ioutil.ReadAll(resp.Body) + respBody, err := 
io.ReadAll(resp.Body) if err != nil { return err } @@ -531,13 +532,13 @@ func (p *OpsRampMetrics) RenewOpsRampOAuthToken() error { } func (p *OpsRampMetrics) SendWithRetry(request *http.Request) (*http.Response, error) { - response, err := p.Client.Do(request) if err == nil && response != nil && (response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted) { return response, nil } if response != nil && response.StatusCode == http.StatusProxyAuthRequired { // OpsRamp uses this for bad auth token - p.RenewOpsRampOAuthToken() + p.RenewOAuthToken() + request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", p.oAuthToken.AccessToken)) } // retry if the error is not nil @@ -547,7 +548,8 @@ func (p *OpsRampMetrics) SendWithRetry(request *http.Request) (*http.Response, e return response, nil } if response != nil && response.StatusCode == http.StatusProxyAuthRequired { // OpsRamp uses this for bad auth token - p.RenewOpsRampOAuthToken() + p.RenewOAuthToken() + request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", p.oAuthToken.AccessToken)) } } diff --git a/route/middleware.go b/route/middleware.go index ded46cf1a1..4024f317cb 100644 --- a/route/middleware.go +++ b/route/middleware.go @@ -2,7 +2,6 @@ package route import ( "context" - "errors" "fmt" "math/rand" "net/http" @@ -37,39 +36,6 @@ func (r *Router) queryTokenChecker(next http.Handler) http.Handler { }) } -func (r *Router) apiKeyChecker(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - apiKey := req.Header.Get(types.APIKeyHeader) - if apiKey == "" { - apiKey = req.Header.Get(types.APIKeyHeaderShort) - } - if apiKey == "" { - err := errors.New("no " + types.APIKeyHeader + " header found from within authing middleware") - r.handlerReturnWithError(w, ErrAuthNeeded, err) - return - } - allowedKeys, err := r.Config.GetAPIKeys() - if err != nil { - r.handlerReturnWithError(w, ErrConfigReadFailed, err) - return - } - for _, key := 
range allowedKeys { - if key == "*" { - // all keys are allowed, it's all good - next.ServeHTTP(w, req) - return - } - if apiKey == key { - // we're in the allowlist, it's all good - next.ServeHTTP(w, req) - return - } - } - err = fmt.Errorf("api key %s not found in list of authed keys", apiKey) - r.handlerReturnWithError(w, ErrAuthNeeded, err) - }) -} - type statusRecorder struct { http.ResponseWriter status int @@ -124,15 +90,12 @@ func (r *Router) requestLogger(next http.Handler) http.Handler { } func (r *Router) setResponseHeaders(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - - // Set content type header early so it's before any calls to WriteHeader + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { // Set content type header early so it's before any calls to WriteHeader w.Header().Set("Content-Type", "application/json") // Allow cross-origin API operation from browser js w.Header().Set("Access-Control-Allow-Origin", "*") next.ServeHTTP(w, req) - }) } diff --git a/route/otlp_trace.go b/route/otlp_trace.go index a8ce8b21fe..96cb315fb0 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -19,6 +19,8 @@ import ( func (r *Router) postOTLP(w http.ResponseWriter, req *http.Request) { ri := huskyotlp.GetRequestInfoFromHttpHeaders(req.Header) + r.Logger.Info().Logf("ri: %+v", ri) + result, err := huskyotlp.TranslateTraceRequestFromReader(req.Body, ri) if err != nil { r.handlerReturnWithError(w, ErrUpstreamFailed, err) @@ -26,8 +28,11 @@ func (r *Router) postOTLP(w http.ResponseWriter, req *http.Request) { } token := ri.ApiToken - tenantId := ri.ApiTenantId - if err := processTraceRequest(req.Context(), r, result.Batches, ri.Dataset, token, tenantId); err != nil { + tenantID := ri.ApiTenantId + if tenantID == "" { + tenantID, _ = r.Config.GetTenantId() + } + if err := processTraceRequest(req.Context(), r, result.Batches, ri.Dataset, token, tenantID); err != nil { 
r.handlerReturnWithError(w, ErrUpstreamFailed, err) } } @@ -42,10 +47,10 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ return nil, huskyotlp.AsGRPCError(err) } token := ri.ApiToken - tenantId := ri.ApiTenantId - if len(tenantId) == 0 { - OpsrampTenantId, _ := r.Config.GetTenantId() - tenantId = OpsrampTenantId + tenantID := ri.ApiTenantId + if len(tenantID) == 0 { + opsrampTenantID, _ := r.Config.GetTenantId() + tenantID = opsrampTenantID } if len(ri.Dataset) == 0 { @@ -53,10 +58,7 @@ func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServ ri.Dataset = dataset } - r.Logger.Debug().Logf("TenantId: %s", tenantId) - r.Logger.Debug().Logf("dataset:", ri.Dataset) - - if err := processTraceRequest(ctx, r, result.Batches, ri.Dataset, token, tenantId); err != nil { + if err := processTraceRequest(ctx, r, result.Batches, ri.Dataset, token, tenantID); err != nil { return nil, huskyotlp.AsGRPCError(err) } @@ -69,7 +71,7 @@ func processTraceRequest( batches []huskyotlp.Batch, datasetName string, token string, - tenantId string) error { + tenantID string) error { var requestID types.RequestIDContextKey apiHost, err := router.Config.GetOpsrampAPI() @@ -84,7 +86,7 @@ func processTraceRequest( Context: ctx, APIHost: apiHost, APIToken: token, - APITenantId: tenantId, + APITenantId: tenantID, Dataset: datasetName, Environment: "", SampleRate: uint(ev.SampleRate), diff --git a/route/route.go b/route/route.go index fd5a29fe1d..517680ebee 100644 --- a/route/route.go +++ b/route/route.go @@ -172,7 +172,6 @@ func (r *Router) LnS(incomingOrPeer string) { // require an auth header for events and batches authedMuxxer := muxxer.PathPrefix("/1/").Methods("POST").Subrouter() - authedMuxxer.Use(r.apiKeyChecker) // handle events and batches authedMuxxer.HandleFunc("/events/{datasetName}", r.event).Name("event") @@ -180,7 +179,6 @@ func (r *Router) LnS(incomingOrPeer string) { // require an auth header for OTLP requests otlpMuxxer := 
muxxer.PathPrefix("/v1/").Methods("POST").Subrouter() - otlpMuxxer.Use(r.apiKeyChecker) // handle OTLP trace requests otlpMuxxer.HandleFunc("/traces", r.postOTLP).Name("otlp") @@ -409,9 +407,6 @@ func (r *Router) event(w http.ResponseWriter, req *http.Request) { func (r *Router) requestToEvent(req *http.Request, reqBod []byte) (*types.Event, error) { // get necessary bits out of the incoming event apiKey := req.Header.Get(types.APIKeyHeader) - if apiKey == "" { - apiKey = req.Header.Get(types.APIKeyHeaderShort) - } sampleRate, err := strconv.Atoi(req.Header.Get(types.SampleRateHeader)) if err != nil { sampleRate = 1 @@ -477,9 +472,6 @@ func (r *Router) batch(w http.ResponseWriter, req *http.Request) { } apiKey := req.Header.Get(types.APIKeyHeader) - if apiKey == "" { - apiKey = req.Header.Get(types.APIKeyHeaderShort) - } // get environment name - will be empty for legacy keys environment, err := r.getEnvironmentName(apiKey) @@ -740,16 +732,6 @@ func unmarshal(r *http.Request, data io.Reader, v interface{}) error { } } -func getAPIKeyAndDatasetFromMetadata(md metadata.MD) (apiKey string, dataset string) { - apiKey = getFirstValueFromMetadata(types.APIKeyHeader, md) - if apiKey == "" { - apiKey = getFirstValueFromMetadata(types.APIKeyHeaderShort, md) - } - dataset = getFirstValueFromMetadata(types.DatasetHeader, md) - - return apiKey, dataset -} - // getFirstValueFromMetadata returns the first value of a metadata entry using a // case-insensitive key func getFirstValueFromMetadata(key string, md metadata.MD) string { diff --git a/rules.toml b/rules.toml deleted file mode 100644 index 9a87e24b29..0000000000 --- a/rules.toml +++ /dev/null @@ -1,6 +0,0 @@ -############################ -## Sampling Rules Config ## -############################ - -# Defaults for the rules configuration are set in file_config.go. -# For an example file with various sampling methods and their configurations, see rules_complete.toml. 
diff --git a/sample/rules.go b/sample/rules.go index 56d27deb53..535391cb58 100644 --- a/sample/rules.go +++ b/sample/rules.go @@ -31,6 +31,15 @@ func (s *RulesBasedSampler) Start() error { // Check if any rule has a downstream sampler and create it for _, rule := range s.Config.Rule { + for _, cond := range rule.Condition { + if err := cond.Init(); err != nil { + s.Logger.Debug().WithFields(map[string]interface{}{ + "rule_name": rule.Name, + "condition": cond.String(), + }).Logf("error creating rule evaluation function: %s", err) + continue + } + } if rule.Sampler != nil { var sampler Sampler if rule.Sampler.DynamicSampler != nil { diff --git a/sample/rules_test.go b/sample/rules_test.go deleted file mode 100644 index 98aa93487e..0000000000 --- a/sample/rules_test.go +++ /dev/null @@ -1,1062 +0,0 @@ -package sample - -import ( - "testing" - - "github.com/opsramp/tracing-proxy/config" - "github.com/opsramp/tracing-proxy/logger" - "github.com/opsramp/tracing-proxy/metrics" - "github.com/opsramp/tracing-proxy/types" - "github.com/stretchr/testify/assert" -) - -type TestRulesData struct { - Rules *config.RulesBasedSamplerConfig - Spans []*types.Span - ExpectedRate uint - ExpectedKeep bool - ExpectedName string -} - -func TestRules(t *testing.T) { - data := []TestRulesData{ - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "int64equals", - SampleRate: 10, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "test", - Operator: "=", - Value: int64(1), - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test": int64(1), - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 10, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "int64greaterthan", - SampleRate: 10, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: 
"test", - Operator: ">", - Value: int64(1), - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test": int64(2), - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 10, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "int64lessthan", - SampleRate: 10, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "test", - Operator: "<", - Value: int64(2), - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test": int64(1), - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 10, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "int64float64lessthan", - SampleRate: 10, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "test", - Operator: "<", - Value: 2.2, - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test": int64(1), - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 10, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "rule that wont be hit", - SampleRate: 0, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "test", - Operator: ">", - Value: 2.2, - }, - }, - }, - { - Name: "fallback", - SampleRate: 10, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test": int64(1), - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 10, - ExpectedName: "fallback", - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "multiple matches", - SampleRate: 10, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "test", - Operator: "<=", - Value: 2.2, - }, - { - Field: "test", - Operator: ">=", - Value: 2.2, - }, - { - Field: "test_two", - Operator: "=", - Value: true, - }, - }, - 
}, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test": 2.2, - "test_two": false, - }, - }, - }, - { - Event: types.Event{ - Data: map[string]interface{}{ - "test_two": true, - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 10, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "drop", - Drop: true, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "test", - Operator: ">", - Value: int64(2), - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test": float64(3), - }, - }, - }, - }, - ExpectedKeep: false, - ExpectedRate: 0, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "drop everything", - Drop: true, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test": int64(1), - }, - }, - }, - }, - ExpectedKeep: false, - ExpectedRate: 0, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "test multiple rules must all be matched", - SampleRate: 4, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "first", - Operator: "=", - Value: int64(1), - }, - { - Field: "second", - Operator: "=", - Value: int64(2), - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "first": int64(1), - }, - }, - }, - { - Event: types.Event{ - Data: map[string]interface{}{ - "first": int64(1), - }, - }, - }, - }, - ExpectedKeep: true, - // the trace does not match all the rules so we expect the default sample rate - ExpectedRate: 1, - ExpectedName: "no rule matched", - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "not equal test", - SampleRate: 4, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "first", - Operator: "!=", - 
Value: int64(10), - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "first": int64(9), - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 4, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "exists test", - SampleRate: 4, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "first", - Operator: "exists", - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "first": int64(9), - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 4, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "not exists test", - SampleRate: 4, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "first", - Operator: "not-exists", - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "second": int64(9), - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 4, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "starts with test", - SampleRate: 4, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "first", - Operator: "starts-with", - Value: "honey", - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "first": "honeycomb", - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 4, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "contains test", - SampleRate: 4, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "first", - Operator: "contains", - Value: "eyco", - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "first": "honeycomb", - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 4, - }, - { - Rules: 
&config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "does not contain test", - SampleRate: 4, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "first", - Operator: "does-not-contain", - Value: "noteyco", - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "first": "honeycomb", - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 4, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "YAMLintgeaterthan", - SampleRate: 10, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "test", - Operator: ">", - Value: int(1), - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test": int64(2), - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 10, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "Check root span for span count", - SampleRate: 1, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "meta.span_count", - Operator: "=", - Value: int(2), - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "trace.trace_id": "12345", - "trace.span_id": "54321", - "meta.span_count": int64(2), - "test": int64(2), - }, - }, - }, - { - Event: types.Event{ - Data: map[string]interface{}{ - "trace.trace_id": "12345", - "trace.span_id": "654321", - "trace.parent_id": "54321", - "test": int64(2), - }, - }, - }, - }, - ExpectedName: "Check root span for span count", - ExpectedKeep: true, - ExpectedRate: 1, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "Check root span for span count", - Drop: true, - SampleRate: 0, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "meta.span_count", - Operator: ">=", - Value: int(2), - }, - }, - }, - }, - }, - Spans: []*types.Span{ - 
{ - Event: types.Event{ - Data: map[string]interface{}{ - "trace.trace_id": "12345", - "trace.span_id": "54321", - "meta.span_count": int64(2), - "test": int64(2), - }, - }, - }, - { - Event: types.Event{ - Data: map[string]interface{}{ - "trace.trace_id": "12345", - "trace.span_id": "654321", - "trace.parent_id": "54321", - "test": int64(2), - }, - }, - }, - { - Event: types.Event{ - Data: map[string]interface{}{ - "trace.trace_id": "12345", - "trace.span_id": "754321", - "trace.parent_id": "54321", - "test": int64(3), - }, - }, - }, - }, - ExpectedName: "Check root span for span count", - ExpectedKeep: false, - ExpectedRate: 0, - }, - } - - for _, d := range data { - sampler := &RulesBasedSampler{ - Config: d.Rules, - Logger: &logger.NullLogger{}, - Metrics: &metrics.NullMetrics{}, - } - - trace := &types.Trace{} - - for _, span := range d.Spans { - trace.AddSpan(span) - } - - rate, keep, reason := sampler.GetSampleRate(trace) - - assert.Equal(t, d.ExpectedRate, rate, d.Rules) - name := d.ExpectedName - if name == "" { - name = d.Rules.Rule[0].Name - } - assert.Contains(t, reason, name) - - // we can only test when we don't expect to keep the trace - if !d.ExpectedKeep { - assert.Equal(t, d.ExpectedKeep, keep, d.Rules) - } - } -} - -func TestRulesWithNestedFields(t *testing.T) { - data := []TestRulesData{ - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "nested field", - SampleRate: 10, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "test.test1", - Operator: "=", - Value: "a", - }, - }, - }, - }, - CheckNestedFields: true, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test": map[string]interface{}{ - "test1": "a", - }, - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 10, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "field not nested", - SampleRate: 10, - Condition: 
[]*config.RulesBasedSamplerCondition{ - { - Field: "test.test1", - Operator: "=", - Value: "a", - }, - }, - }, - }, - CheckNestedFields: true, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test.test1": "a", - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 10, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "not exists test", - SampleRate: 4, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "test.test1", - Operator: "not-exists", - }, - }, - }, - }, - CheckNestedFields: true, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test": map[string]interface{}{ - "test2": "b", - }, - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 4, - }, - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "do not check nested", - SampleRate: 4, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "test.test1", - Operator: "exists", - }, - }, - }, - }, - CheckNestedFields: false, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "test": map[string]interface{}{ - "test1": "a", - }, - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 1, - ExpectedName: "no rule matched", - }, - } - - for _, d := range data { - sampler := &RulesBasedSampler{ - Config: d.Rules, - Logger: &logger.NullLogger{}, - Metrics: &metrics.NullMetrics{}, - } - - trace := &types.Trace{} - - for _, span := range d.Spans { - trace.AddSpan(span) - } - - rate, keep, reason := sampler.GetSampleRate(trace) - - assert.Equal(t, d.ExpectedRate, rate, d.Rules) - name := d.ExpectedName - if name == "" { - name = d.Rules.Rule[0].Name - } - assert.Contains(t, reason, name) - - // we can only test when we don't expect to keep the trace - if !d.ExpectedKeep { - assert.Equal(t, d.ExpectedKeep, keep, d.Rules) - } - } -} - -func TestRulesWithDynamicSampler(t 
*testing.T) { - data := []TestRulesData{ - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "downstream-dynamic", - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "rule_test", - Operator: "=", - Value: int64(1), - }, - }, - Sampler: &config.RulesBasedDownstreamSampler{ - DynamicSampler: &config.DynamicSamplerConfig{ - SampleRate: 10, - FieldList: []string{"http.status_code"}, - AddSampleRateKeyToTrace: true, - AddSampleRateKeyToTraceField: "meta.key", - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "rule_test": int64(1), - "http.status_code": "200", - }, - }, - }, - { - Event: types.Event{ - Data: map[string]interface{}{ - "rule_test": int64(1), - "http.status_code": "200", - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 10, - }, - } - - for _, d := range data { - sampler := &RulesBasedSampler{ - Config: d.Rules, - Logger: &logger.NullLogger{}, - Metrics: &metrics.NullMetrics{}, - } - - trace := &types.Trace{} - - for _, span := range d.Spans { - trace.AddSpan(span) - } - - sampler.Start() - rate, keep, reason := sampler.GetSampleRate(trace) - - assert.Equal(t, d.ExpectedRate, rate, d.Rules) - name := d.ExpectedName - if name == "" { - name = d.Rules.Rule[0].Name - } - assert.Contains(t, reason, name) - - // we can only test when we don't expect to keep the trace - if !d.ExpectedKeep { - assert.Equal(t, d.ExpectedKeep, keep, d.Rules) - } - - spans := trace.GetSpans() - assert.Len(t, spans, len(d.Spans), "should have the same number of spans as input") - for _, span := range spans { - assert.Equal(t, span.Event.Data, map[string]interface{}{ - "rule_test": int64(1), - "http.status_code": "200", - "meta.key": "200•,", - }, "should add the sampling key to all spans in the trace") - } - } -} - -func TestRulesWithEMADynamicSampler(t *testing.T) { - data := []TestRulesData{ - { - Rules: &config.RulesBasedSamplerConfig{ - Rule: 
[]*config.RulesBasedSamplerRule{ - { - Name: "downstream-dynamic", - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "rule_test", - Operator: "=", - Value: int64(1), - }, - }, - Sampler: &config.RulesBasedDownstreamSampler{ - EMADynamicSampler: &config.EMADynamicSamplerConfig{ - GoalSampleRate: 10, - FieldList: []string{"http.status_code"}, - AddSampleRateKeyToTrace: true, - AddSampleRateKeyToTraceField: "meta.key", - }, - }, - }, - }, - }, - Spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "rule_test": int64(1), - "http.status_code": "200", - }, - }, - }, - { - Event: types.Event{ - Data: map[string]interface{}{ - "rule_test": int64(1), - "http.status_code": "200", - }, - }, - }, - }, - ExpectedKeep: true, - ExpectedRate: 10, - }, - } - - for _, d := range data { - sampler := &RulesBasedSampler{ - Config: d.Rules, - Logger: &logger.NullLogger{}, - Metrics: &metrics.NullMetrics{}, - } - - trace := &types.Trace{} - - for _, span := range d.Spans { - trace.AddSpan(span) - } - - sampler.Start() - rate, keep, reason := sampler.GetSampleRate(trace) - - assert.Equal(t, d.ExpectedRate, rate, d.Rules) - name := d.ExpectedName - if name == "" { - name = d.Rules.Rule[0].Name - } - assert.Contains(t, reason, name) - - // we can only test when we don't expect to keep the trace - if !d.ExpectedKeep { - assert.Equal(t, d.ExpectedKeep, keep, d.Rules) - } - - spans := trace.GetSpans() - assert.Len(t, spans, len(d.Spans), "should have the same number of spans as input") - for _, span := range spans { - assert.Equal(t, span.Event.Data, map[string]interface{}{ - "rule_test": int64(1), - "http.status_code": "200", - "meta.key": "200•,", - }, "should add the sampling key to all spans in the trace") - } - } -} - -func TestRuleMatchesSpanMatchingSpan(t *testing.T) { - testCases := []struct { - name string - spans []*types.Span - keepSpanScope bool - keepTraceScope bool - }{ - { - name: "all conditions match single span", - keepSpanScope: 
true, - keepTraceScope: true, - spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "rule_test": int64(1), - "http.status_code": "200", - }, - }, - }, - { - Event: types.Event{ - Data: map[string]interface{}{ - "rule_test": int64(5), - "http.status_code": "500", - }, - }, - }, - }, - }, - { - name: "all conditions do not match single span", - keepSpanScope: false, - keepTraceScope: true, - spans: []*types.Span{ - { - Event: types.Event{ - Data: map[string]interface{}{ - "rule_test": int64(1), - "http.status_code": "500", - }, - }, - }, - { - Event: types.Event{ - Data: map[string]interface{}{ - "rule_test": int64(5), - "http.status_code": "200", - }, - }, - }, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - for _, scope := range []string{"span", "trace"} { - sampler := &RulesBasedSampler{ - Config: &config.RulesBasedSamplerConfig{ - Rule: []*config.RulesBasedSamplerRule{ - { - Name: "Rule to match span", - Scope: scope, - SampleRate: 1, - Condition: []*config.RulesBasedSamplerCondition{ - { - Field: "rule_test", - Operator: "=", - Value: int64(1), - }, - { - Field: "http.status_code", - Operator: "=", - Value: "200", - }, - }, - }, - { - Name: "Default rule", - Drop: true, - SampleRate: 1, - }, - }, - }, - Logger: &logger.NullLogger{}, - Metrics: &metrics.NullMetrics{}, - } - - trace := &types.Trace{} - - for _, span := range tc.spans { - trace.AddSpan(span) - } - - sampler.Start() - rate, keep, _ := sampler.GetSampleRate(trace) - - assert.Equal(t, uint(1), rate, rate) - if scope == "span" { - assert.Equal(t, tc.keepSpanScope, keep, keep) - } else { - assert.Equal(t, tc.keepTraceScope, keep, keep) - } - } - }) - } -} diff --git a/sharder/deterministic_test.go b/sharder/deterministic_test.go index 9e2dba88fc..da44379dd9 100644 --- a/sharder/deterministic_test.go +++ b/sharder/deterministic_test.go @@ -267,7 +267,6 @@ func TestShardDrop(t *testing.T) { results[s.GetAddress()]++ placements[i].shard 
= s.GetAddress() } - fmt.Println(results) // reach in and delete one of the peers, then reshard config.GetPeersVal = config.GetPeersVal[1:] @@ -341,7 +340,6 @@ func TestShardAddHash(t *testing.T) { results[s.GetAddress()]++ placements[i].shard = s.GetAddress() } - fmt.Println(results) // reach in and add a peer, then reshard config.GetPeersVal = append(config.GetPeersVal, "http://2.2.2.255/:8081") diff --git a/types/event.go b/types/event.go index 446d7bec08..a2f1b92648 100644 --- a/types/event.go +++ b/types/event.go @@ -6,13 +6,11 @@ import ( ) const ( - APIKeyHeader = "X-Honeycomb-Team" - // libtrace-js uses this - APIKeyHeaderShort = "X-Hny-Team" - DatasetHeader = "X-Honeycomb-Dataset" - SampleRateHeader = "X-Honeycomb-Samplerate" - TimestampHeader = "X-Honeycomb-Event-Time" - QueryTokenHeader = "X-Honeycomb-Refinery-Query" + APIKeyHeader = "X-OpsRamp-Team" + + SampleRateHeader = "X-OpsRamp-Samplerate" + TimestampHeader = "X-OpsRamp-Event-Time" + QueryTokenHeader = "X-OpsRamp-Refinery-Query" ) // used to put a request ID into the request context for logging From b6b6f9a6fdb1d5fe39d1f3ee1bce8e2e63b52e43 Mon Sep 17 00:00:00 2001 From: sai kalyan bhagavathula Date: Thu, 30 Mar 2023 13:32:34 +0530 Subject: [PATCH 294/351] changes related to packaging deb/rpm --- build/tracing-deb/config_complete.toml | 511 +++++++++++++++++++++++ build/tracing-deb/configure.go | 16 +- build/tracing-deb/rules_complete.toml | 229 ++++++++++ build/tracing-deb/script.sh | 6 +- build/tracing-deb/tracing/DEBIAN/control | 2 +- build/tracing-rpm/config_complete.toml | 511 +++++++++++++++++++++++ build/tracing-rpm/configure.go | 17 +- build/tracing-rpm/rules_complete.toml | 229 ++++++++++ build/tracing-rpm/script.sh | 6 +- config_complete.toml | 96 ++--- rules_complete.toml | 472 +++++++++------------ 11 files changed, 1757 insertions(+), 338 deletions(-) create mode 100644 build/tracing-deb/config_complete.toml create mode 100644 build/tracing-deb/rules_complete.toml create mode 100644 
build/tracing-rpm/config_complete.toml create mode 100644 build/tracing-rpm/rules_complete.toml diff --git a/build/tracing-deb/config_complete.toml b/build/tracing-deb/config_complete.toml new file mode 100644 index 0000000000..dcbf861bd4 --- /dev/null +++ b/build/tracing-deb/config_complete.toml @@ -0,0 +1,511 @@ +##################### +## Tracing-proxy Config ## +##################### + +# ListenAddr is the IP and port on which to listen for incoming events. Incoming +# traffic is expected to be HTTP, so if using SSL put something like nginx in +# front to do the decryption. +# Should be of the form 0.0.0.0:8082 +# Not eligible for live reload. +ListenAddr = "0.0.0.0:8082" + +# GRPCListenAddr is the IP and port on which to listen for incoming events over +# gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put +# something like nginx in front to do the decryption. +# Should be of the form 0.0.0.0:9090 +# Not eligible for live reload. +GRPCListenAddr = "0.0.0.0:9090" + +# PeerListenAddr is the IP and port on which to listen for traffic being +# rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL +# put something like nginx in front to do the decryption. Must be different from +# ListenAddr +# Should be of the form 0.0.0.0:8081 +# Not eligible for live reload. +PeerListenAddr = "0.0.0.0:8083" +GRPCPeerListenAddr = "0.0.0.0:8084" + +# ProxyProtocol accepts http and https +# Not Eligible for live reload. +ProxyProtocol = "" + +# ProxyServer takes the proxy server address +# Not Eligible for live reload. +ProxyServer = "" + +# ProxyPort takes the proxy server port +# Not Eligible for live reload. +ProxyPort = 0 + +# ProxyUserName takes the proxy username +# Not Eligible for live reload. +ProxyUserName = "" + +# ProxyPassword takes the proxy password +# Not Eligible for live reload. +ProxyPassword = "" + +# CompressPeerCommunication determines whether tracin will compress span data +# it forwards to peers. 
If it costs money to transmit data between tracin +# instances (e.g. they're spread across AWS availability zones), then you +# almost certainly want compression enabled to reduce your bill. The option to +# disable it is provided as an escape hatch for deployments that value lower CPU +# utilization over data transfer costs. +CompressPeerCommunication = true + +# OpsrampAPI is the URL for the upstream Opsramp API. +# Eligible for live reload. +OpsrampAPI = + +# OpsrampKey is used to get the OauthToken +OpsrampKey = + +# OpsrampSecret is used to get the OauthToken +OpsrampSecret = + +# Traces are send to the client with given tenantid +TenantId = + +# Dataset you want to use for sampling +Dataset = "ds" + +#Tls Options +UseTls = true +UseTlsInsecure = false + +# SendDelay is a short timer that will be triggered when a trace is complete. +# Tracing-proxy will wait this duration before actually sending the trace. The +# reason for this short delay is to allow for small network delays or clock +# jitters to elapse and any final spans to arrive before actually sending the +# trace. This supports duration strings with supplied units. Set to 0 for +# immediate sends. +# Eligible for live reload. +SendDelay = "2s" + +# BatchTimeout dictates how frequently to send unfulfilled batches. By default +# this will use the DefaultBatchTimeout in libhoney as its value, which is 100ms. +# Eligible for live reload. +BatchTimeout = "1s" + +# TraceTimeout is a long timer; it represents the outside boundary of how long +# to wait before sending an incomplete trace. Normally traces are sent when the +# root span arrives. Sometimes the root span never arrives (due to crashes or +# whatever), and this timer will send a trace even without having received the +# root span. If you have particularly long-lived traces you should increase this +# timer. This supports duration strings with supplied units. +# Eligible for live reload. 
+TraceTimeout = "60s" + +# MaxBatchSize is the number of events to be included in the batch for sending +MaxBatchSize = 500 + +# SendTicker is a short timer; it determines the duration to use to check for traces to send +SendTicker = "100ms" + +# LoggingLevel is the level above which we should log. Debug is very verbose, +# and should only be used in pre-production environments. Info is the +# recommended level. Valid options are "debug", "info", "error", and +# "panic" +# Not eligible for live reload. +LoggingLevel = "error" + +# UpstreamBufferSize and PeerBufferSize control how large of an event queue to use +# when buffering events that will be forwarded to peers or the upstream API. +UpstreamBufferSize = 10000 +PeerBufferSize = 10000 + +# DebugServiceAddr sets the IP and port the debug service will run on +# The debug service will only run if the command line flag -d is specified +# The debug service runs on the first open port between localhost:6060 and :6069 by default +# DebugServiceAddr = "localhost:8085" + +# AddHostMetadataToTrace determines whether or not to add information about +# the host that Tracing-proxy is running on to the spans that it processes. +# If enabled, information about the host will be added to each span with the +# prefix `meta.tracing-procy.`. +# Currently the only value added is 'meta.tracing-proxy.local_hostname'. +# Not eligible for live reload +AddHostMetadataToTrace = false + +# EnvironmentCacheTTL is the amount of time a cache entry will live that associates +# an API key with an environment name. +# Cache misses lookup the environment name using OpsrampAPI config value. +# Default is 1 hour ("1h"). +# Not eligible for live reload. +EnvironmentCacheTTL = "1h" + +# QueryAuthToken, if specified, provides a token that must be specified with +# the header "X-Opsramp-Tracing-proxy-Query" in order for a /query request to succeed. 
+# These /query requests are intended for debugging tracin installations and +# are not typically needed in normal operation. +# Can be specified in the environment as tracing-proxy_QUERY_AUTH_TOKEN. +# If left unspecified, the /query endpoints are inaccessible. +# Not eligible for live reload. +# QueryAuthToken = "some-random-value" + +# AddRuleReasonToTrace causes traces that are sent to Opsramp to include the field `meta.tracin.reason`. +# This field contains text indicating which rule was evaluated that caused the trace to be included. +# Eligible for live reload. +# AddRuleReasonToTrace = true + +# AdditionalErrorFields should be a list of span fields that should be included when logging +# errors that happen during ingestion of events (for example, the span too large error). +# This is primarily useful in trying to track down misbehaving senders in a large installation. +# The fields `dataset`, `apihost`, and `environment` are always included. +# If a field is not present in the span, it will not be present in the error log. +# Default is ["trace.span_id"]. +# Eligible for live reload. +AdditionalErrorFields = [ + "trace.span_id" +] + +# AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate +# the number of child spans on the trace at the time the sampling decision was made. +# This value is available to the rules-based sampler, making it possible to write rules that +# are dependent upon the number of spans in the trace. +# Default is false. +# Eligible for live reload. +# AddSpanCountToRoot = true + +# CacheOverrunStrategy controls the cache management behavior under memory pressure. +# "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, +# which is generally not helpful unless it occurs because of a permanent change in traffic patterns. 
+# In the "impact" strategy, the items having the most impact on the cache size are +# ejected from the cache earlier than normal but the cache is not resized. +# In all cases, it only applies if MaxAlloc is nonzero. +# Default is "resize" for compatibility but "impact" is recommended for most installations. +# Eligible for live reload. +# CacheOverrunStrategy = "impact" + +# Metrics are sent to OpsRamp (The collection happens based on configuration specifie +# in OpsRampMetrics and only works when the Metrics is set to "prometheus") +SendMetricsToOpsRamp = false + +############################ +## Implementation Choices ## +############################ + +# Each of the config options below chooses an implementation of a Tracing-proxy +# component to use. Depending on the choice there may be more configuration +# required below in the section for that choice. Changing implementation choices +# requires a process restart; these changes will not be picked up by a live +# config reload. (Individual config options for a given implementation may be +# eligible for live reload). + +# Collector describes which collector to use for collecting traces. The only +# current valid option is "InMemCollector".. More can be added by adding +# implementations of the Collector interface. +Collector = "InMemCollector" + +######################### +## Peer Management ## +######################### + +[PeerManagement] +Type = "file" +## Peers is the list of all servers participating in this proxy cluster. Events +## will be sharded evenly across all peers based on the Trace ID. Values here +## should be the base URL used to access the peer, and should include scheme, +## hostname (or ip address) and port. All servers in the cluster should be in +## this list, including this host. 
+Peers = [ + "http://127.0.0.1:8084", #only grpc peer listener used +# # "http://127.0.0.1:8083", +# # "http://10.1.2.3.4:8080", +# # "http://tracin-1231:8080", +# # "http://peer-3.fqdn" // assumes port 80 +] + +#[PeerManagement] +#Type = "redis" +# RedisHost is is used to connect to redis for peer cluster membership management. +# Further, if the environment variable 'Tracing_Proxy_REDIS_HOST' is set it takes +# precedence and this value is ignored. +# Not eligible for live reload. +#RedisHost = "redis:22122" + +# RedisUsername is the username used to connect to redis for peer cluster membership management. +# If the environment variable 'Tracing_Proxy_REDIS_USERNAME' is set it takes +# precedence and this value is ignored. +# Not eligible for live reload. +# RedisUsername = "" + +# RedisPassword is the password used to connect to redis for peer cluster membership management. +# If the environment variable 'Tracing_Proxy_REDIS_PASSWORD' is set it takes +# precedence and this value is ignored. +# Not eligible for live reload. +# RedisPassword = "" + +# RedisPrefix is a string used as a prefix for the keys in redis while storing +# the peer membership. It might be useful to set this in any situation where +# multiple tracing-proxy clusters or multiple applications want to share a single +# Redis instance. If not set then "tracing-proxy" is used as prefix +# RedisPrefix = "customPrefix" + +# RedisDatabase is an integer from 0-15 indicating the database number to use +# for the Redis instance storing the peer membership. It might be useful to set +# this in any situation where multiple trace-proxy clusters or multiple +# applications want to share a single Redis instance. if not set Default = 0 +# RedisDatabase = 1 + +# UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. +# Not eligible for live reload. +# UseTLS = false + +# UseTLSInsecure disables certificate checks +# Not eligible for live reload. 
+# UseTLSInsecure = false + +# IdentifierInterfaceName is optional. By default, when using RedisHost, Tracing-proxy will use +# the local hostname to identify itself to other peers in Redis. If your environment +# requires that you use IPs as identifiers (for example, if peers can't resolve eachother +# by name), you can specify the network interface that Tracing-proxy is listening on here. +# Tracing-proxy will use the first unicast address that it finds on the specified network +# interface as its identifier. +# Not eligible for live reload. +# IdentifierInterfaceName = "eth0" + +# UseIPV6Identifier is optional. If using IdentifierInterfaceName, Tracing-proxy will default to the first +# IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use +# the first IPV6 unicast address found. +# UseIPV6Identifier = false + +# RedisIdentifier is optional. By default, when using RedisHost, Tracing-proxy will use +# the local hostname to id788714cd-a17a-4d7e-9bac-c35131f4bcc2entify itself to other peers in Redis. If your environment +# requires that you use IPs as identifiers (for example, if peers can't resolve eachother +# by name), you can specify the exact identifier (IP address, etc) to use here. +# Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set. +# RedisIdentifier = "192.168.1.1" + +# Timeout is optional. By default, when using RedisHost, Tracing-proxy will timeout +# after 5s when communicating with Redis. +# Timeout = "5s" + +# Strategy controls the way that traces are assigned to tracin nodes. +# The "legacy" strategy uses a simple algorithm that unfortunately causes +# 1/2 of the in-flight traces to be assigned to a different node whenever the +# number of nodes changes. +# The legacy strategy is deprecated and is intended to be removed in a future release. 
+# The "hash" strategy is strongly recommended, as only 1/N traces (where N is the +# number of nodes) are disrupted when the node count changes. +# Not eligible for live reload. +Strategy = "hash" + +######################### +## In-Memory Collector ## +######################### + +# InMemCollector brings together all the settings that are relevant to +# collecting spans together to make traces. +[InMemCollector] + +# The collection cache is used to collect all spans into a trace as well as +# remember the sampling decision for any spans that might come in after the +# trace has been marked "complete" (either by timing out or seeing the root +# span). The number of traces in the cache should be many multiples (100x to +# 1000x) of the total number of concurrently active traces (trace throughput * +# trace duration). +# Eligible for live reload. Growing the cache capacity with a live config reload +# is fine. Avoid shrinking it with a live reload (you can, but it may cause +# temporary odd sampling decisions). +CacheCapacity = 1000 + +# MaxAlloc is optional. If set, it must be an integer >= 0. 64-bit values are +# supported. +# If set to a non-zero value, once per tick (see SendTicker) the collector +# will compare total allocated bytes to this value. If allocation is too +# high, cache capacity will be adjusted according to the setting for +# CacheOverrunStrategy. +# Useful values for this setting are generally in the range of 75%-90% of +# available system memory. +MaxAlloc = 0 + +################### +## Logrus Logger ## +################### + +# LogrusLogger is a section of the config only used if you are using the +# LogrusLogger to send all logs to STDOUT using the logrus package. If you are +# using a different logger (eg Opsramp logger) you can leave all this +# commented out. +[LogrusLogger] + +# LogFormatter specifies the log format. 
Accepted values are one of ["logfmt", "json"] +LogFormatter = "logfmt" + +# LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr", "file"] +LogOutput = "file" + +## LogrusLogger.File - specifies configs for logs when LogOutput is set to "file" +[LogrusLogger.File] + +# FileName specifies the location where the logs are supposed be stored +FileName = "/var/log/opsramp/tracing-proxy.log" + +# MaxSize is the maximum size in megabytes of the log file before it gets rotated. +MaxSize = 1 + +# MaxBackups is the maximum number of old log files to retain. +MaxBackups = 3 + +# Compress determines if the rotated log files should be compressed +# using gzip. +Compress = true + + +####################### +## Prometheus Metrics ## +####################### + +[OpsRampMetrics] +# MetricsListenAddr determines the interface and port on which Prometheus will +# listen for requests for /metrics. Must be different from the main Tracing-proxy +# listener. +# Not eligible for live reload. +MetricsListenAddr = "localhost:2112" + +# OpsRampMetricsAPI is the URL for the upstream OpsRamp API. +# Not Eligible for live reload. +OpsRampMetricsAPI = + +# OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. +# Not Eligible for live reload. +OpsRampTenantID = + +# OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. +# This is separate from the APIKeys used to authenticate regular +# traffic. +# Not Eligible for live reload. +OpsRampMetricsAPIKey = + +# OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. +# This is separate from the APISecret used to authenticate regular +# traffic. +# Not Eligible for live reload. +OpsRampMetricsAPISecret = + +# OpsRampMetricsReportingInterval is frequency specified in seconds at which +# the metrics are collected and sent to OpsRamp +# Not Eligible for live reload. 
+OpsRampMetricsReportingInterval = 10
+
+# OpsRampMetricsRetryCount is the number of times we retry in case the send fails
+# Not Eligible for live reload.
+OpsRampMetricsRetryCount = 2
+
+# ProxyProtocol accepts http and https
+# Not Eligible for live reload.
+ProxyProtocol = ""
+
+# ProxyServer takes the proxy server address
+# Not Eligible for live reload.
+ProxyServer = ""
+
+# ProxyPort takes the proxy server port
+# Not Eligible for live reload.
+ProxyPort = 3128
+
+# ProxyUserName takes the proxy username
+# Not Eligible for live reload.
+ProxyUserName = ""
+
+# ProxyPassword takes the proxy password
+# Not Eligible for live reload.
+ProxyPassword = ""
+
+# OpsRampMetricsList is a list of regular expressions which match the metric
+# names. Keep the list as small as possible since too many regular expressions can lead to bad performance.
+# Internally all the regex in the list are concatenated using '|' to make the computation a little faster.
+# Not Eligible for live reload
+OpsRampMetricsList = [".*"]
+
+
+[GRPCServerParameters]
+
+# MaxConnectionIdle is a duration for the amount of time after which an
+# idle connection would be closed by sending a GoAway. Idleness duration is
+# defined since the most recent time the number of outstanding RPCs became
+# zero or the connection establishment.
+# 0s sets duration to infinity which is the default:
+# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219
+# Not eligible for live reload.
+# MaxConnectionIdle = "1m"
+
+# MaxConnectionAge is a duration for the maximum amount of time a
+# connection may exist before it will be closed by sending a GoAway. A
+# random jitter of +/-10% will be added to MaxConnectionAge to spread out
+# connection storms.
+# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 +# Not eligible for live reload. +# MaxConnectionAge = "0s" + +# MaxConnectionAgeGrace is an additive period after MaxConnectionAge after +# which the connection will be forcibly closed. +# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 +# Not eligible for live reload. +# MaxConnectionAgeGrace = "0s" + +# After a duration of this time if the server doesn't see any activity it +# pings the client to see if the transport is still alive. +# If set below 1s, a minimum value of 1s will be used instead. +# 0s sets duration to 2 hours which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 +# Not eligible for live reload. +# Time = "10s" + +# After having pinged for keepalive check, the server waits for a duration +# of Timeout and if no activity is seen even after that the connection is +# closed. +# 0s sets duration to 20 seconds which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 +# Not eligible for live reload. +# Timeout = "2s" + + + +################################ +## Sample Cache Configuration ## +################################ + +# Sample Cache Configuration controls the sample cache used to retain information about trace +# status after the sampling decision has been made. + +[SampleCacheConfig] + +# Type controls the type of sample cache used. +# "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is +# 5x the size of the trace cache. 
This is Tracing-proxy's original sample cache strategy. +# "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember +# a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. +# It is also more configurable. The cuckoo filter is recommended for most installations. +# Default is "legacy". +# Not eligible for live reload (you cannot change the type of cache with reload). +# Type = "cuckoo" + +# KeptSize controls the number of traces preserved in the cuckoo kept traces cache. +# Tracing-proxy keeps a record of each trace that was kept and sent to Opsramp, along with some +# statistical information. This is most useful in cases where the trace was sent before sending +# the root span, so that the root span can be decorated with accurate metadata. +# Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). +# Does not apply to the "legacy" type of cache. +# Eligible for live reload. +# KeptSize = 10_000 + +# DroppedSize controls the size of the cuckoo dropped traces cache. +# This cache consumes 4-6 bytes per trace at a scale of millions of traces. +# Changing its size with live reload sets a future limit, but does not have an immediate effect. +# Default is 1_000_000 traces. +# Does not apply to the "legacy" type of cache. +# Eligible for live reload. +# DroppedSize = 1_000_000 + +# SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates +# the remaining capacity of its dropped traces cache and possibly cycles it. +# This cache is quite resilient so it doesn't need to happen very often, but the +# operation is also inexpensive. +# Default is 10 seconds. +# Does not apply to the "legacy" type of cache. +# Eligible for live reload. 
+# SizeCheckInterval = "10s" diff --git a/build/tracing-deb/configure.go b/build/tracing-deb/configure.go index f8663aabc7..1daecd2039 100644 --- a/build/tracing-deb/configure.go +++ b/build/tracing-deb/configure.go @@ -23,26 +23,26 @@ func main() { opsrampApiHost := "OpsrampAPI = \"" + *api + "\"" opsrampMetricsApiHost := "OpsRampMetricsAPI = \"" + *api + "\"" - updatedConfigFile = bytes.Replace(configFile, []byte("OpsrampAPI = "), []byte(opsrampApiHost), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPI = "), []byte(opsrampMetricsApiHost), 1) + updatedConfigFile = bytes.Replace(configFile, []byte("OpsrampAPI ="), []byte(opsrampApiHost), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPI ="), []byte(opsrampMetricsApiHost), 1) opsrampKey := "OpsrampKey = \"" + *key + "\"" opsrampMetricsApiKey := "OpsRampMetricsAPIKey = \"" + *key + "\"" - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampKey = "), []byte(opsrampKey), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPIKey = "), []byte(opsrampMetricsApiKey), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampKey ="), []byte(opsrampKey), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPIKey ="), []byte(opsrampMetricsApiKey), 1) OpsrampSecret := "OpsrampSecret = \"" + *secret + "\"" OpsRampMetricsAPISecret := "OpsRampMetricsAPISecret = \"" + *secret + "\"" - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampSecret = "), []byte(OpsrampSecret), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPISecret = "), []byte(OpsRampMetricsAPISecret), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampSecret ="), []byte(OpsrampSecret), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPISecret ="), []byte(OpsRampMetricsAPISecret), 1) opsrampTenantID := 
"OpsRampTenantID = \"" + *tenant + "\"" TenantId := "TenantId = \"" + *tenant + "\"" - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampTenantID = "), []byte(opsrampTenantID), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("TenantId = "), []byte(TenantId), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampTenantID ="), []byte(opsrampTenantID), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("TenantId ="), []byte(TenantId), 1) if err = ioutil.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.toml", updatedConfigFile, 0666); err != nil { fmt.Println(err) diff --git a/build/tracing-deb/rules_complete.toml b/build/tracing-deb/rules_complete.toml new file mode 100644 index 0000000000..1ff80e7295 --- /dev/null +++ b/build/tracing-deb/rules_complete.toml @@ -0,0 +1,229 @@ +############################ +## Sampling Rules Config ## +############################ + +# DryRun - If enabled, marks traces that would be dropped given current sampling rules, +# and sends all traces regardless +DryRun = true + +# DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to tracing_proxy_kept +DryRunFieldName = "fromProxy" + +# DeterministicSampler is a section of the config for manipulating the +# Deterministic Sampler implementation. This is the simplest sampling algorithm +# - it is a static sample rate, choosing traces randomly to either keep or send +# (at the appropriate rate). It is not influenced by the contents of the trace. +Sampler = "DeterministicSampler" + +# SampleRate is the rate at which to sample. It indicates a ratio, where one +# sample trace is kept for every n traces seen. For example, a SampleRate of 30 +# will keep 1 out of every 30 traces. The choice on whether to keep any specific +# trace is random, so the rate is approximate. +# Eligible for live reload. 
+SampleRate = 1 + +# [dataset1] + +# # Note: If your dataset name contains a space, you will have to escape the dataset name +# # using single quotes, such as ['dataset 1'] + +# # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler +# # implementation. This sampler collects the values of a number of fields from a +# # trace and uses them to form a key. This key is handed to the standard dynamic +# # sampler algorithm which generates a sample rate based on the frequency with +# # which that key has appeared in the previous ClearFrequencySec seconds. See +# # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics +# # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from +# # that package. +# Sampler = "DynamicSampler" + +# # SampleRate is the goal rate at which to sample. It indicates a ratio, where +# # one sample trace is kept for every n traces seen. For example, a SampleRate of +# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic +# # sampler, who assigns a sample rate for each trace based on the fields selected +# # from that trace. +# # Eligible for live reload. +# SampleRate = 1 + +# # FieldList is a list of all the field names to use to form the key that will be +# # handed to the dynamic sampler. The cardinality of the combination of values +# # from all of these keys should be reasonable in the face of the frequency of +# # those keys. If the combination of fields in these keys essentially makes them +# # unique, the dynamic sampler will do no sampling. If the keys have too few +# # values, you won't get samples of the most interesting traces. A good key +# # selection will have consistent values for high frequency boring traffic and +# # unique values for outliers and interesting traffic. Including an error field +# # (or something like HTTP status code) is an excellent choice. 
As an example, +# # assuming 30 or so endpoints, a combination of HTTP endpoint and status code +# # would be a good set of keys in order to let you see accurately use of all +# # endpoints and call out when there is failing traffic to any endpoint. Field +# # names may come from any span in the trace. +# # Eligible for live reload. +# FieldList = [""] + +# # UseTraceLength will add the number of spans in the trace in to the dynamic +# # sampler as part of the key. The number of spans is exact, so if there are +# # normally small variations in trace length you may want to leave this off. If +# # traces are consistent lengths and changes in trace length is a useful +# # indicator of traces you'd like to see in Opsramp, set this to true. +# # Eligible for live reload. +# UseTraceLength = true + +# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field +# # to the root span of the trace containing the key used by the sampler to decide +# # the sample rate. This can be helpful in understanding why the sampler is +# # making certain decisions about sample rate and help you understand how to +# # better choose the sample rate key (aka the FieldList setting above) to use. +# AddSampleRateKeyToTrace = true + +# # AddSampleRateKeyToTraceField is the name of the field the sampler will use +# # when adding the sample rate key to the trace. This setting is only used when +# # AddSampleRateKeyToTrace is true. +# AddSampleRateKeyToTraceField = "" + +# # ClearFrequencySec is the name of the field the sampler will use to determine +# # the period over which it will calculate the sample rate. This setting defaults +# # to 30. +# # Eligible for live reload. +# ClearFrequencySec = 60 + +# [dataset2] + +# # EMADynamicSampler is a section of the config for manipulating the Exponential +# # Moving Average (EMA) Dynamic Sampler implementation. 
Like the simple DynamicSampler,
+# # it attempts to average a given sample rate, weighting rare traffic and frequent
+# # traffic differently so as to end up with the correct average.
+# #
+# # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended
+# # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs
+# # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential
+# # Moving Average of counts seen per key, and adjusts this average at regular intervals.
+# # The weight applied to more recent intervals is defined by `weight`, a number between
+# # (0, 1) - larger values weight the average more toward recent observations. In other words,
+# # a larger weight will cause sample rates to more quickly adapt to traffic patterns,
+# # while a smaller weight will result in sample rates that are less sensitive to bursts or drops
+# # in traffic and thus more consistent over time.
+# #
+# # Keys that are not found in the EMA will always have a sample
+# # rate of 1. Keys that occur more frequently will be sampled on a logarithmic
+# # curve. In other words, every key will be represented at least once in any
+# # given window and more frequent keys will have their sample rate
+# # increased proportionally to wind up with the goal sample rate.
+# Sampler = "EMADynamicSampler"
+
+# # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where
+# # one sample trace is kept for every n traces seen. For example, a SampleRate of
+# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic
+# # sampler, who assigns a sample rate for each trace based on the fields selected
+# # from that trace.
+# # Eligible for live reload.
+# GoalSampleRate = 1
+
+# # FieldList is a list of all the field names to use to form the key that will be
+# # handed to the dynamic sampler.
The cardinality of the combination of values +# # from all of these keys should be reasonable in the face of the frequency of +# # those keys. If the combination of fields in these keys essentially makes them +# # unique, the dynamic sampler will do no sampling. If the keys have too few +# # values, you won't get samples of the most interesting traces. A good key +# # selection will have consistent values for high frequency boring traffic and +# # unique values for outliers and interesting traffic. Including an error field +# # (or something like HTTP status code) is an excellent choice. As an example, +# # assuming 30 or so endpoints, a combination of HTTP endpoint and status code +# # would be a good set of keys in order to let you see accurately use of all +# # endpoints and call out when there is failing traffic to any endpoint. Field +# # names may come from any span in the trace. +# # Eligible for live reload. +# FieldList = [""] + +# # UseTraceLength will add the number of spans in the trace in to the dynamic +# # sampler as part of the key. The number of spans is exact, so if there are +# # normally small variations in trace length you may want to leave this off. If +# # traces are consistent lengths and changes in trace length is a useful +# # indicator of traces you'd like to see in Opsramp, set this to true. +# # Eligible for live reload. +# UseTraceLength = true + +# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field +# # to the root span of the trace containing the key used by the sampler to decide +# # the sample rate. This can be helpful in understanding why the sampler is +# # making certain decisions about sample rate and help you understand how to +# # better choose the sample rate key (aka the FieldList setting above) to use. +# AddSampleRateKeyToTrace = true + +# # AddSampleRateKeyToTraceField is the name of the field the sampler will use +# # when adding the sample rate key to the trace. 
This setting is only used when
+# # AddSampleRateKeyToTrace is true.
+# AddSampleRateKeyToTraceField = ""
+
+# # AdjustmentInterval defines how often (in seconds) we adjust the moving average from
+# # recent observations. Default 15s
+# # Eligible for live reload.
+# AdjustmentInterval = 15
+
+# # Weight is a value between (0, 1) indicating the weighting factor used to adjust
+# # the EMA. With larger values, newer data will influence the average more, and older
+# # values will be factored out more quickly. In mathematical literature concerning EMA,
+# # this is referred to as the `alpha` constant.
+# # Default is 0.5
+# # Eligible for live reload.
+# Weight = 0.5
+
+# # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA.
+# # Once MaxKeys is reached, new keys will not be included in the sample rate map, but
+# # existing keys will continue to be counted. You can use this to keep the sample rate
+# # map size under control.
+# # Eligible for live reload
+# MaxKeys = 0
+
+# # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key
+# # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to
+# # decide what constitutes "zero". Keys with averages below this threshold will be removed
+# # from the EMA. Default is the same as Weight, as this prevents a key with the smallest
+# # integer value (1) from being aged out immediately. This value should generally be <= Weight,
+# # unless you have very specific reasons to set it higher.
+# # Eligible for live reload
+# AgeOutValue = 0.5
+
+# # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define
+# # the burst detection threshold. If total counts observed for a given interval exceed the threshold
+# # EMA is updated immediately, rather than waiting on the AdjustmentInterval.
+# # Defaults to 2; negative value disables.
With a default of 2, if your traffic suddenly doubles, +# # burst detection will kick in. +# # Eligible for live reload +# BurstMultiple = 2.0 + +# # BurstDetectionDelay indicates the number of intervals to run after Start is called before +# # burst detection kicks in. +# # Defaults to 3 +# # Eligible for live reload +# BurstDetectionDelay = 3 + +# [dataset3] + +# Sampler = "DeterministicSampler" +# SampleRate = 10 + +# [dataset4] + +# Sampler = "RulesBasedSampler" + +# [[dataset4.rule]] +# # Rule name +# name = "" +# # Drop Condition (examples: true, false) +# drop = +# [[dataset4.rule.condition]] +# # Field Name (example: status_code) +# field = "" +# # Operator Value (example: =) +# operator = "" +# # Field Value (example: 500) +# value = "" + + + +# [dataset5] + +# Sampler = "TotalThroughputSampler" +# GoalThroughputPerSec = 100 +# FieldList = "[]" diff --git a/build/tracing-deb/script.sh b/build/tracing-deb/script.sh index 4d9b8c7f21..5a97cf8371 100644 --- a/build/tracing-deb/script.sh +++ b/build/tracing-deb/script.sh @@ -13,9 +13,9 @@ sed -i "/^Architecture/s/:.*$/: ${architecture}/g" tracing/DEBIAN/control # Updating the files mkdir -p tracing/opt/opsramp/tracing-proxy/bin mkdir -p tracing/opt/opsramp/tracing-proxy/conf -cp ../../config_complete.toml tracing/opt/opsramp/tracing-proxy/conf/config_complete.toml -cp ../../rules_complete.toml tracing/opt/opsramp/tracing-proxy/conf/rules_complete.toml -go build ../../cmd/tracing-proxy/main.go +cp config_complete.toml tracing/opt/opsramp/tracing-proxy/conf/config_complete.toml +cp rules_complete.toml tracing/opt/opsramp/tracing-proxy/conf/rules_complete.toml +go build -o ../../cmd/tracing-proxy/main ../../cmd/tracing-proxy/main.go cp ../../cmd/tracing-proxy/main tracing/opt/opsramp/tracing-proxy/bin/tracing-proxy go build configure.go cp configure tracing/opt/opsramp/tracing-proxy/bin/configure diff --git a/build/tracing-deb/tracing/DEBIAN/control b/build/tracing-deb/tracing/DEBIAN/control index 
5180c14cc4..59984a4d9e 100644 --- a/build/tracing-deb/tracing/DEBIAN/control +++ b/build/tracing-deb/tracing/DEBIAN/control @@ -1,5 +1,5 @@ Package: tracing-proxy -Version: 5.0.0 +Version: 1.0.0 Architecture: amd64 Essential: no Priority: optional diff --git a/build/tracing-rpm/config_complete.toml b/build/tracing-rpm/config_complete.toml new file mode 100644 index 0000000000..dcbf861bd4 --- /dev/null +++ b/build/tracing-rpm/config_complete.toml @@ -0,0 +1,511 @@ +##################### +## Tracing-proxy Config ## +##################### + +# ListenAddr is the IP and port on which to listen for incoming events. Incoming +# traffic is expected to be HTTP, so if using SSL put something like nginx in +# front to do the decryption. +# Should be of the form 0.0.0.0:8082 +# Not eligible for live reload. +ListenAddr = "0.0.0.0:8082" + +# GRPCListenAddr is the IP and port on which to listen for incoming events over +# gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put +# something like nginx in front to do the decryption. +# Should be of the form 0.0.0.0:9090 +# Not eligible for live reload. +GRPCListenAddr = "0.0.0.0:9090" + +# PeerListenAddr is the IP and port on which to listen for traffic being +# rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL +# put something like nginx in front to do the decryption. Must be different from +# ListenAddr +# Should be of the form 0.0.0.0:8081 +# Not eligible for live reload. +PeerListenAddr = "0.0.0.0:8083" +GRPCPeerListenAddr = "0.0.0.0:8084" + +# ProxyProtocol accepts http and https +# Not Eligible for live reload. +ProxyProtocol = "" + +# ProxyServer takes the proxy server address +# Not Eligible for live reload. +ProxyServer = "" + +# ProxyPort takes the proxy server port +# Not Eligible for live reload. +ProxyPort = 0 + +# ProxyUserName takes the proxy username +# Not Eligible for live reload. 
+ProxyUserName = ""
+
+# ProxyPassword takes the proxy password
+# Not Eligible for live reload.
+ProxyPassword = ""
+
+# CompressPeerCommunication determines whether tracing-proxy will compress span data
+# it forwards to peers. If it costs money to transmit data between tracing-proxy
+# instances (e.g. they're spread across AWS availability zones), then you
+# almost certainly want compression enabled to reduce your bill. The option to
+# disable it is provided as an escape hatch for deployments that value lower CPU
+# utilization over data transfer costs.
+CompressPeerCommunication = true
+
+# OpsrampAPI is the URL for the upstream Opsramp API.
+# Eligible for live reload.
+OpsrampAPI =
+
+# OpsrampKey is used to get the OauthToken
+OpsrampKey =
+
+# OpsrampSecret is used to get the OauthToken
+OpsrampSecret =
+
+# Traces are sent to the client with the given tenantid
+TenantId =
+
+# Dataset you want to use for sampling
+Dataset = "ds"
+
+#Tls Options
+UseTls = true
+UseTlsInsecure = false
+
+# SendDelay is a short timer that will be triggered when a trace is complete.
+# Tracing-proxy will wait this duration before actually sending the trace. The
+# reason for this short delay is to allow for small network delays or clock
+# jitters to elapse and any final spans to arrive before actually sending the
+# trace. This supports duration strings with supplied units. Set to 0 for
+# immediate sends.
+# Eligible for live reload.
+SendDelay = "2s"
+
+# BatchTimeout dictates how frequently to send unfulfilled batches. By default
+# this will use the DefaultBatchTimeout in libhoney as its value, which is 100ms.
+# Eligible for live reload.
+BatchTimeout = "1s"
+
+# TraceTimeout is a long timer; it represents the outside boundary of how long
+# to wait before sending an incomplete trace. Normally traces are sent when the
+# root span arrives.
Sometimes the root span never arrives (due to crashes or
+# whatever), and this timer will send a trace even without having received the
+# root span. If you have particularly long-lived traces you should increase this
+# timer. This supports duration strings with supplied units.
+# Eligible for live reload.
+TraceTimeout = "60s"
+
+# MaxBatchSize is the number of events to be included in the batch for sending
+MaxBatchSize = 500
+
+# SendTicker is a short timer; it determines the duration to use to check for traces to send
+SendTicker = "100ms"
+
+# LoggingLevel is the level above which we should log. Debug is very verbose,
+# and should only be used in pre-production environments. Info is the
+# recommended level. Valid options are "debug", "info", "error", and
+# "panic"
+# Not eligible for live reload.
+LoggingLevel = "error"
+
+# UpstreamBufferSize and PeerBufferSize control how large of an event queue to use
+# when buffering events that will be forwarded to peers or the upstream API.
+UpstreamBufferSize = 10000
+PeerBufferSize = 10000
+
+# DebugServiceAddr sets the IP and port the debug service will run on
+# The debug service will only run if the command line flag -d is specified
+# The debug service runs on the first open port between localhost:6060 and :6069 by default
+# DebugServiceAddr = "localhost:8085"
+
+# AddHostMetadataToTrace determines whether or not to add information about
+# the host that Tracing-proxy is running on to the spans that it processes.
+# If enabled, information about the host will be added to each span with the
+# prefix `meta.tracing-proxy.`.
+# Currently the only value added is 'meta.tracing-proxy.local_hostname'.
+# Not eligible for live reload
+AddHostMetadataToTrace = false
+
+# EnvironmentCacheTTL is the amount of time a cache entry will live that associates
+# an API key with an environment name.
+# Cache misses lookup the environment name using OpsrampAPI config value.
+# Default is 1 hour ("1h").
+# Not eligible for live reload.
+EnvironmentCacheTTL = "1h"
+
+# QueryAuthToken, if specified, provides a token that must be specified with
+# the header "X-Opsramp-Tracing-proxy-Query" in order for a /query request to succeed.
+# These /query requests are intended for debugging tracing-proxy installations and
+# are not typically needed in normal operation.
+# Can be specified in the environment as tracing-proxy_QUERY_AUTH_TOKEN.
+# If left unspecified, the /query endpoints are inaccessible.
+# Not eligible for live reload.
+# QueryAuthToken = "some-random-value"
+
+# AddRuleReasonToTrace causes traces that are sent to Opsramp to include the field `meta.tracin.reason`.
+# This field contains text indicating which rule was evaluated that caused the trace to be included.
+# Eligible for live reload.
+# AddRuleReasonToTrace = true
+
+# AdditionalErrorFields should be a list of span fields that should be included when logging
+# errors that happen during ingestion of events (for example, the span too large error).
+# This is primarily useful in trying to track down misbehaving senders in a large installation.
+# The fields `dataset`, `apihost`, and `environment` are always included.
+# If a field is not present in the span, it will not be present in the error log.
+# Default is ["trace.span_id"].
+# Eligible for live reload.
+AdditionalErrorFields = [
+ "trace.span_id"
+]
+
+# AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate
+# the number of child spans on the trace at the time the sampling decision was made.
+# This value is available to the rules-based sampler, making it possible to write rules that
+# are dependent upon the number of spans in the trace.
+# Default is false.
+# Eligible for live reload.
+# AddSpanCountToRoot = true
+
+# CacheOverrunStrategy controls the cache management behavior under memory pressure.
+# "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again,
+# which is generally not helpful unless it occurs because of a permanent change in traffic patterns.
+# In the "impact" strategy, the items having the most impact on the cache size are
+# ejected from the cache earlier than normal but the cache is not resized.
+# In all cases, it only applies if MaxAlloc is nonzero.
+# Default is "resize" for compatibility but "impact" is recommended for most installations.
+# Eligible for live reload.
+# CacheOverrunStrategy = "impact"
+
+# Metrics are sent to OpsRamp (The collection happens based on configuration specified
+# in OpsRampMetrics and only works when the Metrics is set to "prometheus")
+SendMetricsToOpsRamp = false
+
+############################
+## Implementation Choices ##
+############################
+
+# Each of the config options below chooses an implementation of a Tracing-proxy
+# component to use. Depending on the choice there may be more configuration
+# required below in the section for that choice. Changing implementation choices
+# requires a process restart; these changes will not be picked up by a live
+# config reload. (Individual config options for a given implementation may be
+# eligible for live reload).
+
+# Collector describes which collector to use for collecting traces. The only
+# current valid option is "InMemCollector". More can be added by adding
+# implementations of the Collector interface.
+Collector = "InMemCollector"
+
+#########################
+## Peer Management ##
+#########################
+
+[PeerManagement]
+Type = "file"
+## Peers is the list of all servers participating in this proxy cluster. Events
+## will be sharded evenly across all peers based on the Trace ID. Values here
+## should be the base URL used to access the peer, and should include scheme,
+## hostname (or ip address) and port. All servers in the cluster should be in
+## this list, including this host.
+Peers = [ + "http://127.0.0.1:8084", #only grpc peer listener used +# # "http://127.0.0.1:8083", +# # "http://10.1.2.3.4:8080", +# # "http://tracin-1231:8080", +# # "http://peer-3.fqdn" // assumes port 80 +] + +#[PeerManagement] +#Type = "redis" +# RedisHost is is used to connect to redis for peer cluster membership management. +# Further, if the environment variable 'Tracing_Proxy_REDIS_HOST' is set it takes +# precedence and this value is ignored. +# Not eligible for live reload. +#RedisHost = "redis:22122" + +# RedisUsername is the username used to connect to redis for peer cluster membership management. +# If the environment variable 'Tracing_Proxy_REDIS_USERNAME' is set it takes +# precedence and this value is ignored. +# Not eligible for live reload. +# RedisUsername = "" + +# RedisPassword is the password used to connect to redis for peer cluster membership management. +# If the environment variable 'Tracing_Proxy_REDIS_PASSWORD' is set it takes +# precedence and this value is ignored. +# Not eligible for live reload. +# RedisPassword = "" + +# RedisPrefix is a string used as a prefix for the keys in redis while storing +# the peer membership. It might be useful to set this in any situation where +# multiple tracing-proxy clusters or multiple applications want to share a single +# Redis instance. If not set then "tracing-proxy" is used as prefix +# RedisPrefix = "customPrefix" + +# RedisDatabase is an integer from 0-15 indicating the database number to use +# for the Redis instance storing the peer membership. It might be useful to set +# this in any situation where multiple trace-proxy clusters or multiple +# applications want to share a single Redis instance. if not set Default = 0 +# RedisDatabase = 1 + +# UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. +# Not eligible for live reload. +# UseTLS = false + +# UseTLSInsecure disables certificate checks +# Not eligible for live reload. 
+# UseTLSInsecure = false
+
+# IdentifierInterfaceName is optional. By default, when using RedisHost, Tracing-proxy will use
+# the local hostname to identify itself to other peers in Redis. If your environment
+# requires that you use IPs as identifiers (for example, if peers can't resolve each other
+# by name), you can specify the network interface that Tracing-proxy is listening on here.
+# Tracing-proxy will use the first unicast address that it finds on the specified network
+# interface as its identifier.
+# Not eligible for live reload.
+# IdentifierInterfaceName = "eth0"
+
+# UseIPV6Identifier is optional. If using IdentifierInterfaceName, Tracing-proxy will default to the first
+# IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, it will use
+# the first IPV6 unicast address found.
+# UseIPV6Identifier = false
+
+# RedisIdentifier is optional. By default, when using RedisHost, Tracing-proxy will use
+# the local hostname to identify itself to other peers in Redis. If your environment
+# requires that you use IPs as identifiers (for example, if peers can't resolve each other
+# by name), you can specify the exact identifier (IP address, etc) to use here.
+# Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set.
+# RedisIdentifier = "192.168.1.1"
+
+# Timeout is optional. By default, when using RedisHost, Tracing-proxy will time out
+# after 5s when communicating with Redis.
+# Timeout = "5s"
+
+# Strategy controls the way that traces are assigned to tracing-proxy nodes.
+# The "legacy" strategy uses a simple algorithm that unfortunately causes
+# 1/2 of the in-flight traces to be assigned to a different node whenever the
+# number of nodes changes.
+# The legacy strategy is deprecated and is intended to be removed in a future release.
+# The "hash" strategy is strongly recommended, as only 1/N traces (where N is the +# number of nodes) are disrupted when the node count changes. +# Not eligible for live reload. +Strategy = "hash" + +######################### +## In-Memory Collector ## +######################### + +# InMemCollector brings together all the settings that are relevant to +# collecting spans together to make traces. +[InMemCollector] + +# The collection cache is used to collect all spans into a trace as well as +# remember the sampling decision for any spans that might come in after the +# trace has been marked "complete" (either by timing out or seeing the root +# span). The number of traces in the cache should be many multiples (100x to +# 1000x) of the total number of concurrently active traces (trace throughput * +# trace duration). +# Eligible for live reload. Growing the cache capacity with a live config reload +# is fine. Avoid shrinking it with a live reload (you can, but it may cause +# temporary odd sampling decisions). +CacheCapacity = 1000 + +# MaxAlloc is optional. If set, it must be an integer >= 0. 64-bit values are +# supported. +# If set to a non-zero value, once per tick (see SendTicker) the collector +# will compare total allocated bytes to this value. If allocation is too +# high, cache capacity will be adjusted according to the setting for +# CacheOverrunStrategy. +# Useful values for this setting are generally in the range of 75%-90% of +# available system memory. +MaxAlloc = 0 + +################### +## Logrus Logger ## +################### + +# LogrusLogger is a section of the config only used if you are using the +# LogrusLogger to send all logs to STDOUT using the logrus package. If you are +# using a different logger (eg Opsramp logger) you can leave all this +# commented out. +[LogrusLogger] + +# LogFormatter specifies the log format. 
Accepted values are one of ["logfmt", "json"]
+LogFormatter = "logfmt"
+
+# LogOutput specifies where the logs are supposed to be written. Accepts one of ["stdout", "stderr", "file"]
+LogOutput = "file"
+
+## LogrusLogger.File - specifies configs for logs when LogOutput is set to "file"
+[LogrusLogger.File]
+
+# FileName specifies the location where the logs are supposed to be stored
+FileName = "/var/log/opsramp/tracing-proxy.log"
+
+# MaxSize is the maximum size in megabytes of the log file before it gets rotated.
+MaxSize = 1
+
+# MaxBackups is the maximum number of old log files to retain.
+MaxBackups = 3
+
+# Compress determines if the rotated log files should be compressed
+# using gzip.
+Compress = true
+
+
+#######################
+## Prometheus Metrics ##
+#######################
+
+[OpsRampMetrics]
+# MetricsListenAddr determines the interface and port on which Prometheus will
+# listen for requests for /metrics. Must be different from the main Tracing-proxy
+# listener.
+# Not eligible for live reload.
+MetricsListenAddr = "localhost:2112"
+
+# OpsRampMetricsAPI is the URL for the upstream OpsRamp API.
+# Not Eligible for live reload.
+OpsRampMetricsAPI =
+
+# OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed.
+# Not Eligible for live reload.
+OpsRampTenantID =
+
+# OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp.
+# This is separate from the APIKeys used to authenticate regular
+# traffic.
+# Not Eligible for live reload.
+OpsRampMetricsAPIKey =
+
+# OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp.
+# This is separate from the APISecret used to authenticate regular
+# traffic.
+# Not Eligible for live reload.
+OpsRampMetricsAPISecret =
+
+# OpsRampMetricsReportingInterval is the frequency specified in seconds at which
+# the metrics are collected and sent to OpsRamp
+# Not Eligible for live reload.
+OpsRampMetricsReportingInterval = 10
+
+# OpsRampMetricsRetryCount is the number of times we retry in case the send fails
+# Not Eligible for live reload.
+OpsRampMetricsRetryCount = 2
+
+# ProxyProtocol accepts http and https
+# Not Eligible for live reload.
+ProxyProtocol = ""
+
+# ProxyServer takes the proxy server address
+# Not Eligible for live reload.
+ProxyServer = ""
+
+# ProxyPort takes the proxy server port
+# Not Eligible for live reload.
+ProxyPort = 3128
+
+# ProxyUserName takes the proxy username
+# Not Eligible for live reload.
+ProxyUserName = ""
+
+# ProxyPassword takes the proxy password
+# Not Eligible for live reload.
+ProxyPassword = ""
+
+# OpsRampMetricsList is a list of regular expressions which match the metric
+# names. Keep the list as small as possible since too many regular expressions can lead to bad performance.
+# Internally all the regex in the list are concatenated using '|' to make the computation a little faster.
+# Not Eligible for live reload
+OpsRampMetricsList = [".*"]
+
+
+[GRPCServerParameters]
+
+# MaxConnectionIdle is a duration for the amount of time after which an
+# idle connection would be closed by sending a GoAway. Idleness duration is
+# defined since the most recent time the number of outstanding RPCs became
+# zero or the connection establishment.
+# 0s sets duration to infinity which is the default:
+# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219
+# Not eligible for live reload.
+# MaxConnectionIdle = "1m"
+
+# MaxConnectionAge is a duration for the maximum amount of time a
+# connection may exist before it will be closed by sending a GoAway. A
+# random jitter of +/-10% will be added to MaxConnectionAge to spread out
+# connection storms.
+# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 +# Not eligible for live reload. +# MaxConnectionAge = "0s" + +# MaxConnectionAgeGrace is an additive period after MaxConnectionAge after +# which the connection will be forcibly closed. +# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 +# Not eligible for live reload. +# MaxConnectionAgeGrace = "0s" + +# After a duration of this time if the server doesn't see any activity it +# pings the client to see if the transport is still alive. +# If set below 1s, a minimum value of 1s will be used instead. +# 0s sets duration to 2 hours which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 +# Not eligible for live reload. +# Time = "10s" + +# After having pinged for keepalive check, the server waits for a duration +# of Timeout and if no activity is seen even after that the connection is +# closed. +# 0s sets duration to 20 seconds which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 +# Not eligible for live reload. +# Timeout = "2s" + + + +################################ +## Sample Cache Configuration ## +################################ + +# Sample Cache Configuration controls the sample cache used to retain information about trace +# status after the sampling decision has been made. + +[SampleCacheConfig] + +# Type controls the type of sample cache used. +# "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is +# 5x the size of the trace cache. 
This is Tracing-proxy's original sample cache strategy. +# "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember +# a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. +# It is also more configurable. The cuckoo filter is recommended for most installations. +# Default is "legacy". +# Not eligible for live reload (you cannot change the type of cache with reload). +# Type = "cuckoo" + +# KeptSize controls the number of traces preserved in the cuckoo kept traces cache. +# Tracing-proxy keeps a record of each trace that was kept and sent to Opsramp, along with some +# statistical information. This is most useful in cases where the trace was sent before sending +# the root span, so that the root span can be decorated with accurate metadata. +# Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). +# Does not apply to the "legacy" type of cache. +# Eligible for live reload. +# KeptSize = 10_000 + +# DroppedSize controls the size of the cuckoo dropped traces cache. +# This cache consumes 4-6 bytes per trace at a scale of millions of traces. +# Changing its size with live reload sets a future limit, but does not have an immediate effect. +# Default is 1_000_000 traces. +# Does not apply to the "legacy" type of cache. +# Eligible for live reload. +# DroppedSize = 1_000_000 + +# SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates +# the remaining capacity of its dropped traces cache and possibly cycles it. +# This cache is quite resilient so it doesn't need to happen very often, but the +# operation is also inexpensive. +# Default is 10 seconds. +# Does not apply to the "legacy" type of cache. +# Eligible for live reload. 
+# SizeCheckInterval = "10s" diff --git a/build/tracing-rpm/configure.go b/build/tracing-rpm/configure.go index 521a55474f..1daecd2039 100644 --- a/build/tracing-rpm/configure.go +++ b/build/tracing-rpm/configure.go @@ -23,26 +23,26 @@ func main() { opsrampApiHost := "OpsrampAPI = \"" + *api + "\"" opsrampMetricsApiHost := "OpsRampMetricsAPI = \"" + *api + "\"" - updatedConfigFile = bytes.Replace(configFile, []byte("OpsrampAPI = "), []byte(opsrampApiHost), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPI = "), []byte(opsrampMetricsApiHost), 1) + updatedConfigFile = bytes.Replace(configFile, []byte("OpsrampAPI ="), []byte(opsrampApiHost), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPI ="), []byte(opsrampMetricsApiHost), 1) opsrampKey := "OpsrampKey = \"" + *key + "\"" opsrampMetricsApiKey := "OpsRampMetricsAPIKey = \"" + *key + "\"" - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampKey = "), []byte(opsrampKey), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPIKey = "), []byte(opsrampMetricsApiKey), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampKey ="), []byte(opsrampKey), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPIKey ="), []byte(opsrampMetricsApiKey), 1) OpsrampSecret := "OpsrampSecret = \"" + *secret + "\"" OpsRampMetricsAPISecret := "OpsRampMetricsAPISecret = \"" + *secret + "\"" - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampSecret = "), []byte(OpsrampSecret), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPISecret = "), []byte(OpsRampMetricsAPISecret), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampSecret ="), []byte(OpsrampSecret), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPISecret ="), []byte(OpsRampMetricsAPISecret), 1) opsrampTenantID := 
"OpsRampTenantID = \"" + *tenant + "\"" TenantId := "TenantId = \"" + *tenant + "\"" - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampTenantID = "), []byte(opsrampTenantID), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("TenantId = "), []byte(TenantId), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampTenantID ="), []byte(opsrampTenantID), 1) + updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("TenantId ="), []byte(TenantId), 1) if err = ioutil.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.toml", updatedConfigFile, 0666); err != nil { fmt.Println(err) @@ -55,4 +55,3 @@ func main() { } fmt.Println("Tracing-Proxy Started Successfully") } - diff --git a/build/tracing-rpm/rules_complete.toml b/build/tracing-rpm/rules_complete.toml new file mode 100644 index 0000000000..1ff80e7295 --- /dev/null +++ b/build/tracing-rpm/rules_complete.toml @@ -0,0 +1,229 @@ +############################ +## Sampling Rules Config ## +############################ + +# DryRun - If enabled, marks traces that would be dropped given current sampling rules, +# and sends all traces regardless +DryRun = true + +# DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to tracing_proxy_kept +DryRunFieldName = "fromProxy" + +# DeterministicSampler is a section of the config for manipulating the +# Deterministic Sampler implementation. This is the simplest sampling algorithm +# - it is a static sample rate, choosing traces randomly to either keep or send +# (at the appropriate rate). It is not influenced by the contents of the trace. +Sampler = "DeterministicSampler" + +# SampleRate is the rate at which to sample. It indicates a ratio, where one +# sample trace is kept for every n traces seen. For example, a SampleRate of 30 +# will keep 1 out of every 30 traces. The choice on whether to keep any specific +# trace is random, so the rate is approximate. 
+# Eligible for live reload. +SampleRate = 1 + +# [dataset1] + +# # Note: If your dataset name contains a space, you will have to escape the dataset name +# # using single quotes, such as ['dataset 1'] + +# # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler +# # implementation. This sampler collects the values of a number of fields from a +# # trace and uses them to form a key. This key is handed to the standard dynamic +# # sampler algorithm which generates a sample rate based on the frequency with +# # which that key has appeared in the previous ClearFrequencySec seconds. See +# # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics +# # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from +# # that package. +# Sampler = "DynamicSampler" + +# # SampleRate is the goal rate at which to sample. It indicates a ratio, where +# # one sample trace is kept for every n traces seen. For example, a SampleRate of +# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic +# # sampler, who assigns a sample rate for each trace based on the fields selected +# # from that trace. +# # Eligible for live reload. +# SampleRate = 1 + +# # FieldList is a list of all the field names to use to form the key that will be +# # handed to the dynamic sampler. The cardinality of the combination of values +# # from all of these keys should be reasonable in the face of the frequency of +# # those keys. If the combination of fields in these keys essentially makes them +# # unique, the dynamic sampler will do no sampling. If the keys have too few +# # values, you won't get samples of the most interesting traces. A good key +# # selection will have consistent values for high frequency boring traffic and +# # unique values for outliers and interesting traffic. Including an error field +# # (or something like HTTP status code) is an excellent choice. 
As an example, +# # assuming 30 or so endpoints, a combination of HTTP endpoint and status code +# # would be a good set of keys in order to let you see accurately use of all +# # endpoints and call out when there is failing traffic to any endpoint. Field +# # names may come from any span in the trace. +# # Eligible for live reload. +# FieldList = [""] + +# # UseTraceLength will add the number of spans in the trace in to the dynamic +# # sampler as part of the key. The number of spans is exact, so if there are +# # normally small variations in trace length you may want to leave this off. If +# # traces are consistent lengths and changes in trace length is a useful +# # indicator of traces you'd like to see in Opsramp, set this to true. +# # Eligible for live reload. +# UseTraceLength = true + +# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field +# # to the root span of the trace containing the key used by the sampler to decide +# # the sample rate. This can be helpful in understanding why the sampler is +# # making certain decisions about sample rate and help you understand how to +# # better choose the sample rate key (aka the FieldList setting above) to use. +# AddSampleRateKeyToTrace = true + +# # AddSampleRateKeyToTraceField is the name of the field the sampler will use +# # when adding the sample rate key to the trace. This setting is only used when +# # AddSampleRateKeyToTrace is true. +# AddSampleRateKeyToTraceField = "" + +# # ClearFrequencySec is the name of the field the sampler will use to determine +# # the period over which it will calculate the sample rate. This setting defaults +# # to 30. +# # Eligible for live reload. +# ClearFrequencySec = 60 + +# [dataset2] + +# # EMADynamicSampler is a section of the config for manipulating the Exponential +# # Moving Average (EMA) Dynamic Sampler implementation. 
Like the simple DynamicSampler, +# # it attempts to average a given sample rate, weighting rare traffic and frequent +# # traffic differently so as to end up with the correct average. +# # +# # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended +# # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs +# # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential +# # Moving Average of counts seen per key, and adjusts this average at regular intervals. +# # The weight applied to more recent intervals is defined by `weight`, a number between +# # (0, 1) - larger values weight the average more toward recent observations. In other words, +# # a larger weight will cause sample rates more quickly adapt to traffic patterns, +# # while a smaller weight will result in sample rates that are less sensitive to bursts or drops +# # in traffic and thus more consistent over time. +# # +# # Keys that are not found in the EMA will always have a sample +# # rate of 1. Keys that occur more frequently will be sampled on a logarithmic +# # curve. In other words, every key will be represented at least once in any +# # given window and more frequent keys will have their sample rate +# # increased proportionally to wind up with the goal sample rate. +# Sampler = "EMADynamicSampler" + +# # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where +# # one sample trace is kept for every n traces seen. For example, a SampleRate of +# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic +# # sampler, who assigns a sample rate for each trace based on the fields selected +# # from that trace. +# # Eligible for live reload. +# GoalSampleRate = 1 + +# # FieldList is a list of all the field names to use to form the key that will be +# # handed to the dynamic sampler. 
The cardinality of the combination of values +# # from all of these keys should be reasonable in the face of the frequency of +# # those keys. If the combination of fields in these keys essentially makes them +# # unique, the dynamic sampler will do no sampling. If the keys have too few +# # values, you won't get samples of the most interesting traces. A good key +# # selection will have consistent values for high frequency boring traffic and +# # unique values for outliers and interesting traffic. Including an error field +# # (or something like HTTP status code) is an excellent choice. As an example, +# # assuming 30 or so endpoints, a combination of HTTP endpoint and status code +# # would be a good set of keys in order to let you see accurately use of all +# # endpoints and call out when there is failing traffic to any endpoint. Field +# # names may come from any span in the trace. +# # Eligible for live reload. +# FieldList = [""] + +# # UseTraceLength will add the number of spans in the trace in to the dynamic +# # sampler as part of the key. The number of spans is exact, so if there are +# # normally small variations in trace length you may want to leave this off. If +# # traces are consistent lengths and changes in trace length is a useful +# # indicator of traces you'd like to see in Opsramp, set this to true. +# # Eligible for live reload. +# UseTraceLength = true + +# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field +# # to the root span of the trace containing the key used by the sampler to decide +# # the sample rate. This can be helpful in understanding why the sampler is +# # making certain decisions about sample rate and help you understand how to +# # better choose the sample rate key (aka the FieldList setting above) to use. +# AddSampleRateKeyToTrace = true + +# # AddSampleRateKeyToTraceField is the name of the field the sampler will use +# # when adding the sample rate key to the trace. 
This setting is only used when
+# # AddSampleRateKeyToTrace is true.
+# # AddSampleRateKeyToTraceField = ""

+# # AdjustmentInterval defines how often (in seconds) we adjust the moving average from
+# # recent observations. Default 15s
+# # Eligible for live reload.
+# # AdjustmentInterval = 15

+# # Weight is a value between (0, 1) indicating the weighting factor used to adjust
+# # the EMA. With larger values, newer data will influence the average more, and older
+# # values will be factored out more quickly. In mathematical literature concerning EMA,
+# # this is referred to as the `alpha` constant.
+# # Default is 0.5
+# # Eligible for live reload.
+# # Weight = 0.5

+# # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA.
+# # Once MaxKeys is reached, new keys will not be included in the sample rate map, but
+# # existing keys will continue to be counted. You can use this to keep the sample rate
+# # map size under control.
+# # Eligible for live reload
+# # MaxKeys = 0

+# # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key
+# # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to
+# # decide what constitutes "zero". Keys with averages below this threshold will be removed
+# # from the EMA. Default is the same as Weight, as this prevents a key with the smallest
+# # integer value (1) from being aged out immediately. This value should generally be <= Weight,
+# # unless you have very specific reasons to set it higher.
+# # Eligible for live reload
+# # AgeOutValue = 0.5

+# # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define
+# # the burst detection threshold. If total counts observed for a given interval exceed the threshold
+# # EMA is updated immediately, rather than waiting on the AdjustmentInterval.
+# # Defaults to 2; negative value disables.
With a default of 2, if your traffic suddenly doubles, +# # burst detection will kick in. +# # Eligible for live reload +# BurstMultiple = 2.0 + +# # BurstDetectionDelay indicates the number of intervals to run after Start is called before +# # burst detection kicks in. +# # Defaults to 3 +# # Eligible for live reload +# BurstDetectionDelay = 3 + +# [dataset3] + +# Sampler = "DeterministicSampler" +# SampleRate = 10 + +# [dataset4] + +# Sampler = "RulesBasedSampler" + +# [[dataset4.rule]] +# # Rule name +# name = "" +# # Drop Condition (examples: true, false) +# drop = +# [[dataset4.rule.condition]] +# # Field Name (example: status_code) +# field = "" +# # Operator Value (example: =) +# operator = "" +# # Field Value (example: 500) +# value = "" + + + +# [dataset5] + +# Sampler = "TotalThroughputSampler" +# GoalThroughputPerSec = 100 +# FieldList = "[]" diff --git a/build/tracing-rpm/script.sh b/build/tracing-rpm/script.sh index ec474f63fb..603002859d 100644 --- a/build/tracing-rpm/script.sh +++ b/build/tracing-rpm/script.sh @@ -11,9 +11,9 @@ sed -i "/^\%define version/s/^.*$/\%define version ${Version}/g" tracing-pro # Updating the files mkdir -p opt/opsramp/tracing-proxy/conf mkdir -p opt/opsramp/tracing-proxy/bin -cp ../../config_complete.toml opt/opsramp/tracing-proxy/conf/config_complete.toml -cp ../../rules_complete.toml opt/opsramp/tracing-proxy/conf/rules_complete.toml -go build ../../cmd/tracing-proxy/main.go +cp .config_complete.toml opt/opsramp/tracing-proxy/conf/config_complete.toml +cp rules_complete.toml opt/opsramp/tracing-proxy/conf/rules_complete.toml +go build -o ../../cmd/tracing-proxy/main ../../cmd/tracing-proxy/main.go go build configure.go cp ../../cmd/tracing-proxy/main opt/opsramp/tracing-proxy/bin/tracing-proxy cp configure opt/opsramp/tracing-proxy/bin/configure diff --git a/config_complete.toml b/config_complete.toml index 731fc6578e..5d20659ecf 100644 --- a/config_complete.toml +++ b/config_complete.toml @@ -1,5 +1,5 @@ 
##################### -## Refinery Config ## +## Tracing-proxy Config ## ##################### # ListenAddr is the IP and port on which to listen for incoming events. Incoming @@ -14,7 +14,7 @@ ListenAddr = "0.0.0.0:8082" # something like nginx in front to do the decryption. # Should be of the form 0.0.0.0:9090 # Not eligible for live reload. -GRPCListenAddr = "0.0.0.0:4317" +GRPCListenAddr = "0.0.0.0:9090" # PeerListenAddr is the IP and port on which to listen for traffic being # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL @@ -23,7 +23,7 @@ GRPCListenAddr = "0.0.0.0:4317" # Should be of the form 0.0.0.0:8081 # Not eligible for live reload. PeerListenAddr = "0.0.0.0:8083" -GRPCPeerListenAddr = "tracing-proxy:8084" +GRPCPeerListenAddr = "0.0.0.0:8084" # ProxyProtocol accepts http and https # Not Eligible for live reload. @@ -45,27 +45,27 @@ ProxyUserName = "" # Not Eligible for live reload. ProxyPassword = "" -# CompressPeerCommunication determines whether refinery will compress span data -# it forwards to peers. If it costs money to transmit data between refinery +# CompressPeerCommunication determines whether tracin will compress span data +# it forwards to peers. If it costs money to transmit data between tracin # instances (e.g. they're spread across AWS availability zones), then you # almost certainly want compression enabled to reduce your bill. The option to -# disable it is provided as an escape hatch for deployments that value lower CPU +# disable it is provided as an escape hatch for deployments that value lower CPU # utilization over data transfer costs. CompressPeerCommunication = true # OpsrampAPI is the URL for the upstream Opsramp API. # Eligible for live reload. 
#OpsrampAPI = "localhost:50052" -OpsrampAPI = "" +OpsrampAPI = # OpsrampKey is used to get the OauthToken -OpsrampKey = "" +OpsrampKey = # OpsrampSecret is used to get the OauthToken -OpsrampSecret = "" +OpsrampSecret = # Traces are send to the client with given tenantid -TenantId = "" +TenantId = # Dataset you want to use for sampling Dataset = "ds" @@ -75,7 +75,7 @@ UseTls = true UseTlsInsecure = false # SendDelay is a short timer that will be triggered when a trace is complete. -# Refinery will wait this duration before actually sending the trace. The +# Tracing-proxy will wait this duration before actually sending the trace. The # reason for this short delay is to allow for small network delays or clock # jitters to elapse and any final spans to arrive before actually sending the # trace. This supports duration strings with supplied units. Set to 0 for @@ -108,7 +108,7 @@ SendTicker = "100ms" # recommended level. Valid options are "debug", "info", "error", and # "panic" # Not eligible for live reload. -LoggingLevel = "debug" +LoggingLevel = "error" # UpstreamBufferSize and PeerBufferSize control how large of an event queue to use # when buffering events that will be forwarded to peers or the upstream API. @@ -121,30 +121,30 @@ PeerBufferSize = 10000 # DebugServiceAddr = "localhost:8085" # AddHostMetadataToTrace determines whether or not to add information about -# the host that Refinery is running on to the spans that it processes. +# the host that Tracing-proxy is running on to the spans that it processes. # If enabled, information about the host will be added to each span with the -# prefix `meta.refinery.`. -# Currently the only value added is 'meta.refinery.local_hostname'. +# prefix `meta.tracing-procy.`. +# Currently the only value added is 'meta.tracing-proxy.local_hostname'. 
# Not eligible for live reload AddHostMetadataToTrace = false # EnvironmentCacheTTL is the amount of time a cache entry will live that associates # an API key with an environment name. -# Cache misses lookup the environment name using HoneycombAPI config value. +# Cache misses lookup the environment name using OpsrampAPI config value. # Default is 1 hour ("1h"). # Not eligible for live reload. EnvironmentCacheTTL = "1h" # QueryAuthToken, if specified, provides a token that must be specified with -# the header "X-Honeycomb-Refinery-Query" in order for a /query request to succeed. -# These /query requests are intended for debugging refinery installations and +# the header "X-Opsramp-Tracing-proxy-Query" in order for a /query request to succeed. +# These /query requests are intended for debugging tracin installations and # are not typically needed in normal operation. -# Can be specified in the environment as REFINERY_QUERY_AUTH_TOKEN. +# Can be specified in the environment as tracing-proxy_QUERY_AUTH_TOKEN. # If left unspecified, the /query endpoints are inaccessible. # Not eligible for live reload. # QueryAuthToken = "some-random-value" -# AddRuleReasonToTrace causes traces that are sent to Honeycomb to include the field `meta.refinery.reason`. +# AddRuleReasonToTrace causes traces that are sent to Opsramp to include the field `meta.tracin.reason`. # This field contains text indicating which rule was evaluated that caused the trace to be included. # Eligible for live reload. # AddRuleReasonToTrace = true @@ -186,7 +186,7 @@ SendMetricsToOpsRamp = false ## Implementation Choices ## ############################ -# Each of the config options below chooses an implementation of a Refinery +# Each of the config options below chooses an implementation of a Tracing-proxy # component to use. Depending on the choice there may be more configuration # required below in the section for that choice. 
Changing implementation choices # requires a process restart; these changes will not be picked up by a live @@ -202,37 +202,37 @@ Collector = "InMemCollector" ## Peer Management ## ######################### -#[PeerManagement] -#Type = "file" +[PeerManagement] +Type = "file" ## Peers is the list of all servers participating in this proxy cluster. Events ## will be sharded evenly across all peers based on the Trace ID. Values here ## should be the base URL used to access the peer, and should include scheme, ## hostname (or ip address) and port. All servers in the cluster should be in ## this list, including this host. -#Peers = [ -# "http://127.0.0.1:8084", #only grpc peer listener used +Peers = [ + "http://127.0.0.1:8084", #only grpc peer listener used # # "http://127.0.0.1:8083", # # "http://10.1.2.3.4:8080", -# # "http://refinery-1231:8080", +# # "http://tracin-1231:8080", # # "http://peer-3.fqdn" // assumes port 80 -#] +] -[PeerManagement] -Type = "redis" +#[PeerManagement] +#Type = "redis" # RedisHost is is used to connect to redis for peer cluster membership management. -# Further, if the environment variable 'REFINERY_REDIS_HOST' is set it takes +# Further, if the environment variable 'Tracing_Proxy_REDIS_HOST' is set it takes # precedence and this value is ignored. # Not eligible for live reload. -RedisHost = "redis:22122" +#RedisHost = "redis:22122" # RedisUsername is the username used to connect to redis for peer cluster membership management. -# If the environment variable 'REFINERY_REDIS_USERNAME' is set it takes +# If the environment variable 'Tracing_Proxy_REDIS_USERNAME' is set it takes # precedence and this value is ignored. # Not eligible for live reload. # RedisUsername = "" # RedisPassword is the password used to connect to redis for peer cluster membership management. 
-# If the environment variable 'REFINERY_REDIS_PASSWORD' is set it takes +# If the environment variable 'Tracing_Proxy_REDIS_PASSWORD' is set it takes # precedence and this value is ignored. # Not eligible for live reload. # RedisPassword = "" @@ -257,32 +257,32 @@ RedisHost = "redis:22122" # Not eligible for live reload. # UseTLSInsecure = false -# IdentifierInterfaceName is optional. By default, when using RedisHost, Refinery will use +# IdentifierInterfaceName is optional. By default, when using RedisHost, Tracing-proxy will use # the local hostname to identify itself to other peers in Redis. If your environment # requires that you use IPs as identifiers (for example, if peers can't resolve eachother -# by name), you can specify the network interface that Refinery is listening on here. -# Refinery will use the first unicast address that it finds on the specified network +# by name), you can specify the network interface that Tracing-proxy is listening on here. +# Tracing-proxy will use the first unicast address that it finds on the specified network # interface as its identifier. # Not eligible for live reload. # IdentifierInterfaceName = "eth0" -# UseIPV6Identifier is optional. If using IdentifierInterfaceName, Refinery will default to the first +# UseIPV6Identifier is optional. If using IdentifierInterfaceName, Tracing-proxy will default to the first # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use # the first IPV6 unicast address found. # UseIPV6Identifier = false -# RedisIdentifier is optional. By default, when using RedisHost, Refinery will use +# RedisIdentifier is optional. By default, when using RedisHost, Tracing-proxy will use # the local hostname to id788714cd-a17a-4d7e-9bac-c35131f4bcc2entify itself to other peers in Redis. 
If your environment # requires that you use IPs as identifiers (for example, if peers can't resolve eachother # by name), you can specify the exact identifier (IP address, etc) to use here. # Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set. # RedisIdentifier = "192.168.1.1" -# Timeout is optional. By default, when using RedisHost, Refinery will timeout +# Timeout is optional. By default, when using RedisHost, Tracing-proxy will timeout # after 5s when communicating with Redis. # Timeout = "5s" -# Strategy controls the way that traces are assigned to refinery nodes. +# Strategy controls the way that traces are assigned to tracin nodes. # The "legacy" strategy uses a simple algorithm that unfortunately causes # 1/2 of the in-flight traces to be assigned to a different node whenever the # number of nodes changes. @@ -335,7 +335,7 @@ MaxAlloc = 0 LogFormatter = "logfmt" # LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr", "file"] -LogOutput = "stdout" +LogOutput = "file" ## LogrusLogger.File - specifies configs for logs when LogOutput is set to "file" [LogrusLogger.File] @@ -360,30 +360,30 @@ Compress = true [OpsRampMetrics] # MetricsListenAddr determines the interface and port on which Prometheus will -# listen for requests for /metrics. Must be different from the main Refinery +# listen for requests for /metrics. Must be different from the main Tracing-proxy # listener. # Not eligible for live reload. MetricsListenAddr = "localhost:2112" # OpsRampMetricsAPI is the URL for the upstream OpsRamp API. # Not Eligible for live reload. -OpsRampMetricsAPI = "" +OpsRampMetricsAPI = # OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. # Not Eligible for live reload. -OpsRampTenantID = "" +OpsRampTenantID = # OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. # This is separate from the APIKeys used to authenticate regular # traffic. 
# Not Eligible for live reload. -OpsRampMetricsAPIKey = "" +OpsRampMetricsAPIKey = # OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. # This is separate from the APISecret used to authenticate regular # traffic. # Not Eligible for live reload. -OpsRampMetricsAPISecret = "" +OpsRampMetricsAPISecret = # OpsRampMetricsReportingInterval is frequency specified in seconds at which # the metrics are collected and sent to OpsRamp @@ -477,7 +477,7 @@ OpsRampMetricsList = [".*"] # Type controls the type of sample cache used. # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is -# 5x the size of the trace cache. This is Refinery's original sample cache strategy. +# 5x the size of the trace cache. This is Tracing-proxy's original sample cache strategy. # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. # It is also more configurable. The cuckoo filter is recommended for most installations. @@ -486,7 +486,7 @@ OpsRampMetricsList = [".*"] # Type = "cuckoo" # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. -# Refinery keeps a record of each trace that was kept and sent to Honeycomb, along with some +# Tracing-proxy keeps a record of each trace that was kept and sent to Opsramp, along with some # statistical information. This is most useful in cases where the trace was sent before sending # the root span, so that the root span can be decorated with accurate metadata. # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). 
diff --git a/rules_complete.toml b/rules_complete.toml index 2f50ef1733..1ff80e7295 100644 --- a/rules_complete.toml +++ b/rules_complete.toml @@ -6,7 +6,7 @@ # and sends all traces regardless DryRun = true -# DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to refinery_kept +# DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to tracing_proxy_kept DryRunFieldName = "fromProxy" # DeterministicSampler is a section of the config for manipulating the @@ -22,268 +22,208 @@ Sampler = "DeterministicSampler" # Eligible for live reload. SampleRate = 1 -[dataset1] - - # Note: If your dataset name contains a space, you will have to escape the dataset name - # using single quotes, such as ['dataset 1'] - - # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler - # implementation. This sampler collects the values of a number of fields from a - # trace and uses them to form a key. This key is handed to the standard dynamic - # sampler algorithm which generates a sample rate based on the frequency with - # which that key has appeared in the previous ClearFrequencySec seconds. See - # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics - # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from - # that package. - Sampler = "DynamicSampler" - - # SampleRate is the goal rate at which to sample. It indicates a ratio, where - # one sample trace is kept for every n traces seen. For example, a SampleRate of - # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic - # sampler, who assigns a sample rate for each trace based on the fields selected - # from that trace. - # Eligible for live reload. - SampleRate = 2 - - # FieldList is a list of all the field names to use to form the key that will be - # handed to the dynamic sampler. 
The cardinality of the combination of values - # from all of these keys should be reasonable in the face of the frequency of - # those keys. If the combination of fields in these keys essentially makes them - # unique, the dynamic sampler will do no sampling. If the keys have too few - # values, you won't get samples of the most interesting traces. A good key - # selection will have consistent values for high frequency boring traffic and - # unique values for outliers and interesting traffic. Including an error field - # (or something like HTTP status code) is an excellent choice. As an example, - # assuming 30 or so endpoints, a combination of HTTP endpoint and status code - # would be a good set of keys in order to let you see accurately use of all - # endpoints and call out when there is failing traffic to any endpoint. Field - # names may come from any span in the trace. - # Eligible for live reload. - FieldList = ["request.method","response.status_code"] - - # UseTraceLength will add the number of spans in the trace in to the dynamic - # sampler as part of the key. The number of spans is exact, so if there are - # normally small variations in trace length you may want to leave this off. If - # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Opsramp, set this to true. - # Eligible for live reload. - UseTraceLength = true - - # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field - # to the root span of the trace containing the key used by the sampler to decide - # the sample rate. This can be helpful in understanding why the sampler is - # making certain decisions about sample rate and help you understand how to - # better choose the sample rate key (aka the FieldList setting above) to use. - AddSampleRateKeyToTrace = true - - # AddSampleRateKeyToTraceField is the name of the field the sampler will use - # when adding the sample rate key to the trace. 
This setting is only used when - # AddSampleRateKeyToTrace is true. - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - # ClearFrequencySec is the name of the field the sampler will use to determine - # the period over which it will calculate the sample rate. This setting defaults - # to 30. - # Eligible for live reload. - ClearFrequencySec = 60 - -[dataset2] - - # EMADynamicSampler is a section of the config for manipulating the Exponential - # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, - # it attempts to average a given sample rate, weighting rare traffic and frequent - # traffic differently so as to end up with the correct average. - # - # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended - # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs - # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential - # Moving Average of counts seen per key, and adjusts this average at regular intervals. - # The weight applied to more recent intervals is defined by `weight`, a number between - # (0, 1) - larger values weight the average more toward recent observations. In other words, - # a larger weight will cause sample rates more quickly adapt to traffic patterns, - # while a smaller weight will result in sample rates that are less sensitive to bursts or drops - # in traffic and thus more consistent over time. - # - # Keys that are not found in the EMA will always have a sample - # rate of 1. Keys that occur more frequently will be sampled on a logarithmic - # curve. In other words, every key will be represented at least once in any - # given window and more frequent keys will have their sample rate - # increased proportionally to wind up with the goal sample rate. - Sampler = "EMADynamicSampler" - - # GoalSampleRate is the goal rate at which to sample. 
It indicates a ratio, where - # one sample trace is kept for every n traces seen. For example, a SampleRate of - # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic - # sampler, who assigns a sample rate for each trace based on the fields selected - # from that trace. - # Eligible for live reload. - GoalSampleRate = 2 - - # FieldList is a list of all the field names to use to form the key that will be - # handed to the dynamic sampler. The cardinality of the combination of values - # from all of these keys should be reasonable in the face of the frequency of - # those keys. If the combination of fields in these keys essentially makes them - # unique, the dynamic sampler will do no sampling. If the keys have too few - # values, you won't get samples of the most interesting traces. A good key - # selection will have consistent values for high frequency boring traffic and - # unique values for outliers and interesting traffic. Including an error field - # (or something like HTTP status code) is an excellent choice. As an example, - # assuming 30 or so endpoints, a combination of HTTP endpoint and status code - # would be a good set of keys in order to let you see accurately use of all - # endpoints and call out when there is failing traffic to any endpoint. Field - # names may come from any span in the trace. - # Eligible for live reload. - FieldList = ["request.method","response.status_code"] - - # UseTraceLength will add the number of spans in the trace in to the dynamic - # sampler as part of the key. The number of spans is exact, so if there are - # normally small variations in trace length you may want to leave this off. If - # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Opsramp, set this to true. - # Eligible for live reload. 
- UseTraceLength = true - - # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field - # to the root span of the trace containing the key used by the sampler to decide - # the sample rate. This can be helpful in understanding why the sampler is - # making certain decisions about sample rate and help you understand how to - # better choose the sample rate key (aka the FieldList setting above) to use. - AddSampleRateKeyToTrace = true - - # AddSampleRateKeyToTraceField is the name of the field the sampler will use - # when adding the sample rate key to the trace. This setting is only used when - # AddSampleRateKeyToTrace is true. - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - # AdjustmentInterval defines how often (in seconds) we adjust the moving average from - # recent observations. Default 15s - # Eligible for live reload. - AdjustmentInterval = 15 - - # Weight is a value between (0, 1) indicating the weighting factor used to adjust - # the EMA. With larger values, newer data will influence the average more, and older - # values will be factored out more quickly. In mathematical literature concerning EMA, - # this is referred to as the `alpha` constant. - # Default is 0.5 - # Eligible for live reload. - Weight = 0.5 - - # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. - # Once MaxKeys is reached, new keys will not be included in the sample rate map, but - # existing keys will continue to be be counted. You can use this to keep the sample rate - # map size under control. - # Eligible for live reload - MaxKeys = 0 - - # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key - # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to - # decide what constitutes "zero". Keys with averages below this threshold will be removed - # from the EMA. 
Default is the same as Weight, as this prevents a key with the smallest - # integer value (1) from being aged out immediately. This value should generally be <= Weight, - # unless you have very specific reasons to set it higher. - # Eligible for live reload - AgeOutValue = 0.5 - - # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define - # the burst detection threshold. If total counts observed for a given interval exceed the threshold - # EMA is updated immediately, rather than waiting on the AdjustmentInterval. - # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, - # burst detection will kick in. - # Eligible for live reload - BurstMultiple = 2.0 - - # BurstDetectionDelay indicates the number of intervals to run after Start is called before - # burst detection kicks in. - # Defaults to 3 - # Eligible for live reload - BurstDetectionDelay = 3 - -[dataset3] - - Sampler = "DeterministicSampler" - SampleRate = 10 - -[dataset4] - - Sampler = "RulesBasedSampler" - # Optional, if set to true then the rules will also check nested json fields, in the format of parent.child - CheckNestedFields = false - - [[dataset4.rule]] - name = "drop healthchecks" - drop = true - [[dataset4.rule.condition]] - field = "http.route" - operator = "=" - value = "/health-check" - - [[dataset4.rule]] - name = "keep slow 500 errors" - SampleRate = 1 - [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = 500 - [[dataset4.rule.condition]] - field = "duration_ms" - operator = ">=" - value = 1000.789 - - [[dataset4.rule]] - name = "dynamically sample 200 responses" - [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = 200 - [dataset4.rule.sampler.EMADynamicSampler] - Sampler = "EMADynamicSampler" - GoalSampleRate = 15 - FieldList = ["request.method", "request.route"] - AddSampleRateKeyToTrace = true - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - 
- # Note that Refinery comparisons are type-dependent. If you are operating in an environment where different - # telemetry may send the same field with different types (for example, some systems send status codes as "200" - # instead of 200), you may need to create additional rules to cover these cases. - [[dataset4.rule]] - name = "dynamically sample 200 string responses" - [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = "200" - [dataset4.rule.sampler.EMADynamicSampler] - Sampler = "EMADynamicSampler" - GoalSampleRate = 15 - FieldList = ["request.method", "request.route"] - AddSampleRateKeyToTrace = true - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - [[dataset4.rule]] - name = "sample traces originating from a service" - # if scope is set to "span", a single span in the trace must match - # *all* of the conditions associated with this rule for the rule to - # apply to the trace. - # - # this is especially helpful when sampling a dataset written to - # by multiple services that call one another in normal operation – - # you can set Scope to 'span' to attribute traces to an origin - # service in a way that would be difficult without it. - Scope = "span" - SampleRate = 5 - [[dataset4.rule.condition]] - field = "service name" - operator = "=" - value = "users" - [[dataset4.rule.condition]] - field = "meta.span_type" - operator = "=" - value = "root" - - [[dataset4.rule]] - SampleRate = 10 # default when no rules match, if missing defaults to 10 - -[dataset5] - - Sampler = "TotalThroughputSampler" - GoalThroughputPerSec = 100 - FieldList = "[request.method]" +# [dataset1] + +# # Note: If your dataset name contains a space, you will have to escape the dataset name +# # using single quotes, such as ['dataset 1'] + +# # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler +# # implementation. 
This sampler collects the values of a number of fields from a +# # trace and uses them to form a key. This key is handed to the standard dynamic +# # sampler algorithm which generates a sample rate based on the frequency with +# # which that key has appeared in the previous ClearFrequencySec seconds. See +# # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics +# # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from +# # that package. +# Sampler = "DynamicSampler" + +# # SampleRate is the goal rate at which to sample. It indicates a ratio, where +# # one sample trace is kept for every n traces seen. For example, a SampleRate of +# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic +# # sampler, who assigns a sample rate for each trace based on the fields selected +# # from that trace. +# # Eligible for live reload. +# SampleRate = 1 + +# # FieldList is a list of all the field names to use to form the key that will be +# # handed to the dynamic sampler. The cardinality of the combination of values +# # from all of these keys should be reasonable in the face of the frequency of +# # those keys. If the combination of fields in these keys essentially makes them +# # unique, the dynamic sampler will do no sampling. If the keys have too few +# # values, you won't get samples of the most interesting traces. A good key +# # selection will have consistent values for high frequency boring traffic and +# # unique values for outliers and interesting traffic. Including an error field +# # (or something like HTTP status code) is an excellent choice. As an example, +# # assuming 30 or so endpoints, a combination of HTTP endpoint and status code +# # would be a good set of keys in order to let you see accurately use of all +# # endpoints and call out when there is failing traffic to any endpoint. Field +# # names may come from any span in the trace. +# # Eligible for live reload. 
+# FieldList = [""] + +# # UseTraceLength will add the number of spans in the trace in to the dynamic +# # sampler as part of the key. The number of spans is exact, so if there are +# # normally small variations in trace length you may want to leave this off. If +# # traces are consistent lengths and changes in trace length is a useful +# # indicator of traces you'd like to see in Opsramp, set this to true. +# # Eligible for live reload. +# UseTraceLength = true + +# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field +# # to the root span of the trace containing the key used by the sampler to decide +# # the sample rate. This can be helpful in understanding why the sampler is +# # making certain decisions about sample rate and help you understand how to +# # better choose the sample rate key (aka the FieldList setting above) to use. +# AddSampleRateKeyToTrace = true + +# # AddSampleRateKeyToTraceField is the name of the field the sampler will use +# # when adding the sample rate key to the trace. This setting is only used when +# # AddSampleRateKeyToTrace is true. +# AddSampleRateKeyToTraceField = "" + +# # ClearFrequencySec is the name of the field the sampler will use to determine +# # the period over which it will calculate the sample rate. This setting defaults +# # to 30. +# # Eligible for live reload. +# ClearFrequencySec = 60 + +# [dataset2] + +# # EMADynamicSampler is a section of the config for manipulating the Exponential +# # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, +# # it attempts to average a given sample rate, weighting rare traffic and frequent +# # traffic differently so as to end up with the correct average. +# # +# # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended +# # for most use cases. 
Based on the DynamicSampler implementation, EMADynamicSampler differs +# # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential +# # Moving Average of counts seen per key, and adjusts this average at regular intervals. +# # The weight applied to more recent intervals is defined by `weight`, a number between +# # (0, 1) - larger values weight the average more toward recent observations. In other words, +# # a larger weight will cause sample rates more quickly adapt to traffic patterns, +# # while a smaller weight will result in sample rates that are less sensitive to bursts or drops +# # in traffic and thus more consistent over time. +# # +# # Keys that are not found in the EMA will always have a sample +# # rate of 1. Keys that occur more frequently will be sampled on a logarithmic +# # curve. In other words, every key will be represented at least once in any +# # given window and more frequent keys will have their sample rate +# # increased proportionally to wind up with the goal sample rate. +# Sampler = "EMADynamicSampler" + +# # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where +# # one sample trace is kept for every n traces seen. For example, a SampleRate of +# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic +# # sampler, who assigns a sample rate for each trace based on the fields selected +# # from that trace. +# # Eligible for live reload. +# GoalSampleRate = 1 + +# # FieldList is a list of all the field names to use to form the key that will be +# # handed to the dynamic sampler. The cardinality of the combination of values +# # from all of these keys should be reasonable in the face of the frequency of +# # those keys. If the combination of fields in these keys essentially makes them +# # unique, the dynamic sampler will do no sampling. If the keys have too few +# # values, you won't get samples of the most interesting traces. 
A good key +# # selection will have consistent values for high frequency boring traffic and +# # unique values for outliers and interesting traffic. Including an error field +# # (or something like HTTP status code) is an excellent choice. As an example, +# # assuming 30 or so endpoints, a combination of HTTP endpoint and status code +# # would be a good set of keys in order to let you see accurately use of all +# # endpoints and call out when there is failing traffic to any endpoint. Field +# # names may come from any span in the trace. +# # Eligible for live reload. +# FieldList = [""] + +# # UseTraceLength will add the number of spans in the trace in to the dynamic +# # sampler as part of the key. The number of spans is exact, so if there are +# # normally small variations in trace length you may want to leave this off. If +# # traces are consistent lengths and changes in trace length is a useful +# # indicator of traces you'd like to see in Opsramp, set this to true. +# # Eligible for live reload. +# UseTraceLength = true + +# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field +# # to the root span of the trace containing the key used by the sampler to decide +# # the sample rate. This can be helpful in understanding why the sampler is +# # making certain decisions about sample rate and help you understand how to +# # better choose the sample rate key (aka the FieldList setting above) to use. +# AddSampleRateKeyToTrace = true + +# # AddSampleRateKeyToTraceField is the name of the field the sampler will use +# # when adding the sample rate key to the trace. This setting is only used when +# # AddSampleRateKeyToTrace is true. +# AddSampleRateKeyToTraceField = "" + +# # AdjustmentInterval defines how often (in seconds) we adjust the moving average from +# # recent observations. Default 15s +# # Eligible for live reload. 
+# AdjustmentInterval = 15 + +# # Weight is a value between (0, 1) indicating the weighting factor used to adjust +# # the EMA. With larger values, newer data will influence the average more, and older +# # values will be factored out more quickly. In mathematical literature concerning EMA, +# # this is referred to as the `alpha` constant. +# # Default is 0.5 +# # Eligible for live reload. +# Weight = 0.5 + +# # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. +# # Once MaxKeys is reached, new keys will not be included in the sample rate map, but +# # existing keys will continue to be be counted. You can use this to keep the sample rate +# # map size under control. +# # Eligible for live reload +# MaxKeys = 0 + +# # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key +# # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to +# # decide what constitutes "zero". Keys with averages below this threshold will be removed +# # from the EMA. Default is the same as Weight, as this prevents a key with the smallest +# # integer value (1) from being aged out immediately. This value should generally be <= Weight, +# # unless you have very specific reasons to set it higher. +# # Eligible for live reload +# AgeOutValue = 0.5 + +# # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define +# # the burst detection threshold. If total counts observed for a given interval exceed the threshold +# # EMA is updated immediately, rather than waiting on the AdjustmentInterval. +# # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, +# # burst detection will kick in. +# # Eligible for live reload +# BurstMultiple = 2.0 + +# # BurstDetectionDelay indicates the number of intervals to run after Start is called before +# # burst detection kicks in. 
+# # Defaults to 3 +# # Eligible for live reload +# BurstDetectionDelay = 3 + +# [dataset3] + +# Sampler = "DeterministicSampler" +# SampleRate = 10 + +# [dataset4] + +# Sampler = "RulesBasedSampler" + +# [[dataset4.rule]] +# # Rule name +# name = "" +# # Drop Condition (examples: true, false) +# drop = +# [[dataset4.rule.condition]] +# # Field Name (example: status_code) +# field = "" +# # Operator Value (example: =) +# operator = "" +# # Field Value (example: 500) +# value = "" + + + +# [dataset5] + +# Sampler = "TotalThroughputSampler" +# GoalThroughputPerSec = 100 +# FieldList = "[]" From ab860dc3ce3b7c6d4455a1f33a1c6862830d9738 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Thu, 30 Mar 2023 19:45:44 +0530 Subject: [PATCH 295/351] separation of auth token endpoint from traces export endpoint --- Dockerfile | 6 +- cmd/tracing-proxy/main.go | 19 +- config/config.go | 30 +-- config/config_test.go | 6 +- config/file_config.go | 208 +++++----------- config/mock.go | 4 +- config_complete.toml | 512 -------------------------------------- config_complete.yaml | 403 ++++++++++++++++++++++++++++++ go.mod | 4 +- go.sum | 8 +- metrics/opsramp.go | 96 ++++--- route/otlp_trace.go | 35 ++- route/route.go | 10 - rules_complete.toml | 229 ----------------- rules_complete.yaml | 261 +++++++++++++++++++ transmit/transmit.go | 18 +- 16 files changed, 847 insertions(+), 1002 deletions(-) delete mode 100644 config_complete.toml create mode 100644 config_complete.yaml delete mode 100644 rules_complete.toml create mode 100644 rules_complete.yaml diff --git a/Dockerfile b/Dockerfile index d45f7cd878..5f518d2cb3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,9 +24,9 @@ FROM alpine:3.17 RUN apk update && apk add --no-cache bash ca-certificates && update-ca-certificates -COPY config_complete.toml /etc/tracing-proxy/config.toml -COPY rules_complete.toml /etc/tracing-proxy/rules.toml +COPY --from=builder /app/config_complete.yaml /etc/tracing-proxy/config.yaml +COPY --from=builder 
/app/rules_complete.yaml /etc/tracing-proxy/rules.yaml COPY --from=builder /app/tracing-proxy /usr/bin/tracing-proxy -CMD ["/usr/bin/tracing-proxy", "--config", "/etc/tracing-proxy/config.toml", "--rules_config", "/etc/tracing-proxy/rules.toml"] \ No newline at end of file +CMD ["/usr/bin/tracing-proxy", "--config", "/etc/tracing-proxy/config.yaml", "--rules_config", "/etc/tracing-proxy/rules.yaml"] \ No newline at end of file diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index 3c0d2c85f4..c0660c53df 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -134,8 +134,7 @@ func main() { upstreamMetricsConfig := metrics.GetMetricsImplementation("libtrace_upstream") peerMetricsConfig := metrics.GetMetricsImplementation("libtrace_peer") - opsrampkey, _ := c.GetOpsrampKey() - opsrampsecret, _ := c.GetOpsrampSecret() + authConfig := c.GetAuthConfig() opsrampapi, err := c.GetOpsrampAPI() if err != nil { logrusLogger.Fatal(err) @@ -143,7 +142,7 @@ func main() { userAgentAddition := "tracing-proxy/" + version upstreamClient, err := libtrace.NewClient(libtrace.ClientConfig{ - Transmission: &transmission.Opsramptraceproxy{ + Transmission: &transmission.TraceProxy{ MaxBatchSize: c.GetMaxBatchSize(), BatchTimeout: c.GetBatchTimeout(), MaxConcurrentBatches: libtrace.DefaultMaxConcurrentBatches, @@ -155,9 +154,11 @@ func main() { Metrics: upstreamMetricsConfig, UseTls: c.GetGlobalUseTLS(), UseTlsInsecure: c.GetGlobalUseTLSInsecureSkip(), - OpsrampKey: opsrampkey, - OpsrampSecret: opsrampsecret, + AuthTokenEndpoint: authConfig.Endpoint, + AuthTokenKey: authConfig.Key, + AuthTokenSecret: authConfig.Secret, ApiHost: opsrampapi, + TenantId: authConfig.TenantId, }, }) if err != nil { @@ -166,7 +167,7 @@ func main() { } peerClient, err := libtrace.NewClient(libtrace.ClientConfig{ - Transmission: &transmission.Opsramptraceproxy{ + Transmission: &transmission.TraceProxy{ MaxBatchSize: c.GetMaxBatchSize(), BatchTimeout: c.GetBatchTimeout(), 
MaxConcurrentBatches: libtrace.DefaultMaxConcurrentBatches, @@ -176,9 +177,11 @@ func main() { DisableCompression: !c.GetCompressPeerCommunication(), EnableMsgpackEncoding: false, Metrics: peerMetricsConfig, - OpsrampKey: opsrampkey, - OpsrampSecret: opsrampsecret, + AuthTokenEndpoint: authConfig.Endpoint, + AuthTokenKey: authConfig.Key, + AuthTokenSecret: authConfig.Secret, ApiHost: opsrampapi, + TenantId: authConfig.TenantId, }, }) if err != nil { diff --git a/config/config.go b/config/config.go index 9ee23b2266..702fe40df6 100644 --- a/config/config.go +++ b/config/config.go @@ -116,8 +116,8 @@ type Config interface { // GetLogrusConfig returns the config specific to Logrus GetLogrusConfig() (*LogrusLoggerConfig, error) - // GetOpsRampMetricsConfig returns the config specific to PrometheusMetrics - GetOpsRampMetricsConfig() (*OpsRampMetricsConfig, error) + // GetMetricsConfig returns the config specific to PrometheusMetrics + GetMetricsConfig() MetricsConfig // GetUpstreamBufferSize returns the size of the libtrace buffer to use for the upstream // libtrace client @@ -153,29 +153,11 @@ type Config interface { // GetUseTLSInsecureSkip returns false when certificate checks are disabled GetGlobalUseTLSInsecureSkip() bool - // GetProxyProtocol returns protocol on which to listen for - // proxy traffic - GetProxyProtocol() (string, error) + // GetProxyConfig returns proxy configuration + GetProxyConfig() ProxyConfiguration - // GetProxyServer returns the address on which to listen for - // proxy traffic - GetProxyServer() (string, error) - - // GetProxyPort returns the port on which to listen for - // proxy traffic - GetProxyPort() int64 - - // GetProxyUsername returns the username on which to listen for - // proxy traffic - GetProxyUsername() (string, error) - - // GetProxyPassword returns the password of proxy user on which to listen for - // proxy traffic - GetProxyPassword() (string, error) - - GetOpsrampKey() (string, error) - - GetOpsrampSecret() (string, 
error) + // GetAuthConfig return the authentication configuration + GetAuthConfig() AuthConfiguration GetTenantId() (string, error) diff --git a/config/config_test.go b/config/config_test.go index ecd516e73c..65ecacfccb 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -106,7 +106,7 @@ func TestMetricsAPIKeyEnvVar(t *testing.T) { t.Error(err) } - if d, _ := c.GetOpsRampMetricsConfig(); d.OpsRampMetricsAPIKey != tc.key { + if d := c.GetAuthConfig(); d.Key != tc.key { t.Error("received", d, "expected", tc.key) } }) @@ -130,7 +130,7 @@ func TestMetricsAPIKeyMultipleEnvVar(t *testing.T) { t.Error(err) } - if d, _ := c.GetOpsRampMetricsConfig(); d.OpsRampMetricsAPIKey != specificKey { + if d := c.GetAuthConfig(); d.Key != specificKey { t.Error("received", d, "expected", specificKey) } } @@ -147,7 +147,7 @@ func TestMetricsAPIKeyFallbackEnvVar(t *testing.T) { t.Error(err) } - if d, _ := c.GetOpsRampMetricsConfig(); d.OpsRampMetricsAPIKey != key { + if d := c.GetAuthConfig(); d.Key != key { t.Error("received", d, "expected", key) } } diff --git a/config/file_config.go b/config/file_config.go index c8b829582d..7135471556 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -20,6 +20,10 @@ import ( "github.com/spf13/viper" ) +const ( + DefaultDataset = "ds" +) + type fileConfig struct { config *viper.Viper rules *viper.Viper @@ -37,9 +41,6 @@ type configContents struct { GRPCListenAddr string GRPCPeerListenAddr string OpsrampAPI string `validate:"required,url"` - OpsrampKey string - OpsrampSecret string - TenantId string Dataset string LoggingLevel string `validate:"required"` Collector string `validate:"required,oneof= InMemCollector"` @@ -67,14 +68,28 @@ type configContents struct { CacheOverrunStrategy string SampleCache SampleCacheConfig `validate:"required"` - SendMetricsToOpsRamp bool - UseTls bool - UseTlsInSecure bool - ProxyProtocol string - ProxyServer string - ProxyPort int64 - ProxyUsername string - ProxyPassword string 
+ UseTls bool + UseTlsInSecure bool + + ProxyConfiguration + AuthConfiguration + MetricsConfig +} + +type ProxyConfiguration struct { + Protocol string + Host string + Port int64 + Username string + Password string +} + +type AuthConfiguration struct { + SkipAuth bool + Endpoint string `validate:"url"` + Key string + Secret string + TenantId string } type InMemoryCollectorCacheCapacity struct { @@ -94,20 +109,13 @@ type LogrusLoggerConfig struct { } `toml:"File"` } -type OpsRampMetricsConfig struct { - MetricsListenAddr string `validate:"required"` - OpsRampMetricsAPI string - OpsRampTenantID string - OpsRampMetricsAPIKey string - OpsRampMetricsAPISecret string - OpsRampMetricsReportingInterval int64 - OpsRampMetricsRetryCount int64 - ProxyProtocol string - ProxyServer string - ProxyPort int64 - ProxyUserName string - ProxyPassword string - OpsRampMetricsList []string +type MetricsConfig struct { + Enable bool `validate:"required"` + ListenAddr string `validate:"required"` + OpsRampAPI string + ReportingInterval int64 + RetryCount int64 + MetricsList []string } type PeerManagementConfig struct { @@ -147,29 +155,19 @@ type GRPCServerParameters struct { func NewConfig(config, rules string, errorCallback func(error)) (Config, error) { c := viper.New() - c.BindEnv("GRPCListenAddr", "TRACE_PROXY_GRPC_LISTEN_ADDRESS") - c.BindEnv("PeerManagement.RedisHost", "TRACE_PROXY_REDIS_HOST") - c.BindEnv("PeerManagement.RedisUsername", "TRACE_PROXY_REDIS_USERNAME") - c.BindEnv("PeerManagement.RedisPassword", "TRACE_PROXY_REDIS_PASSWORD") - c.BindEnv("QueryAuthToken", "TRACE_PROXY_QUERY_AUTH_TOKEN") - - c.SetDefault("ListenAddr", "0.0.0.0:8080") - c.SetDefault("PeerListenAddr", "0.0.0.0:8081") + c.SetDefault("ListenAddr", "0.0.0.0:8082") + c.SetDefault("PeerListenAddr", "0.0.0.0:8083") c.SetDefault("CompressPeerCommunication", true) - c.SetDefault("APIKeys", []string{"*"}) - c.SetDefault("PeerManagement.Peers", []string{"http://127.0.0.1:8081"}) + 
c.SetDefault("PeerManagement.Peers", []string{"http://127.0.0.1:8082"}) c.SetDefault("PeerManagement.Type", "file") c.SetDefault("PeerManagement.UseTLS", false) c.SetDefault("PeerManagement.UseTLSInsecure", false) c.SetDefault("PeerManagement.UseIPV6Identifier", false) c.SetDefault("OpsrampAPI", "") - c.SetDefault("OpsrampKey", "") - c.SetDefault("OpsrampSecret", "") - c.SetDefault("TenantId", "") - c.SetDefault("Dataset", "ds") + c.SetDefault("Dataset", DefaultDataset) c.SetDefault("PeerManagement.Timeout", 5*time.Second) c.SetDefault("PeerManagement.Strategy", "legacy") - c.SetDefault("LoggingLevel", "debug") + c.SetDefault("LoggingLevel", "info") c.SetDefault("Collector", "InMemCollector") c.SetDefault("SendDelay", 2*time.Second) c.SetDefault("BatchTimeout", libtrace.DefaultBatchTimeout) @@ -194,12 +192,16 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("SampleCache.KeptSize", 10_000) c.SetDefault("SampleCache.DroppedSize", 1_000_000) c.SetDefault("SampleCache.SizeCheckInterval", 10*time.Second) - c.SetDefault("SendMetricsToOpsRamp", false) - c.SetDefault("ProxyProtocol", "") - c.SetDefault("ProxyServer", "") - c.SetDefault("ProxyPort", int64(0)) - c.SetDefault("ProxyUsername", "") - c.SetDefault("ProxyPassword", "") + + // AuthConfig Defaults + c.SetDefault("AuthConfiguration.SkipAuth", false) + + // MetricsConfig Defaults + c.SetDefault("MetricsConfig.Enable", false) + c.SetDefault("MetricsConfig.ListenAddr", "0.0.0.0:2112") + c.SetDefault("MetricsConfig.ReportingInterval", 10) + c.SetDefault("MetricsConfig.RetryCount", 2) + c.SetDefault("MetricsConfig.MetricsList", []string{".*"}) c.SetConfigFile(config) err := c.ReadInConfig() @@ -310,10 +312,17 @@ func (f *fileConfig) validateGeneralConfigs() error { f.lastLoadTime = time.Now() // validate metrics config - _, err := f.GetOpsRampMetricsConfig() - if err != nil { - return err + metricsConfig := f.GetMetricsConfig() + if metricsConfig.RetryCount < 0 || 
metricsConfig.RetryCount > 10 { + return fmt.Errorf("metrics retry count %d invalid, must be in range 1-10", metricsConfig.RetryCount) } + if metricsConfig.ReportingInterval < 10 { + return fmt.Errorf("mertics reporting interval %d not allowed, must be >= 10", metricsConfig.ReportingInterval) + } + if len(metricsConfig.MetricsList) < 1 { + return fmt.Errorf("mertics list cant be empty") + } + return nil } @@ -513,30 +522,11 @@ func (f *fileConfig) GetRedisDatabase() int { return f.config.GetInt("PeerManagement.RedisDatabase") } -func (f *fileConfig) GetProxyProtocol() (string, error) { +func (f *fileConfig) GetProxyConfig() ProxyConfiguration { f.mux.RLock() defer f.mux.RUnlock() - return f.conf.ProxyProtocol, nil -} -func (f *fileConfig) GetProxyServer() (string, error) { - f.mux.RLock() - defer f.mux.RUnlock() - return f.conf.ProxyServer, nil -} -func (f *fileConfig) GetProxyPort() int64 { - f.mux.RLock() - defer f.mux.RUnlock() - return f.conf.ProxyPort -} -func (f *fileConfig) GetProxyUsername() (string, error) { - f.mux.RLock() - defer f.mux.RUnlock() - return f.conf.ProxyUsername, nil -} -func (f *fileConfig) GetProxyPassword() (string, error) { - f.mux.RLock() - defer f.mux.RUnlock() - return f.conf.ProxyPassword, nil + + return f.conf.ProxyConfiguration } func (f *fileConfig) GetUseTLS() (bool, error) { @@ -586,18 +576,11 @@ func (f *fileConfig) GetOpsrampAPI() (string, error) { return fmt.Sprintf("%s://%s", u.Scheme, u.Hostname()), nil } -func (f *fileConfig) GetOpsrampKey() (string, error) { - f.mux.RLock() - defer f.mux.RUnlock() - - return f.conf.OpsrampKey, nil -} - -func (f *fileConfig) GetOpsrampSecret() (string, error) { +func (f *fileConfig) GetAuthConfig() AuthConfiguration { f.mux.RLock() defer f.mux.RUnlock() - return f.conf.OpsrampSecret, nil + return f.conf.AuthConfiguration } func (f *fileConfig) GetDataset() (string, error) { @@ -611,7 +594,7 @@ func (f *fileConfig) GetTenantId() (string, error) { f.mux.RLock() defer f.mux.RUnlock() - return 
f.conf.TenantId, nil + return f.conf.AuthConfiguration.TenantId, nil } func (f *fileConfig) GetLoggingLevel() (string, error) { @@ -757,68 +740,11 @@ func (f *fileConfig) GetLogrusConfig() (*LogrusLoggerConfig, error) { return nil, errors.New("No config found for LogrusConfig") } -func (f *fileConfig) GetOpsRampMetricsConfig() (*OpsRampMetricsConfig, error) { +func (f *fileConfig) GetMetricsConfig() MetricsConfig { f.mux.RLock() defer f.mux.RUnlock() - opsRampMetricsConfig := &OpsRampMetricsConfig{} - - if sub := f.config.Sub("OpsRampMetrics"); sub != nil { - err := sub.UnmarshalExact(opsRampMetricsConfig) - if err != nil { - return opsRampMetricsConfig, err - } - - if opsRampMetricsConfig.OpsRampMetricsRetryCount < 0 || opsRampMetricsConfig.OpsRampMetricsRetryCount > 10 { - opsRampMetricsConfig.OpsRampMetricsRetryCount = 2 - } - - if opsRampMetricsConfig.OpsRampMetricsReportingInterval < 10 { - opsRampMetricsConfig.OpsRampMetricsReportingInterval = 10 - } - - if len(opsRampMetricsConfig.OpsRampMetricsList) < 1 { - opsRampMetricsConfig.OpsRampMetricsList = []string{".*"} - } - - // setting values from main configurations when OpsRampMetrics is empty - if opsRampMetricsConfig.OpsRampMetricsAPI == "" { - opsRampMetricsConfig.OpsRampMetricsAPI = f.conf.OpsrampAPI - } - if opsRampMetricsConfig.OpsRampMetricsAPIKey == "" { - opsRampMetricsConfig.OpsRampMetricsAPIKey = f.conf.OpsrampKey - } - if opsRampMetricsConfig.OpsRampMetricsAPISecret == "" { - opsRampMetricsConfig.OpsRampMetricsAPISecret = f.conf.OpsrampSecret - } - if opsRampMetricsConfig.OpsRampTenantID == "" { - opsRampMetricsConfig.OpsRampTenantID = f.conf.TenantId - } - if opsRampMetricsConfig.ProxyServer == "" { - opsRampMetricsConfig.ProxyServer = f.conf.ProxyServer - } - if opsRampMetricsConfig.ProxyPort <= 0 { - opsRampMetricsConfig.ProxyPort = f.conf.ProxyPort - } - if opsRampMetricsConfig.ProxyProtocol != "" { - opsRampMetricsConfig.ProxyProtocol = f.conf.ProxyProtocol - } - if 
opsRampMetricsConfig.ProxyUserName != "" { - opsRampMetricsConfig.ProxyUserName = f.conf.ProxyUsername - } - if opsRampMetricsConfig.ProxyPassword != "" { - opsRampMetricsConfig.ProxyPassword = f.conf.ProxyPassword - } - - v := validator.New() - err = v.Struct(opsRampMetricsConfig) - if err != nil { - return opsRampMetricsConfig, err - } - - return opsRampMetricsConfig, nil - } - return nil, errors.New("No config found for OpsRampMetrics") + return f.conf.MetricsConfig } func (f *fileConfig) GetSendDelay() (time.Duration, error) { @@ -921,7 +847,7 @@ func (f *fileConfig) GetSendMetricsToOpsRamp() bool { f.mux.RLock() defer f.mux.RUnlock() - return f.conf.SendMetricsToOpsRamp + return f.conf.MetricsConfig.Enable } func (f *fileConfig) GetGlobalUseTLS() bool { diff --git a/config/mock.go b/config/mock.go index 8c47e9112c..9611efb03a 100644 --- a/config/mock.go +++ b/config/mock.go @@ -50,7 +50,7 @@ type MockConfig struct { GetMetricsTypeErr error GetMetricsTypeVal string GetOpsRampMetricsConfigErr error - GetOpsRampMetricsConfigVal OpsRampMetricsConfig + GetOpsRampMetricsConfigVal MetricsConfig GetSendDelayErr error GetSendDelayVal time.Duration GetBatchTimeoutVal time.Duration @@ -233,7 +233,7 @@ func (m *MockConfig) GetMetricsType() (string, error) { return m.GetMetricsTypeVal, m.GetMetricsTypeErr } -func (m *MockConfig) GetPrometheusMetricsConfig() (OpsRampMetricsConfig, error) { +func (m *MockConfig) GetPrometheusMetricsConfig() (MetricsConfig, error) { m.Mux.RLock() defer m.Mux.RUnlock() diff --git a/config_complete.toml b/config_complete.toml deleted file mode 100644 index 5d20659ecf..0000000000 --- a/config_complete.toml +++ /dev/null @@ -1,512 +0,0 @@ -##################### -## Tracing-proxy Config ## -##################### - -# ListenAddr is the IP and port on which to listen for incoming events. Incoming -# traffic is expected to be HTTP, so if using SSL put something like nginx in -# front to do the decryption. 
-# Should be of the form 0.0.0.0:8082 -# Not eligible for live reload. -ListenAddr = "0.0.0.0:8082" - -# GRPCListenAddr is the IP and port on which to listen for incoming events over -# gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put -# something like nginx in front to do the decryption. -# Should be of the form 0.0.0.0:9090 -# Not eligible for live reload. -GRPCListenAddr = "0.0.0.0:9090" - -# PeerListenAddr is the IP and port on which to listen for traffic being -# rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL -# put something like nginx in front to do the decryption. Must be different from -# ListenAddr -# Should be of the form 0.0.0.0:8081 -# Not eligible for live reload. -PeerListenAddr = "0.0.0.0:8083" -GRPCPeerListenAddr = "0.0.0.0:8084" - -# ProxyProtocol accepts http and https -# Not Eligible for live reload. -ProxyProtocol = "" - -# ProxyServer takes the proxy server address -# Not Eligible for live reload. -ProxyServer = "" - -# ProxyPort takes the proxy server port -# Not Eligible for live reload. -ProxyPort = 0 - -# ProxyUserName takes the proxy username -# Not Eligible for live reload. -ProxyUserName = "" - -# ProxyPassword takes the proxy password -# Not Eligible for live reload. -ProxyPassword = "" - -# CompressPeerCommunication determines whether tracin will compress span data -# it forwards to peers. If it costs money to transmit data between tracin -# instances (e.g. they're spread across AWS availability zones), then you -# almost certainly want compression enabled to reduce your bill. The option to -# disable it is provided as an escape hatch for deployments that value lower CPU -# utilization over data transfer costs. -CompressPeerCommunication = true - -# OpsrampAPI is the URL for the upstream Opsramp API. -# Eligible for live reload. 
-#OpsrampAPI = "localhost:50052" -OpsrampAPI = - -# OpsrampKey is used to get the OauthToken -OpsrampKey = - -# OpsrampSecret is used to get the OauthToken -OpsrampSecret = - -# Traces are send to the client with given tenantid -TenantId = - -# Dataset you want to use for sampling -Dataset = "ds" - -#Tls Options -UseTls = true -UseTlsInsecure = false - -# SendDelay is a short timer that will be triggered when a trace is complete. -# Tracing-proxy will wait this duration before actually sending the trace. The -# reason for this short delay is to allow for small network delays or clock -# jitters to elapse and any final spans to arrive before actually sending the -# trace. This supports duration strings with supplied units. Set to 0 for -# immediate sends. -# Eligible for live reload. -SendDelay = "2s" - -# BatchTimeout dictates how frequently to send unfulfilled batches. By default -# this will use the DefaultBatchTimeout in libhoney as its value, which is 100ms. -# Eligible for live reload. -BatchTimeout = "1s" - -# TraceTimeout is a long timer; it represents the outside boundary of how long -# to wait before sending an incomplete trace. Normally traces are sent when the -# root span arrives. Sometimes the root span never arrives (due to crashes or -# whatever), and this timer will send a trace even without having received the -# root span. If you have particularly long-lived traces you should increase this -# timer. This supports duration strings with supplied units. -# Eligible for live reload. -TraceTimeout = "60s" - -# MaxBatchSize is the number of events to be included in the batch for sending -MaxBatchSize = 500 - -# SendTicker is a short timer; it determines the duration to use to check for traces to send -SendTicker = "100ms" - -# LoggingLevel is the level above which we should log. Debug is very verbose, -# and should only be used in pre-production environments. Info is the -# recommended level. 
Valid options are "debug", "info", "error", and -# "panic" -# Not eligible for live reload. -LoggingLevel = "error" - -# UpstreamBufferSize and PeerBufferSize control how large of an event queue to use -# when buffering events that will be forwarded to peers or the upstream API. -UpstreamBufferSize = 10000 -PeerBufferSize = 10000 - -# DebugServiceAddr sets the IP and port the debug service will run on -# The debug service will only run if the command line flag -d is specified -# The debug service runs on the first open port between localhost:6060 and :6069 by default -# DebugServiceAddr = "localhost:8085" - -# AddHostMetadataToTrace determines whether or not to add information about -# the host that Tracing-proxy is running on to the spans that it processes. -# If enabled, information about the host will be added to each span with the -# prefix `meta.tracing-procy.`. -# Currently the only value added is 'meta.tracing-proxy.local_hostname'. -# Not eligible for live reload -AddHostMetadataToTrace = false - -# EnvironmentCacheTTL is the amount of time a cache entry will live that associates -# an API key with an environment name. -# Cache misses lookup the environment name using OpsrampAPI config value. -# Default is 1 hour ("1h"). -# Not eligible for live reload. -EnvironmentCacheTTL = "1h" - -# QueryAuthToken, if specified, provides a token that must be specified with -# the header "X-Opsramp-Tracing-proxy-Query" in order for a /query request to succeed. -# These /query requests are intended for debugging tracin installations and -# are not typically needed in normal operation. -# Can be specified in the environment as tracing-proxy_QUERY_AUTH_TOKEN. -# If left unspecified, the /query endpoints are inaccessible. -# Not eligible for live reload. -# QueryAuthToken = "some-random-value" - -# AddRuleReasonToTrace causes traces that are sent to Opsramp to include the field `meta.tracin.reason`. 
-# This field contains text indicating which rule was evaluated that caused the trace to be included. -# Eligible for live reload. -# AddRuleReasonToTrace = true - -# AdditionalErrorFields should be a list of span fields that should be included when logging -# errors that happen during ingestion of events (for example, the span too large error). -# This is primarily useful in trying to track down misbehaving senders in a large installation. -# The fields `dataset`, `apihost`, and `environment` are always included. -# If a field is not present in the span, it will not be present in the error log. -# Default is ["trace.span_id"]. -# Eligible for live reload. -AdditionalErrorFields = [ - "trace.span_id" -] - -# AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate -# the number of child spans on the trace at the time the sampling decision was made. -# This value is available to the rules-based sampler, making it possible to write rules that -# are dependent upon the number of spans in the trace. -# Default is false. -# Eligible for live reload. -# AddSpanCountToRoot = true - -# CacheOverrunStrategy controls the cache management behavior under memory pressure. -# "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, -# which is generally not helpful unless it occurs because of a permanent change in traffic patterns. -# In the "impact" strategy, the items having the most impact on the cache size are -# ejected from the cache earlier than normal but the cache is not resized. -# In all cases, it only applies if MaxAlloc is nonzero. -# Default is "resize" for compatibility but "impact" is recommended for most installations. -# Eligible for live reload. 
-# CacheOverrunStrategy = "impact" - -# Metrics are sent to OpsRamp (The collection happens based on configuration specifie -# in OpsRampMetrics and only works when the Metrics is set to "prometheus") -SendMetricsToOpsRamp = false - -############################ -## Implementation Choices ## -############################ - -# Each of the config options below chooses an implementation of a Tracing-proxy -# component to use. Depending on the choice there may be more configuration -# required below in the section for that choice. Changing implementation choices -# requires a process restart; these changes will not be picked up by a live -# config reload. (Individual config options for a given implementation may be -# eligible for live reload). - -# Collector describes which collector to use for collecting traces. The only -# current valid option is "InMemCollector".. More can be added by adding -# implementations of the Collector interface. -Collector = "InMemCollector" - -######################### -## Peer Management ## -######################### - -[PeerManagement] -Type = "file" -## Peers is the list of all servers participating in this proxy cluster. Events -## will be sharded evenly across all peers based on the Trace ID. Values here -## should be the base URL used to access the peer, and should include scheme, -## hostname (or ip address) and port. All servers in the cluster should be in -## this list, including this host. -Peers = [ - "http://127.0.0.1:8084", #only grpc peer listener used -# # "http://127.0.0.1:8083", -# # "http://10.1.2.3.4:8080", -# # "http://tracin-1231:8080", -# # "http://peer-3.fqdn" // assumes port 80 -] - -#[PeerManagement] -#Type = "redis" -# RedisHost is is used to connect to redis for peer cluster membership management. -# Further, if the environment variable 'Tracing_Proxy_REDIS_HOST' is set it takes -# precedence and this value is ignored. -# Not eligible for live reload. 
-#RedisHost = "redis:22122" - -# RedisUsername is the username used to connect to redis for peer cluster membership management. -# If the environment variable 'Tracing_Proxy_REDIS_USERNAME' is set it takes -# precedence and this value is ignored. -# Not eligible for live reload. -# RedisUsername = "" - -# RedisPassword is the password used to connect to redis for peer cluster membership management. -# If the environment variable 'Tracing_Proxy_REDIS_PASSWORD' is set it takes -# precedence and this value is ignored. -# Not eligible for live reload. -# RedisPassword = "" - -# RedisPrefix is a string used as a prefix for the keys in redis while storing -# the peer membership. It might be useful to set this in any situation where -# multiple tracing-proxy clusters or multiple applications want to share a single -# Redis instance. If not set then "tracing-proxy" is used as prefix -# RedisPrefix = "customPrefix" - -# RedisDatabase is an integer from 0-15 indicating the database number to use -# for the Redis instance storing the peer membership. It might be useful to set -# this in any situation where multiple trace-proxy clusters or multiple -# applications want to share a single Redis instance. if not set Default = 0 -# RedisDatabase = 1 - -# UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. -# Not eligible for live reload. -# UseTLS = false - -# UseTLSInsecure disables certificate checks -# Not eligible for live reload. -# UseTLSInsecure = false - -# IdentifierInterfaceName is optional. By default, when using RedisHost, Tracing-proxy will use -# the local hostname to identify itself to other peers in Redis. If your environment -# requires that you use IPs as identifiers (for example, if peers can't resolve eachother -# by name), you can specify the network interface that Tracing-proxy is listening on here. 
-# Tracing-proxy will use the first unicast address that it finds on the specified network -# interface as its identifier. -# Not eligible for live reload. -# IdentifierInterfaceName = "eth0" - -# UseIPV6Identifier is optional. If using IdentifierInterfaceName, Tracing-proxy will default to the first -# IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use -# the first IPV6 unicast address found. -# UseIPV6Identifier = false - -# RedisIdentifier is optional. By default, when using RedisHost, Tracing-proxy will use -# the local hostname to id788714cd-a17a-4d7e-9bac-c35131f4bcc2entify itself to other peers in Redis. If your environment -# requires that you use IPs as identifiers (for example, if peers can't resolve eachother -# by name), you can specify the exact identifier (IP address, etc) to use here. -# Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set. -# RedisIdentifier = "192.168.1.1" - -# Timeout is optional. By default, when using RedisHost, Tracing-proxy will timeout -# after 5s when communicating with Redis. -# Timeout = "5s" - -# Strategy controls the way that traces are assigned to tracin nodes. -# The "legacy" strategy uses a simple algorithm that unfortunately causes -# 1/2 of the in-flight traces to be assigned to a different node whenever the -# number of nodes changes. -# The legacy strategy is deprecated and is intended to be removed in a future release. -# The "hash" strategy is strongly recommended, as only 1/N traces (where N is the -# number of nodes) are disrupted when the node count changes. -# Not eligible for live reload. -Strategy = "hash" - -######################### -## In-Memory Collector ## -######################### - -# InMemCollector brings together all the settings that are relevant to -# collecting spans together to make traces. 
-[InMemCollector] - -# The collection cache is used to collect all spans into a trace as well as -# remember the sampling decision for any spans that might come in after the -# trace has been marked "complete" (either by timing out or seeing the root -# span). The number of traces in the cache should be many multiples (100x to -# 1000x) of the total number of concurrently active traces (trace throughput * -# trace duration). -# Eligible for live reload. Growing the cache capacity with a live config reload -# is fine. Avoid shrinking it with a live reload (you can, but it may cause -# temporary odd sampling decisions). -CacheCapacity = 1000 - -# MaxAlloc is optional. If set, it must be an integer >= 0. 64-bit values are -# supported. -# If set to a non-zero value, once per tick (see SendTicker) the collector -# will compare total allocated bytes to this value. If allocation is too -# high, cache capacity will be adjusted according to the setting for -# CacheOverrunStrategy. -# Useful values for this setting are generally in the range of 75%-90% of -# available system memory. -MaxAlloc = 0 - -################### -## Logrus Logger ## -################### - -# LogrusLogger is a section of the config only used if you are using the -# LogrusLogger to send all logs to STDOUT using the logrus package. If you are -# using a different logger (eg Opsramp logger) you can leave all this -# commented out. -[LogrusLogger] - -# LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] -LogFormatter = "logfmt" - -# LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr", "file"] -LogOutput = "file" - -## LogrusLogger.File - specifies configs for logs when LogOutput is set to "file" -[LogrusLogger.File] - -# FileName specifies the location where the logs are supposed be stored -FileName = "/var/log/opsramp/tracing-proxy.log" - -# MaxSize is the maximum size in megabytes of the log file before it gets rotated. 
-MaxSize = 1 - -# MaxBackups is the maximum number of old log files to retain. -MaxBackups = 3 - -# Compress determines if the rotated log files should be compressed -# using gzip. -Compress = true - - -####################### -## Prometheus Metrics ## -####################### - -[OpsRampMetrics] -# MetricsListenAddr determines the interface and port on which Prometheus will -# listen for requests for /metrics. Must be different from the main Tracing-proxy -# listener. -# Not eligible for live reload. -MetricsListenAddr = "localhost:2112" - -# OpsRampMetricsAPI is the URL for the upstream OpsRamp API. -# Not Eligible for live reload. -OpsRampMetricsAPI = - -# OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. -# Not Eligible for live reload. -OpsRampTenantID = - -# OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. -# This is separate from the APIKeys used to authenticate regular -# traffic. -# Not Eligible for live reload. -OpsRampMetricsAPIKey = - -# OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. -# This is separate from the APISecret used to authenticate regular -# traffic. -# Not Eligible for live reload. -OpsRampMetricsAPISecret = - -# OpsRampMetricsReportingInterval is frequency specified in seconds at which -# the metrics are collected and sent to OpsRamp -# Not Eligible for live reload. -OpsRampMetricsReportingInterval = 10 - -# OpsRampMetricsRetryCount is the number of times we retry incase the send fails -# Not Eligible for live reload. -OpsRampMetricsRetryCount = 2 - -# ProxyProtocol accepts http and https -# Not Eligible for live reload. -ProxyProtocol = "" - -# ProxyServer takes the proxy server address -# Not Eligible for live reload. -ProxyServer = "" - -# ProxyPort takes the proxy server port -# Not Eligible for live reload. -ProxyPort = 3128 - -# ProxyUserName takes the proxy username -# Not Eligible for live reload. 
-ProxyUserName = "" - -# ProxyPassword takes the proxy password -# Not Eligible for live reload. -ProxyPassword = "" - -# OpsRampMetricsList is a list of regular expressions which match the metric -# names. Keep the list as small as possible since too many regular expressions can lead to bad performance. -# Internally all the regex in the list are concatinated using '|' to make the computation little faster. -# Not Eligible for live reload -OpsRampMetricsList = [".*"] - - -[GRPCServerParameters] - -# MaxConnectionIdle is a duration for the amount of time after which an -# idle connection would be closed by sending a GoAway. Idleness duration is -# defined since the most recent time the number of outstanding RPCs became -# zero or the connection establishment. -# 0s sets duration to infinity which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219 -# Not eligible for live reload. -# MaxConnectionIdle = "1m" - -# MaxConnectionAge is a duration for the maximum amount of time a -# connection may exist before it will be closed by sending a GoAway. A -# random jitter of +/-10% will be added to MaxConnectionAge to spread out -# connection storms. -# 0s sets duration to infinity which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 -# Not eligible for live reload. -# MaxConnectionAge = "0s" - -# MaxConnectionAgeGrace is an additive period after MaxConnectionAge after -# which the connection will be forcibly closed. -# 0s sets duration to infinity which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 -# Not eligible for live reload. 
-# MaxConnectionAgeGrace = "0s" - -# After a duration of this time if the server doesn't see any activity it -# pings the client to see if the transport is still alive. -# If set below 1s, a minimum value of 1s will be used instead. -# 0s sets duration to 2 hours which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 -# Not eligible for live reload. -# Time = "10s" - -# After having pinged for keepalive check, the server waits for a duration -# of Timeout and if no activity is seen even after that the connection is -# closed. -# 0s sets duration to 20 seconds which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 -# Not eligible for live reload. -# Timeout = "2s" - - - -################################ -## Sample Cache Configuration ## -################################ - -# Sample Cache Configuration controls the sample cache used to retain information about trace -# status after the sampling decision has been made. - -[SampleCacheConfig] - -# Type controls the type of sample cache used. -# "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is -# 5x the size of the trace cache. This is Tracing-proxy's original sample cache strategy. -# "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember -# a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. -# It is also more configurable. The cuckoo filter is recommended for most installations. -# Default is "legacy". -# Not eligible for live reload (you cannot change the type of cache with reload). -# Type = "cuckoo" - -# KeptSize controls the number of traces preserved in the cuckoo kept traces cache. 
-# Tracing-proxy keeps a record of each trace that was kept and sent to Opsramp, along with some -# statistical information. This is most useful in cases where the trace was sent before sending -# the root span, so that the root span can be decorated with accurate metadata. -# Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). -# Does not apply to the "legacy" type of cache. -# Eligible for live reload. -# KeptSize = 10_000 - -# DroppedSize controls the size of the cuckoo dropped traces cache. -# This cache consumes 4-6 bytes per trace at a scale of millions of traces. -# Changing its size with live reload sets a future limit, but does not have an immediate effect. -# Default is 1_000_000 traces. -# Does not apply to the "legacy" type of cache. -# Eligible for live reload. -# DroppedSize = 1_000_000 - -# SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates -# the remaining capacity of its dropped traces cache and possibly cycles it. -# This cache is quite resilient so it doesn't need to happen very often, but the -# operation is also inexpensive. -# Default is 10 seconds. -# Does not apply to the "legacy" type of cache. -# Eligible for live reload. -# SizeCheckInterval = "10s" diff --git a/config_complete.yaml b/config_complete.yaml new file mode 100644 index 0000000000..11de94b45f --- /dev/null +++ b/config_complete.yaml @@ -0,0 +1,403 @@ +######################## +## Trace Proxy Config ## +######################## + +# ListenAddr is the IP and port on which to listen for incoming events. Incoming +# traffic is expected to be HTTP, so if using SSL put something like nginx in +# front to do the TLS Termination. +ListenAddr: 0.0.0.0:8082 + +# GRPCListenAddr is the IP and port on which to listen for incoming events over +# gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put something like nginx in +# front to do the TLS Termination. 
+GRPCListenAddr: 0.0.0.0:4317 + +# PeerListenAddr is the IP and port on which to listen for traffic being +# rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL +# put something like nginx in front to do the decryption. Must be different from +# ListenAddr +PeerListenAddr: 0.0.0.0:8083 + +GRPCPeerListenAddr: 0.0.0.0:8084 + +# CompressPeerCommunication determines whether to compress span data +# it forwards to peers. If it costs money to transmit data between different +# instances (e.g. they're spread across AWS availability zones), then you +# almost certainly want compression enabled to reduce your bill. The option to +# disable it is provided as an escape hatch for deployments that value lower CPU +# utilization over data transfer costs. +CompressPeerCommunication: true + +# OpsrampAPI is the URL for the upstream Opsramp API. +OpsrampAPI: "" + +# Dataset you want to use for sampling +Dataset: "ds" + +#Tls Options +UseTls: true +UseTlsInsecure: false + +# LoggingLevel valid options are "debug", "info", "error", and "panic". +LoggingLevel: error + +# SendDelay is a short timer that will be triggered when a trace is complete. +# Trace Proxy will wait for this duration before actually sending the trace. The +# reason for this short delay is to allow for small network delays or clock +# jitters to elapse and any final spans to arrive before actually sending the +# trace. This supports duration strings with supplied units. Set to 0 for +# immediate sends. +SendDelay: 2s + +# BatchTimeout dictates how frequently to send unfulfilled batches. By default +# this will use the DefaultBatchTimeout in libtrace as its value, which is 100ms. +# Eligible for live reload. +BatchTimeout: 1s + +# TraceTimeout is a long timer; it represents the outside boundary of how long +# to wait before sending an incomplete trace. Normally traces are sent when the +# root span arrives. 
Sometimes the root span never arrives (due to crashes or +# whatever), and this timer will send a trace even without having received the +# root span. If you have particularly long-lived traces you should increase this +# timer. This supports duration strings with supplied units. +TraceTimeout: 60s + +# MaxBatchSize is the number of events to be included in the batch for sending +MaxBatchSize: 500 + +# SendTicker is a short timer; it determines the duration to use to check for traces to send +SendTicker: 100ms + +# UpstreamBufferSize and PeerBufferSize control how large of an event queue to use +# when buffering events that will be forwarded to peers or the upstream API. +UpstreamBufferSize: 1000 +PeerBufferSize: 1000 + +# AddHostMetadataToTrace determines whether to add information about +# the host that Refinery is running on to the spans that it processes. +# If enabled, information about the host will be added to each span with the +# prefix `meta.refinery.`. +# Currently, the only value added is 'meta.refinery.local_hostname'. +AddHostMetadataToTrace: false + +# EnvironmentCacheTTL is the amount of time a cache entry will live that associates +# an API key with an environment name. +# Cache misses lookup the environment name using OpsRampAPI config value. +# Default is 1 hour ("1h"). +EnvironmentCacheTTL: "1h" + +# QueryAuthToken, if specified, provides a token that must be specified with +# the header "X-OpsRamp-Tracing-Proxy-Query" in order for a /query request to succeed. +# These /query requests are intended for debugging OpsRamp-Tracing-Proxy installations and +# are not typically needed in normal operation. +# Can be specified in the environment as TRACING_PROXY_QUERY_AUTH_TOKEN. +# If left unspecified, the /query endpoints are inaccessible. 
+# QueryAuthToken: "some-random-value" + +# AddRuleReasonToTrace causes traces that are sent to OpsRamp to include a field which +# contains text indicating which rule was evaluated that caused the trace to be included. +AddRuleReasonToTrace: true + +# AdditionalErrorFields should be a list of span fields that should be included when logging +# errors that happen during ingestion of events (for example, the span too large error). +# This is primarily useful in trying to track down misbehaving senders in a large installation. +# The fields `dataset`, `apihost`, and `environment` are always included. +# If a field is not present in the span, it will not be present in the error log. +# Default is ["trace.span_id"]. +AdditionalErrorFields: + - trace.span_id + +# AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate +# the number of child spans on the trace at the time the sampling decision was made. +# This value is available to the rules-based sampler, making it possible to write rules that +# are dependent upon the number of spans in the trace. +# Default is false. +AddSpanCountToRoot: false + +# CacheOverrunStrategy controls the cache management behavior under memory pressure. +# "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, +# which is generally not helpful unless it occurs because of a permanent change in traffic patterns. +# In the "impact" strategy, the items having the most impact on the cache size are +# ejected from the cache earlier than normal but the cache is not resized. +# In all cases, it only applies if MaxAlloc is nonzero. +# Default is "resize" for compatibility but "impact" is recommended for most installations. 
+CacheOverrunStrategy: "impact" + +######################### +## Proxy Configuration ## +######################### +ProxyConfiguration: + # Protocol accepts http and https + Protocol: "http" + # Host takes the proxy server address + Host: "" + # Port takes the proxy server port + Port: 3128 + # UserName takes the proxy username + Username: "" + # Password takes the proxy password + Password: "" + +################################## +## Authentication Configuration ## +################################## +AuthConfiguration: + # SkipAuth - skips authentication while sending requests (only to be used for debugging) + SkipAuth: false + + # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made + Endpoint: "" + # Key - authentication key provided in OpsRamp Portal + Key: "" + # Secret - authentication Secret provided in OpsRamp Portal + Secret: "" + # TenantId - tenant/client id to which the traces are to be posted + TenantId: "" + +############################ +## Implementation Choices ## +############################ +# Each of the config options below chooses an implementation of a Trace Proxy +# component to use. Depending on the choice, there may be more configuration +# required below in the section for that choice. Changing implementation choices +# requires a process restart. +# Collector describes which collector to use for collecting traces. The only +# current valid option is "InMemCollector". More can be added by adding +# implementations of the Collector interface. +Collector: "InMemCollector" + +# InMemCollector brings together all the settings that are relevant to +# collecting spans together to make traces. +InMemCollector: + + # The collection cache is used to collect all spans into a trace as well as + # remember the sampling decision for any spans that might come in after the + # trace has been marked "complete" (either by timing out or seeing the root + # span). 
The number of traces in the cache should be many multiples (100x to + # 1000x) of the total number of concurrently active traces (trace throughput * + # trace duration). + CacheCapacity: 1000 + + # MaxAlloc is optional. If set, it must be an integer >= 0. + # If set to a non-zero value, once per tick (see SendTicker) the collector + # will compare total allocated bytes to this value. If allocation is too + # high, cache capacity will be reduced and an error will be logged. + # Useful values for this setting are generally in the range of 75%-90% of + # available system memory. Using 80% is recommended. + # This value should be set according to the resources.limits.memory + # By default that setting is 4GB, and this is set to 80% of that limit + # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,836 + # MaxAlloc: 3435973836 + MaxAlloc: 0 + +##################### +## Peer Management ## +##################### + +# Configure how OpsRamp-Tracing-Proxy peers are discovered and managed +PeerManagement: + # Strategy controls the way that traces are assigned to Trace Proxy nodes. + # The "legacy" strategy uses a simple algorithm that unfortunately causes + # 1/2 of the in-flight traces to be assigned to a different node whenever the + # number of nodes changes. + # The legacy strategy is deprecated and is intended to be removed in a future release. + # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the + # number of nodes) are disrupted when the node count changes. + # Not eligible for live reload. + Strategy: "hash" + + ########################################################### + ###### File (Suitable only for VM based deployments) ###### + ########################################################### + # Type: "file" + # + # # Peers is the list of all servers participating in this proxy cluster. Events + # # will be sharded evenly across all peers based on the Trace ID.
Values here + # # should be the base URL used to access the peer, and should include scheme, + # # hostname (or ip address) and port. All servers in the cluster should be in + # # this list, including this host. + # Peers: [ + # "http://127.0.0.1:8084", #only grpc peer listener used + # # "http://127.0.0.1:8083", + # # "http://10.1.2.3.4:8080", + # # "http://refinery-1231:8080", + # # "http://peer-3.fqdn" // assumes port 80 + # ] + ########################################################### + + ########################################################### + ###### Redis (Suitable for all types of deployments) ###### + ########################################################### + # The type should always be redis when deployed to Kubernetes environments + Type: "redis" + + # RedisHost is used to connect to redis for peer cluster membership management. + # Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes + # precedence and this value is ignored. + # Not eligible for live reload. + # RedisHost will default to the name used for the release or name overrides depending on what is used, + # but can be overridden to a specific value. + RedisHost: 0.0.0.0:22122 + + # RedisUsername is the username used to connect to redis for peer cluster membership management. + # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes + # precedence and this value is ignored. + # Not eligible for live reload. + RedisUsername: "" + + # RedisPassword is the password used to connect to redis for peer cluster membership management. + # If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes + # precedence and this value is ignored. + # Not eligible for live reload. + RedisPassword: "" + + # RedisPrefix is a string used as a prefix for the keys in redis while storing + # the peer membership.
It might be useful to set this in any situation where + # multiple trace-proxy clusters or multiple applications want to share a single + # Redis instance. It may not be blank. + RedisPrefix: "tracing-proxy" + + # RedisDatabase is an integer from 0-15 indicating the database number to use + # for the Redis instance storing the peer membership. It might be useful to set + # this in any situation where multiple trace-proxy clusters or multiple + # applications want to share a single Redis instance. + RedisDatabase: 0 + + # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. + # Not eligible for live reload. + UseTLS: false + + # UseTLSInsecure disables certificate checks + # Not eligible for live reload. + UseTLSInsecure: false + + # IdentifierInterfaceName is optional. + # Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. + # When configured the pod's IP will be used in the peer list + IdentifierInterfaceName: eth0 + + # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first + # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use + # the first IPV6 unicast address found. + UseIPV6Identifier: false + ########################################################### + +# LogrusLogger is a section of the config only used if you are using the +# LogrusLogger to send all logs to STDOUT using the logrus package. +LogrusLogger: + # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] + LogFormatter: 'json' + # LogOutput specifies where the logs are supposed to be written. 
Accepts one of ["stdout", "stderr", "file"] + LogOutput: 'stdout' + + # specifies configs for logs when LogOutput is set to "file" + File: + # FileName specifies the location where the logs are supposed to be stored + FileName: "/var/log/opsramp/tracing-proxy.log" + # MaxSize is the maximum size in megabytes of the log file before it gets rotated. + MaxSize: 1 + # MaxBackups is the maximum number of old log files to retain. + MaxBackups: 3 + # Compress determines if the rotated log files should be compressed + # using gzip. + Compress: true + +MetricsConfig: + # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp + Enable: true + + # ListenAddr determines the interface and port on which Prometheus will + # listen for requests for /metrics. Must be different from the main Trace Proxy + # listener. + ListenAddr: '0.0.0.0:2112' + + # OpsRampAPI is the URL for the upstream OpsRamp API. + OpsRampAPI: "" + + # ReportingInterval is the frequency specified in seconds at which + # the metrics are collected and sent to OpsRamp + ReportingInterval: 10 + + # OpsRampMetricsRetryCount is the number of times we retry in case the send fails + RetryCount: 2 + + # MetricsList is a list of regular expressions which match the metric + # names. Keep the list as small as possible since too many regular expressions can lead to bad performance. + # Internally, all the items in the list are concatenated using '|' to make the computation faster. + MetricsList: [ ".*" ] + +GRPCServerParameters: +# MaxConnectionIdle is a duration for the amount of time after which an +# idle connection would be closed by sending a GoAway. Idleness duration is +# defined since the most recent time the number of outstanding RPCs became +# zero or the connection establishment.
+# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219 +# MaxConnectionIdle: "1m" + +# MaxConnectionAge is a duration for the maximum amount of time a +# connection may exist before it will be closed by sending a GoAway. A +# random jitter of +/-10% will be added to MaxConnectionAge to spread out +# connection storms. +# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 +# MaxConnectionAge: "0s" + +# MaxConnectionAgeGrace is an additive period after MaxConnectionAge after +# which the connection will be forcibly closed. +# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 +# MaxConnectionAgeGrace: "0s" + +# After a duration of this time if the server doesn't see any activity it +# pings the client to see if the transport is still alive. +# If set below 1s, a minimum value of 1s will be used instead. +# 0s sets duration to 2 hours which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 +# Time: "10s" + +# After having pinged for keepalive check, the server waits for a duration +# of Timeout and if no activity is seen even after that the connection is +# closed. 
+# 0s sets duration to 20 seconds which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 +# Timeout: "2s" + +################################ +## Sample Cache Configuration ## +################################ + +# Sample Cache Configuration controls the sample cache used to retain information about trace +# status after the sampling decision has been made. +SampleCacheConfig: +# Type controls the type of sample cache used. +# "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is +# 5x the size of the trace cache. This is Refinery's original sample cache strategy. +# "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember +# a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. +# It is also more configurable. The cuckoo filter is recommended for most installations. +# Default is "legacy". +# Type: "cuckoo" + +# KeptSize controls the number of traces preserved in the cuckoo kept traces cache. +# Refinery keeps a record of each trace that was kept and sent to Honeycomb, along with some +# statistical information. This is most useful in cases where the trace was sent before sending +# the root span, so that the root span can be decorated with accurate metadata. +# Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). +# It Does not apply to the "legacy" type of cache. +# KeptSize: 10_000 + +# DroppedSize controls the size of the cuckoo dropped traces cache. +# This cache consumes 4-6 bytes per trace at a scale of millions of traces. +# Changing its size with live reload sets a future limit, but does not have an immediate effect. +# Default is 1_000_000 traces. +# It Does not apply to the "legacy" type of cache. 
+# DroppedSize: 1_000_000 + +# SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates +# the remaining capacity of its dropped traces cache and possibly cycles it. +# This cache is quite resilient so it doesn't need to happen very often, but the +# operation is also inexpensive. +# Default is 10 seconds. +# It Does not apply to the "legacy" type of cache. +# SizeCheckInterval: "10s" diff --git a/go.mod b/go.mod index 94b6699575..1cd12b0c43 100644 --- a/go.mod +++ b/go.mod @@ -17,8 +17,8 @@ require ( github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 - github.com/opsramp/husky v0.0.0-20230329062823-ef17d98937d7 - github.com/opsramp/libtrace-go v0.0.0-20230314065618-8eadd7f71090 + github.com/opsramp/husky v0.0.0-20230330140711-461fc680ffa0 + github.com/opsramp/libtrace-go v0.0.0-20230330140347-a09e0a6b627f github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 47542c9993..37ca30a94d 100644 --- a/go.sum +++ b/go.sum @@ -579,10 +579,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/opsramp/husky v0.0.0-20230329062823-ef17d98937d7 h1:xP2qFwbG494966JCq3Qjx24SP45IrbqUVqinp1hT4X0= -github.com/opsramp/husky v0.0.0-20230329062823-ef17d98937d7/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go 
v0.0.0-20230314065618-8eadd7f71090 h1:tamz5ez5Fei3glpqMrvLBldrG9Kf/+lkh/pKnBX99XA= -github.com/opsramp/libtrace-go v0.0.0-20230314065618-8eadd7f71090/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/husky v0.0.0-20230330140711-461fc680ffa0 h1:qj7a1B/GFWxFVWvpzTV2V0bbxGNFI8bGM+ElTxwJP20= +github.com/opsramp/husky v0.0.0-20230330140711-461fc680ffa0/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= +github.com/opsramp/libtrace-go v0.0.0-20230330140347-a09e0a6b627f h1:UxD+NprBlcKATAwSSWlfHWYeDXQZruMdGFx9EUFif3w= +github.com/opsramp/libtrace-go v0.0.0-20230330140347-a09e0a6b627f/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= diff --git a/metrics/opsramp.go index a804199794..b980aa8c19 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -26,6 +26,10 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" ) +const ( + missingMetricsWriteScope = "auth token provided does not have metrics:write scope" +) + var metricsServer sync.Once type OpsRampMetrics struct { @@ -36,32 +40,31 @@ type OpsRampMetrics struct { metrics map[string]interface{} lock sync.RWMutex - Client http.Client - oAuthToken *OpsRampAuthTokenResponse + Client http.Client + apiEndpoint string tenantID string - apiKey string - apiSecret string retryCount int64 re *regexp.Regexp + prefix string - prefix string + authTokenEndpoint string + apiKey string + apiSecret string + oAuthToken *OpsRampAuthTokenResponse } func (p *OpsRampMetrics) Start() error { p.Logger.Debug().Logf("Starting OpsRampMetrics") defer func() { p.Logger.Debug().Logf("Finished 
starting OpsRampMetrics") }() - metricsConfig, err := p.Config.GetOpsRampMetricsConfig() - if err != nil { - return err - } + metricsConfig := p.Config.GetMetricsConfig() if p.Config.GetSendMetricsToOpsRamp() { go func() { - metricsTicker := time.NewTicker(time.Duration(metricsConfig.OpsRampMetricsReportingInterval) * time.Second) + metricsTicker := time.NewTicker(time.Duration(metricsConfig.ReportingInterval) * time.Second) defer metricsTicker.Stop() - p.Populate(metricsConfig) + p.Populate() // populating the oAuth Token Initially err := p.RenewOAuthToken() @@ -73,6 +76,13 @@ func (p *OpsRampMetrics) Start() error { statusCode, err := p.PushMetrics() if err != nil { p.Logger.Error().Logf("error while pushing metrics with statusCode: %d and Error: %v", statusCode, err) + if err.Error() == missingMetricsWriteScope { + p.Logger.Info().Logf("renewing auth token since the existing token is missing metrics:write scope") + err := p.RenewOAuthToken() + if err != nil { + p.Logger.Error().Logf("error while initializing oAuth Token Err: %v", err) + } + } } } }() @@ -85,7 +95,7 @@ func (p *OpsRampMetrics) Start() error { muxer.Handle("/metrics", promhttp.Handler()) go func() { - err := http.ListenAndServe(metricsConfig.MetricsListenAddr, muxer) + err := http.ListenAndServe(metricsConfig.ListenAddr, muxer) if err != nil { p.Logger.Error().Logf("failed to create /metrics server Error: %v", err) } @@ -265,35 +275,47 @@ type OpsRampAuthTokenResponse struct { Scope string `json:"scope"` } -func (p *OpsRampMetrics) Populate(metricsConfig *config.OpsRampMetricsConfig) { - p.apiEndpoint = metricsConfig.OpsRampMetricsAPI - p.apiKey = metricsConfig.OpsRampMetricsAPIKey - p.apiSecret = metricsConfig.OpsRampMetricsAPISecret - p.tenantID = metricsConfig.OpsRampTenantID - p.retryCount = metricsConfig.OpsRampMetricsRetryCount +func (p *OpsRampMetrics) Populate() { + + metricsConfig := p.Config.GetMetricsConfig() + authConfig := p.Config.GetAuthConfig() + proxyConfig := 
p.Config.GetProxyConfig() + + p.apiEndpoint = metricsConfig.OpsRampAPI + p.retryCount = metricsConfig.RetryCount + + p.authTokenEndpoint = authConfig.Endpoint + p.apiKey = authConfig.Key + p.apiSecret = authConfig.Secret + p.tenantID = authConfig.TenantId // Creating Regex for a list of metrics regexString := ".*" // the default value is to take everything - if len(metricsConfig.OpsRampMetricsList) >= 1 { - regexString = metricsConfig.OpsRampMetricsList[0] - for index := 0; index < len(metricsConfig.OpsRampMetricsList); index++ { - regexString = fmt.Sprintf("%s|%s", regexString, metricsConfig.OpsRampMetricsList[index]) + if len(metricsConfig.MetricsList) >= 1 { + regexString = metricsConfig.MetricsList[0] + for index := 0; index < len(metricsConfig.MetricsList); index++ { + regexString = fmt.Sprintf("%s|%s", regexString, metricsConfig.MetricsList[index]) } } p.re = regexp.MustCompile(regexString) proxyUrl := "" - if metricsConfig.ProxyServer != "" && metricsConfig.ProxyProtocol != "" { - proxyUrl = fmt.Sprintf("%s://%s:%d/", metricsConfig.ProxyProtocol, metricsConfig.ProxyServer, metricsConfig.ProxyPort) - if metricsConfig.ProxyUserName != "" && metricsConfig.ProxyPassword != "" { - proxyUrl = fmt.Sprintf("%s://%s:%s@%s:%d", metricsConfig.ProxyProtocol, metricsConfig.ProxyUserName, metricsConfig.ProxyPassword, metricsConfig.ProxyServer, metricsConfig.ProxyPort) - p.Logger.Debug().Logf("Using Authentication for Proxy Communication for Metrics") + if proxyConfig.Host != "" && proxyConfig.Protocol != "" { + proxyUrl = fmt.Sprintf("%s://%s:%d/", proxyConfig.Protocol, proxyConfig.Host, proxyConfig.Port) + if proxyConfig.Username != "" && proxyConfig.Password != "" { + proxyUrl = fmt.Sprintf("%s://%s:%s@%s:%d", proxyConfig.Protocol, proxyConfig.Username, proxyConfig.Password, proxyConfig.Host, proxyConfig.Port) + p.Logger.Debug().Logf("Using Authentication for ProxyConfiguration Communication for Metrics") } } p.Client = http.Client{ - Transport: &http.Transport{Proxy: 
http.ProxyFromEnvironment}, - Timeout: time.Duration(240) * time.Second, + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + MaxIdleConns: 10, + MaxConnsPerHost: 10, + IdleConnTimeout: 5 * time.Minute, + }, + Timeout: time.Duration(240) * time.Second, } if proxyUrl != "" { proxyURL, err := url.Parse(proxyUrl) @@ -301,8 +323,13 @@ func (p *OpsRampMetrics) Populate(metricsConfig *config.OpsRampMetricsConfig) { p.Logger.Error().Logf("skipping proxy err: %v", err) } else { p.Client = http.Client{ - Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)}, - Timeout: time.Duration(240) * time.Second, + Transport: &http.Transport{ + Proxy: http.ProxyURL(proxyURL), + MaxIdleConns: 10, + MaxConnsPerHost: 10, + IdleConnTimeout: 5 * time.Minute, + }, + Timeout: time.Duration(240) * time.Second, } } } @@ -474,9 +501,12 @@ func (p *OpsRampMetrics) PushMetrics() (int, error) { } req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") - //req.Header.Set("Connection", "close") req.Header.Set("Content-Encoding", "snappy") req.Header.Set("Content-Type", "application/x-protobuf") + + if !strings.Contains(p.oAuthToken.Scope, "metrics:write") { + return -1, fmt.Errorf(missingMetricsWriteScope) + } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", p.oAuthToken.AccessToken)) resp, err := p.SendWithRetry(req) @@ -500,7 +530,7 @@ func (p *OpsRampMetrics) PushMetrics() (int, error) { func (p *OpsRampMetrics) RenewOAuthToken() error { p.oAuthToken = new(OpsRampAuthTokenResponse) - endpoint := fmt.Sprintf("%s/auth/oauth/token", strings.TrimRight(p.apiEndpoint, "/")) + endpoint := fmt.Sprintf("%s/auth/oauth/token", strings.TrimRight(p.authTokenEndpoint, "/")) requestBody := strings.NewReader("client_id=" + p.apiKey + "&client_secret=" + p.apiSecret + "&grant_type=client_credentials") diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 96cb315fb0..b09cc155f7 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -19,7 +19,12 @@ import ( func (r 
*Router) postOTLP(w http.ResponseWriter, req *http.Request) { ri := huskyotlp.GetRequestInfoFromHttpHeaders(req.Header) - r.Logger.Info().Logf("ri: %+v", ri) + if ri.ApiTenantId == "" { + ri.ApiTenantId, _ = r.Config.GetTenantId() + } + if ri.Dataset == "" { + ri.Dataset, _ = r.Config.GetDataset() + } result, err := huskyotlp.TranslateTraceRequestFromReader(req.Body, ri) if err != nil { @@ -27,12 +32,7 @@ func (r *Router) postOTLP(w http.ResponseWriter, req *http.Request) { return } - token := ri.ApiToken - tenantID := ri.ApiTenantId - if tenantID == "" { - tenantID, _ = r.Config.GetTenantId() - } - if err := processTraceRequest(req.Context(), r, result.Batches, ri.Dataset, token, tenantID); err != nil { + if err := processTraceRequest(req.Context(), r, result.Batches, ri.Dataset, ri.ApiToken, ri.ApiTenantId); err != nil { r.handlerReturnWithError(w, ErrUpstreamFailed, err) } } @@ -40,25 +40,21 @@ func (r *Router) postOTLP(w http.ResponseWriter, req *http.Request) { func (r *Router) Export(ctx context.Context, req *collectortrace.ExportTraceServiceRequest) (*collectortrace.ExportTraceServiceResponse, error) { ri := huskyotlp.GetRequestInfoFromGrpcMetadata(ctx) + if ri.ApiTenantId == "" { + ri.ApiTenantId, _ = r.Config.GetTenantId() + } + if ri.Dataset == "" { + ri.Dataset, _ = r.Config.GetDataset() + } + r.Metrics.Increment(r.incomingOrPeer + "_router_batch") result, err := huskyotlp.TranslateTraceRequest(req, ri) if err != nil { return nil, huskyotlp.AsGRPCError(err) } - token := ri.ApiToken - tenantID := ri.ApiTenantId - if len(tenantID) == 0 { - opsrampTenantID, _ := r.Config.GetTenantId() - tenantID = opsrampTenantID - } - if len(ri.Dataset) == 0 { - dataset, _ := r.Config.GetDataset() - ri.Dataset = dataset - } - - if err := processTraceRequest(ctx, r, result.Batches, ri.Dataset, token, tenantID); err != nil { + if err := processTraceRequest(ctx, r, result.Batches, ri.Dataset, ri.ApiToken, ri.ApiTenantId); err != nil { return nil, huskyotlp.AsGRPCError(err) } 
@@ -72,7 +68,6 @@ func processTraceRequest( datasetName string, token string, tenantID string) error { - var requestID types.RequestIDContextKey apiHost, err := router.Config.GetOpsrampAPI() if err != nil { diff --git a/route/route.go b/route/route.go index 517680ebee..294b187f94 100644 --- a/route/route.go +++ b/route/route.go @@ -26,7 +26,6 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" "gopkg.in/yaml.v2" // grpc/gzip compressor, auto registers on import @@ -732,15 +731,6 @@ func unmarshal(r *http.Request, data io.Reader, v interface{}) error { } } -// getFirstValueFromMetadata returns the first value of a metadata entry using a -// case-insensitive key -func getFirstValueFromMetadata(key string, md metadata.MD) string { - if values := md.Get(key); len(values) > 0 { - return values[0] - } - return "" -} - type environmentCache struct { mutex sync.RWMutex items map[string]*cacheItem diff --git a/rules_complete.toml b/rules_complete.toml deleted file mode 100644 index 1ff80e7295..0000000000 --- a/rules_complete.toml +++ /dev/null @@ -1,229 +0,0 @@ -############################ -## Sampling Rules Config ## -############################ - -# DryRun - If enabled, marks traces that would be dropped given current sampling rules, -# and sends all traces regardless -DryRun = true - -# DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to tracing_proxy_kept -DryRunFieldName = "fromProxy" - -# DeterministicSampler is a section of the config for manipulating the -# Deterministic Sampler implementation. This is the simplest sampling algorithm -# - it is a static sample rate, choosing traces randomly to either keep or send -# (at the appropriate rate). It is not influenced by the contents of the trace. -Sampler = "DeterministicSampler" - -# SampleRate is the rate at which to sample. 
It indicates a ratio, where one -# sample trace is kept for every n traces seen. For example, a SampleRate of 30 -# will keep 1 out of every 30 traces. The choice on whether to keep any specific -# trace is random, so the rate is approximate. -# Eligible for live reload. -SampleRate = 1 - -# [dataset1] - -# # Note: If your dataset name contains a space, you will have to escape the dataset name -# # using single quotes, such as ['dataset 1'] - -# # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler -# # implementation. This sampler collects the values of a number of fields from a -# # trace and uses them to form a key. This key is handed to the standard dynamic -# # sampler algorithm which generates a sample rate based on the frequency with -# # which that key has appeared in the previous ClearFrequencySec seconds. See -# # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics -# # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from -# # that package. -# Sampler = "DynamicSampler" - -# # SampleRate is the goal rate at which to sample. It indicates a ratio, where -# # one sample trace is kept for every n traces seen. For example, a SampleRate of -# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic -# # sampler, who assigns a sample rate for each trace based on the fields selected -# # from that trace. -# # Eligible for live reload. -# SampleRate = 1 - -# # FieldList is a list of all the field names to use to form the key that will be -# # handed to the dynamic sampler. The cardinality of the combination of values -# # from all of these keys should be reasonable in the face of the frequency of -# # those keys. If the combination of fields in these keys essentially makes them -# # unique, the dynamic sampler will do no sampling. If the keys have too few -# # values, you won't get samples of the most interesting traces. 
A good key -# # selection will have consistent values for high frequency boring traffic and -# # unique values for outliers and interesting traffic. Including an error field -# # (or something like HTTP status code) is an excellent choice. As an example, -# # assuming 30 or so endpoints, a combination of HTTP endpoint and status code -# # would be a good set of keys in order to let you see accurately use of all -# # endpoints and call out when there is failing traffic to any endpoint. Field -# # names may come from any span in the trace. -# # Eligible for live reload. -# FieldList = [""] - -# # UseTraceLength will add the number of spans in the trace in to the dynamic -# # sampler as part of the key. The number of spans is exact, so if there are -# # normally small variations in trace length you may want to leave this off. If -# # traces are consistent lengths and changes in trace length is a useful -# # indicator of traces you'd like to see in Opsramp, set this to true. -# # Eligible for live reload. -# UseTraceLength = true - -# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field -# # to the root span of the trace containing the key used by the sampler to decide -# # the sample rate. This can be helpful in understanding why the sampler is -# # making certain decisions about sample rate and help you understand how to -# # better choose the sample rate key (aka the FieldList setting above) to use. -# AddSampleRateKeyToTrace = true - -# # AddSampleRateKeyToTraceField is the name of the field the sampler will use -# # when adding the sample rate key to the trace. This setting is only used when -# # AddSampleRateKeyToTrace is true. -# AddSampleRateKeyToTraceField = "" - -# # ClearFrequencySec is the name of the field the sampler will use to determine -# # the period over which it will calculate the sample rate. This setting defaults -# # to 30. -# # Eligible for live reload. 
-# ClearFrequencySec = 60 - -# [dataset2] - -# # EMADynamicSampler is a section of the config for manipulating the Exponential -# # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, -# # it attempts to average a given sample rate, weighting rare traffic and frequent -# # traffic differently so as to end up with the correct average. -# # -# # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended -# # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs -# # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential -# # Moving Average of counts seen per key, and adjusts this average at regular intervals. -# # The weight applied to more recent intervals is defined by `weight`, a number between -# # (0, 1) - larger values weight the average more toward recent observations. In other words, -# # a larger weight will cause sample rates more quickly adapt to traffic patterns, -# # while a smaller weight will result in sample rates that are less sensitive to bursts or drops -# # in traffic and thus more consistent over time. -# # -# # Keys that are not found in the EMA will always have a sample -# # rate of 1. Keys that occur more frequently will be sampled on a logarithmic -# # curve. In other words, every key will be represented at least once in any -# # given window and more frequent keys will have their sample rate -# # increased proportionally to wind up with the goal sample rate. -# Sampler = "EMADynamicSampler" - -# # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where -# # one sample trace is kept for every n traces seen. For example, a SampleRate of -# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic -# # sampler, who assigns a sample rate for each trace based on the fields selected -# # from that trace. -# # Eligible for live reload. 
-# GoalSampleRate = 1 - -# # FieldList is a list of all the field names to use to form the key that will be -# # handed to the dynamic sampler. The cardinality of the combination of values -# # from all of these keys should be reasonable in the face of the frequency of -# # those keys. If the combination of fields in these keys essentially makes them -# # unique, the dynamic sampler will do no sampling. If the keys have too few -# # values, you won't get samples of the most interesting traces. A good key -# # selection will have consistent values for high frequency boring traffic and -# # unique values for outliers and interesting traffic. Including an error field -# # (or something like HTTP status code) is an excellent choice. As an example, -# # assuming 30 or so endpoints, a combination of HTTP endpoint and status code -# # would be a good set of keys in order to let you see accurately use of all -# # endpoints and call out when there is failing traffic to any endpoint. Field -# # names may come from any span in the trace. -# # Eligible for live reload. -# FieldList = [""] - -# # UseTraceLength will add the number of spans in the trace in to the dynamic -# # sampler as part of the key. The number of spans is exact, so if there are -# # normally small variations in trace length you may want to leave this off. If -# # traces are consistent lengths and changes in trace length is a useful -# # indicator of traces you'd like to see in Opsramp, set this to true. -# # Eligible for live reload. -# UseTraceLength = true - -# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field -# # to the root span of the trace containing the key used by the sampler to decide -# # the sample rate. This can be helpful in understanding why the sampler is -# # making certain decisions about sample rate and help you understand how to -# # better choose the sample rate key (aka the FieldList setting above) to use. 
-# AddSampleRateKeyToTrace = true - -# # AddSampleRateKeyToTraceField is the name of the field the sampler will use -# # when adding the sample rate key to the trace. This setting is only used when -# # AddSampleRateKeyToTrace is true. -# AddSampleRateKeyToTraceField = "" - -# # AdjustmentInterval defines how often (in seconds) we adjust the moving average from -# # recent observations. Default 15s -# # Eligible for live reload. -# AdjustmentInterval = 15 - -# # Weight is a value between (0, 1) indicating the weighting factor used to adjust -# # the EMA. With larger values, newer data will influence the average more, and older -# # values will be factored out more quickly. In mathematical literature concerning EMA, -# # this is referred to as the `alpha` constant. -# # Default is 0.5 -# # Eligible for live reload. -# Weight = 0.5 - -# # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. -# # Once MaxKeys is reached, new keys will not be included in the sample rate map, but -# # existing keys will continue to be be counted. You can use this to keep the sample rate -# # map size under control. -# # Eligible for live reload -# MaxKeys = 0 - -# # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key -# # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to -# # decide what constitutes "zero". Keys with averages below this threshold will be removed -# # from the EMA. Default is the same as Weight, as this prevents a key with the smallest -# # integer value (1) from being aged out immediately. This value should generally be <= Weight, -# # unless you have very specific reasons to set it higher. -# # Eligible for live reload -# AgeOutValue = 0.5 - -# # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define -# # the burst detection threshold. 
If total counts observed for a given interval exceed the threshold -# # EMA is updated immediately, rather than waiting on the AdjustmentInterval. -# # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, -# # burst detection will kick in. -# # Eligible for live reload -# BurstMultiple = 2.0 - -# # BurstDetectionDelay indicates the number of intervals to run after Start is called before -# # burst detection kicks in. -# # Defaults to 3 -# # Eligible for live reload -# BurstDetectionDelay = 3 - -# [dataset3] - -# Sampler = "DeterministicSampler" -# SampleRate = 10 - -# [dataset4] - -# Sampler = "RulesBasedSampler" - -# [[dataset4.rule]] -# # Rule name -# name = "" -# # Drop Condition (examples: true, false) -# drop = -# [[dataset4.rule.condition]] -# # Field Name (example: status_code) -# field = "" -# # Operator Value (example: =) -# operator = "" -# # Field Value (example: 500) -# value = "" - - - -# [dataset5] - -# Sampler = "TotalThroughputSampler" -# GoalThroughputPerSec = 100 -# FieldList = "[]" diff --git a/rules_complete.yaml b/rules_complete.yaml new file mode 100644 index 0000000000..79932e515d --- /dev/null +++ b/rules_complete.yaml @@ -0,0 +1,261 @@ +############################ +## Sampling Rules Config ## +############################ + +# DryRun - If enabled, marks traces that would be dropped given current sampling rules, +# and sends all traces regardless +DryRun: false + +# DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to trace_proxy_kept +DryRunFieldName: trace_proxy_kept + +# DeterministicSampler is a section of the config for manipulating the +# Deterministic Sampler implementation. This is the simplest sampling algorithm +# - it is a static sample rate, choosing traces randomly to either keep or send +# (at the appropriate rate). It is not influenced by the contents of the trace. 
+Sampler: DeterministicSampler + +# SampleRate is the rate at which to sample. It indicates a ratio, where one +# sample trace is kept for every n traces seen. For example, a SampleRate of 30 +# will keep 1 out of every 30 traces. The choice on whether to keep any specific +# trace is random, so the rate is approximate. +# Eligible for live reload. +SampleRate: 1 + +dataset1: + + # Note: If your dataset name contains a space, you will have to escape the dataset name + # using single quotes, such as ['dataset 1'] + + # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler + # implementation. This sampler collects the values of a number of fields from a + # trace and uses them to form a key. This key is handed to the standard dynamic + # sampler algorithm which generates a sample rate based on the frequency with + # which that key has appeared in the previous ClearFrequencySec seconds. See + # https://github.com/honeycombio/dynsampler-go for more detail on the mechanics + # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from + # that package. + Sampler: DynamicSampler + + # SampleRate is the goal rate at which to sample. It indicates a ratio, where + # one sample trace is kept for every n traces seen. For example, a SampleRate of + # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # sampler, who assigns a sample rate for each trace based on the fields selected + # from that trace. + SampleRate: 2 + + # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler. + # The combination of values from all of these fields should reflect how interesting the trace is compared to + # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for + # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent + # choice. 
Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of + # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is + # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a + # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can + # become interesting when indicating an error) as a good set of fields since it will allowing proper sampling + # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint. + # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of + # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces. + # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore + # interesting traces, like traces that experienced a `500`, might not be sampled. + # Field names may come from any span in the trace. + FieldList: + - request.method + - http.target + - response.status_code + + # UseTraceLength will add the number of spans in the trace in to the dynamic + # sampler as part of the key. The number of spans is exact, so if there are + # normally small variations in trace length you may want to leave this off. If + # traces are consistent lengths and changes in trace length is a useful + # indicator of traces you'd like to see in Honeycomb, set this to true. + UseTraceLength: true + + # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # to the root span of the trace containing the key used by the sampler to decide + # the sample rate. 
This can be helpful in understanding why the sampler is + # making certain decisions about sample rate and help you understand how to + # better choose the sample rate key (aka the FieldList setting above) to use. + AddSampleRateKeyToTrace: true + + # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # when adding the sample rate key to the trace. This setting is only used when + # AddSampleRateKeyToTrace is true. + AddSampleRateKeyToTraceField: meta.refinery.dynsampler_key + + # ClearFrequencySec is the name of the field the sampler will use to determine + # the period over which it will calculate the sample rate. This setting defaults + # to 30. + ClearFrequencySec: 60 +dataset2: + + # EMADynamicSampler is a section of the config for manipulating the Exponential + # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, + # it attempts to average a given sample rate, weighting rare traffic and frequent + # traffic differently so as to end up with the correct average. + # + # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended + # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs + # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential + # Moving Average of counts seen per key, and adjusts this average at regular intervals. + # The weight applied to more recent intervals is defined by `weight`, a number between + # (0, 1) - larger values weight the average more toward recent observations. In other words, + # a larger weight will cause sample rates more quickly adapt to traffic patterns, + # while a smaller weight will result in sample rates that are less sensitive to bursts or drops + # in traffic and thus more consistent over time. + # + # Keys that are not found in the EMA will always have a sample + # rate of 1. Keys that occur more frequently will be sampled on a logarithmic + # curve. 
In other words, every key will be represented at least once in any + # given window and more frequent keys will have their sample rate + # increased proportionally to wind up with the goal sample rate. + Sampler: EMADynamicSampler + + # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where + # one sample trace is kept for every n traces seen. For example, a SampleRate of + # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # sampler, who assigns a sample rate for each trace based on the fields selected + # from that trace. + GoalSampleRate: 2 + + # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler. + # The combination of values from all of these fields should reflect how interesting the trace is compared to + # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for + # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent + # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of + # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is + # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a + # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can + # become interesting when indicating an error) as a good set of fields since it will allowing proper sampling + # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint. + # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of + # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces. 
+ # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore + # interesting traces, like traces that experienced a `500`, might not be sampled. + # Field names may come from any span in the trace. + FieldList: + - request.method + - http.target + - response.status_code + + # UseTraceLength will add the number of spans in the trace in to the dynamic + # sampler as part of the key. The number of spans is exact, so if there are + # normally small variations in trace length you may want to leave this off. If + # traces are consistent lengths and changes in trace length is a useful + # indicator of traces you'd like to see in Honeycomb, set this to true. + UseTraceLength: true + + # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # to the root span of the trace containing the key used by the sampler to decide + # the sample rate. This can be helpful in understanding why the sampler is + # making certain decisions about sample rate and help you understand how to + # better choose the sample rate key (aka the FieldList setting above) to use. + AddSampleRateKeyToTrace: true + + # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # when adding the sample rate key to the trace. This setting is only used when + # AddSampleRateKeyToTrace is true. + AddSampleRateKeyToTraceField: meta.refinery.dynsampler_key + + # AdjustmentInterval defines how often (in seconds) we adjust the moving average from + # recent observations. Default 15s + AdjustmentInterval: 15 + + # Weight is a value between (0, 1) indicating the weighting factor used to adjust + # the EMA. With larger values, newer data will influence the average more, and older + # values will be factored out more quickly. In mathematical literature concerning EMA, + # this is referred to as the `alpha` constant. 
+ # Default is 0.5 + Weight: 0.5 + + # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. + # Once MaxKeys is reached, new keys will not be included in the sample rate map, but + # existing keys will continue to be be counted. You can use this to keep the sample rate + # map size under control. + MaxKeys: 0 + + # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key + # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to + # decide what constitutes "zero". Keys with averages below this threshold will be removed + # from the EMA. Default is the same as Weight, as this prevents a key with the smallest + # integer value (1) from being aged out immediately. This value should generally be <= Weight, + # unless you have very specific reasons to set it higher. + AgeOutValue: 0.5 + + # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define + # the burst detection threshold. If total counts observed for a given interval exceed the threshold + # EMA is updated immediately, rather than waiting on the AdjustmentInterval. + # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, + # burst detection will kick in. + BurstMultiple: 2 + + # BurstDetectionDelay indicates the number of intervals to run after Start is called before + # burst detection kicks in. 
+ # Defaults to 3 + BurstDetectionDelay: 3 +dataset3: + Sampler: DeterministicSampler + SampleRate: 10 +dataset4: + Sampler: RulesBasedSampler + CheckNestedFields: false + rule: + - name: drop healthchecks + drop: true + condition: + - field: http.route + operator: '=' + value: /health-check + - name: keep slow 500 errors + SampleRate: 1 + condition: + - field: status_code + operator: '=' + value: 500 + - field: duration_ms + operator: '>=' + value: 1000.789 + - name: dynamically sample 200 responses + condition: + - field: status_code + operator: '=' + value: 200 + sampler: + EMADynamicSampler: + Sampler: EMADynamicSampler + GoalSampleRate: 15 + FieldList: + - request.method + - request.route + AddSampleRateKeyToTrace: true + AddSampleRateKeyToTraceField: meta.refinery.dynsampler_key + - name: dynamically sample 200 string responses + condition: + - field: status_code + operator: '=' + value: '200' + datatype: int + sampler: + EMADynamicSampler: + Sampler: EMADynamicSampler + GoalSampleRate: 15 + FieldList: + - request.method + - request.route + AddSampleRateKeyToTrace: true + AddSampleRateKeyToTraceField: meta.refinery.dynsampler_key + - name: sample traces originating from a service + Scope: span + SampleRate: 5 + condition: + - field: service name + operator: '=' + value: users + - field: meta.span_type + operator: '=' + value: root + - SampleRate: 10 +dataset5: + Sampler: TotalThroughputSampler + GoalThroughputPerSec: 100 + FieldList: '[request.method]' diff --git a/transmit/transmit.go b/transmit/transmit.go index f5248185e4..b6c54f6f1e 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -78,19 +78,15 @@ func (d *DefaultTransmission) Start() error { d.responseCanceler = canceler go d.processResponses(processCtx, d.LibhClient.TxResponses()) - //proxy support for traces - proto, _ := d.Config.GetProxyProtocol() - server, _ := d.Config.GetProxyServer() - port := d.Config.GetProxyPort() - username, _ := d.Config.GetProxyUsername() - password, _ := 
d.Config.GetProxyPassword() + // get proxy details + proxyConfig := d.Config.GetProxyConfig() proxyUrl := "" - if server != "" && proto != "" { - proxyUrl = fmt.Sprintf("%s://%s:%d/", proto, server, port) - if username != "" && password != "" { - proxyUrl = fmt.Sprintf("%s://%s:%s@%s:%d", proto, username, password, server, port) - d.Logger.Debug().Logf("Using Authentication for Proxy Communication for Traces") + if proxyConfig.Host != "" && proxyConfig.Protocol != "" { + proxyUrl = fmt.Sprintf("%s://%s:%d/", proxyConfig.Protocol, proxyConfig.Host, proxyConfig.Port) + if proxyConfig.Username != "" && proxyConfig.Password != "" { + proxyUrl = fmt.Sprintf("%s://%s:%s@%s:%d", proxyConfig.Protocol, proxyConfig.Username, proxyConfig.Password, proxyConfig.Host, proxyConfig.Port) + d.Logger.Debug().Logf("Using Authentication for ProxyConfiguration Communication for Traces") } os.Setenv("HTTPS_PROXY", proxyUrl) os.Setenv("HTTP_PROXY", proxyUrl) From af2eb3319f4636943209021f2effe8f321e1d37a Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Fri, 31 Mar 2023 09:51:59 +0530 Subject: [PATCH 296/351] updating helm with latest config --- build/opsramp-tracing-proxy/values.yaml | 327 ++++++++++++++---------- 1 file changed, 199 insertions(+), 128 deletions(-) diff --git a/build/opsramp-tracing-proxy/values.yaml b/build/opsramp-tracing-proxy/values.yaml index 397f067a48..f0430db3d1 100644 --- a/build/opsramp-tracing-proxy/values.yaml +++ b/build/opsramp-tracing-proxy/values.yaml @@ -31,35 +31,30 @@ service: annotations: { } config: - # ListenAddr is the IP and port on which to listen for incoming events. + ######################## + ## Trace Proxy Config ## + ######################## + + # ListenAddr is the IP and port on which to listen for incoming events. Incoming + # traffic is expected to be HTTP, so if using SSL put something like nginx in + # front to do the TLS Termination. ListenAddr: 0.0.0.0:{{include "httpPort" . 
| trim }} - # GRPCListenAddr is the IP and port on which to listen for incoming events over gRPC. + # GRPCListenAddr is the IP and port on which to listen for incoming events over + # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put something like nginx in + # front to do the TLS Termination. GRPCListenAddr: 0.0.0.0:{{include "grpcPort" . | trim }} - # PeerListenAddr is the IP and port on which to listen for traffic being rerouted from a peer. + # PeerListenAddr is the IP and port on which to listen for traffic being + # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL + # put something like nginx in front to do the decryption. Must be different from + # ListenAddr PeerListenAddr: 0.0.0.0:{{include "httpPeerPort" . | trim }} GRPCPeerListenAddr: 0.0.0.0:{{include "grpcPeerPort" . | trim }} - # ProxyProtocol accepts http and https - # Not Eligible for live reload. - ProxyProtocol: "" - # ProxyServer takes the proxy server address - # Not Eligible for live reload. - ProxyServer: "" - # ProxyPort takes the proxy server port - # Not Eligible for live reload. - ProxyPort: 3128 - # ProxyUserName takes the proxy username - # Not Eligible for live reload. - ProxyUserName: "" - # ProxyPassword takes the proxy password - # Not Eligible for live reload. - ProxyPassword: "" - - # CompressPeerCommunication determines whether trace-proxy will compress span data - # it forwards to peers. If it costs money to transmit data between OpsRamp-Tracing-Proxy + # CompressPeerCommunication determines whether to compress span data + # it forwards to peers. If it costs money to transmit data between different # instances (e.g. they're spread across AWS availability zones), then you # almost certainly want compression enabled to reduce your bill. 
The option to # disable it is provided as an escape hatch for deployments that value lower CPU @@ -67,16 +62,11 @@ config: CompressPeerCommunication: true # OpsrampAPI is the URL for the upstream Opsramp API. - # Eligible for live reload. OpsrampAPI: "" - # OpsrampKey is used to get the OauthToken - OpsrampKey: "" - # OpsrampSecret is used to get the OauthToken - OpsrampSecret: "" - # Traces are sent to the client with the given tenantId - TenantId: "" + # Dataset you want to use for sampling Dataset: "ds" + #Tls Options UseTls: true UseTlsInsecure: false @@ -116,11 +106,17 @@ config: UpstreamBufferSize: 1000 PeerBufferSize: 1000 + # AddHostMetadataToTrace determines whether to add information about + # the host that Refinery is running on to the spans that it processes. + # If enabled, information about the host will be added to each span with the + # prefix `meta.refinery.`. + # Currently, the only value added is 'meta.refinery.local_hostname'. + AddHostMetadataToTrace: false + # EnvironmentCacheTTL is the amount of time a cache entry will live that associates # an API key with an environment name. # Cache misses lookup the environment name using OpsRampAPI config value. # Default is 1 hour ("1h"). - # Not eligible for live reload. EnvironmentCacheTTL: "1h" # QueryAuthToken, if specified, provides a token that must be specified with @@ -129,12 +125,10 @@ config: # are not typically needed in normal operation. # Can be specified in the environment as TRACING_PROXY_QUERY_AUTH_TOKEN. # If left unspecified, the /query endpoints are inaccessible. - # Not eligible for live reload. # QueryAuthToken: "some-random-value" # AddRuleReasonToTrace causes traces that are sent to OpsRamp to include a field which # contains text indicating which rule was evaluated that caused the trace to be included. - # Eligible for live reload. 
AddRuleReasonToTrace: true # AdditionalErrorFields should be a list of span fields that should be included when logging @@ -143,7 +137,6 @@ config: # The fields `dataset`, `apihost`, and `environment` are always included. # If a field is not present in the span, it will not be present in the error log. # Default is ["trace.span_id"]. - # Eligible for live reload. AdditionalErrorFields: - trace.span_id @@ -152,7 +145,6 @@ config: # This value is available to the rules-based sampler, making it possible to write rules that # are dependent upon the number of spans in the trace. # Default is false. - # Eligible for live reload. AddSpanCountToRoot: false # CacheOverrunStrategy controls the cache management behavior under memory pressure. @@ -162,17 +154,94 @@ config: # ejected from the cache earlier than normal but the cache is not resized. # In all cases, it only applies if MaxAlloc is nonzero. # Default is "resize" for compatibility but "impact" is recommended for most installations. - # Eligible for live reload. 
CacheOverrunStrategy: "impact" - # Metrics are sent to OpsRamp (The collection happens based on configuration specifie - # in OpsRampMetrics and only works when the Metrics is set to "prometheus") - SendMetricsToOpsRamp: false + ######################### + ## Proxy Configuration ## + ######################### + ProxyConfiguration: + # Protocol accepts http and https + Protocol: "http" + # Host takes the proxy server address + Host: "" + # Port takes the proxy server port + Port: 3128 + # UserName takes the proxy username + Username: "" + # Password takes the proxy password + Password: "" + + ################################## + ## Authentication Configuration ## + ################################## + AuthConfiguration: + # SkipAuth - skips authentication while sending requests (only to be used for debugging) + SkipAuth: false + + # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made + Endpoint: "" + # Key - authentication key provided in OpsRamp Portal + Key: "" + # Secret - authentication Secret provided in OpsRamp Portal + Secret: "" + # TenantId - tenant/client id to which the traces are to be posted + TenantId: "" + + ############################ + ## Implementation Choices ## + ############################ + # Each of the config options below chooses an implementation of a Trace Proxy + # component to use. Depending on the choice, there may be more configuration + # required below in the section for that choice. Changing implementation choices + # requires a process restart. + # Collector describes which collector to use for collecting traces. The only + # current valid option is "InMemCollector". More can be added by adding + # implementations of the Collector interface. + Collector: "InMemCollector" + + # InMemCollector brings together all the settings that are relevant to + # collecting spans together to make traces. 
+ InMemCollector: + + # The collection cache is used to collect all spans into a trace as well as + # remember the sampling decision for any spans that might come in after the + # trace has been marked "complete" (either by timing out or seeing the root + # span). The number of traces in the cache should be many multiples (100x to + # 1000x) of the total number of concurrently active traces (trace throughput * + # trace duration). + CacheCapacity: 1000 + + # MaxAlloc is optional. If set, it must be an integer >= 0. + # If set to a non-zero value, once per tick (see SendTicker) the collector + # will compare total allocated bytes to this value. If allocation is too + # high, cache capacity will be reduced and an error will be logged. + # Useful values for this setting are generally in the range of 75%-90% of + # available system memory. Using 80% is the recommended. + # This value should be set in according to the resources.limits.memory + # By default that setting is 4GB, and this is set to 85% of that limit + # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,837 + # MaxAlloc: 3435973836 + MaxAlloc: 0 + + ##################### + ## Peer Management ## + ##################### # Configure how OpsRamp-Tracing-Proxy peers are discovered and managed PeerManagement: - Strategy: "hash" # Always use hash for balanced distribution of traces + # Strategy controls the way that traces are assigned to Trace Proxy nodes. + # The "legacy" strategy uses a simple algorithm that unfortunately causes + # 1/2 of the in-flight traces to be assigned to a different node whenever the + # number of nodes changes. + # The legacy strategy is deprecated and is intended to be removed in a future release. + # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the + # number of nodes) are disrupted when the node count changes. + # Not eligible for live reload. 
+ Strategy: "hash" + ########################################################### + ###### Redis (Suitable for all types of deployments) ###### + ########################################################### # The type should always be redis when deployed to Kubernetes environments Type: "redis" @@ -225,44 +294,7 @@ config: # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use # the first IPV6 unicast address found. UseIPV6Identifier: false - - ############################ - ## Implementation Choices ## - ############################ - # Each of the config options below chooses an implementation of a OpsRamp-Tracing-Proxy - # component to use. Depending on the choice there may be more configuration - # required below in the section for that choice. Changing implementation choices - # requires a process restart; these changes will not be picked up by a live - # config reload. (Individual config options for a given implementation may be - # eligible for live reload). - # Collector describes which collector to use for collecting traces. The only - # current valid option is "InMemCollector".. More can be added by adding - # implementations of the Collector interface. - Collector: "InMemCollector" - - # InMemCollector brings together all the settings that are relevant to - # collecting spans together to make traces. - InMemCollector: - - # The collection cache is used to collect all spans into a trace as well as - # remember the sampling decision for any spans that might come in after the - # trace has been marked "complete" (either by timing out or seeing the root - # span). The number of traces in the cache should be many multiples (100x to - # 1000x) of the total number of concurrently active traces (trace throughput * - # trace duration). - CacheCapacity: 1000 - - # MaxAlloc is optional. If set, it must be an integer >= 0. 
- # If set to a non-zero value, once per tick (see SendTicker) the collector - # will compare total allocated bytes to this value. If allocation is too - # high, cache capacity will be reduced and an error will be logged. - # Useful values for this setting are generally in the range of 75%-90% of - # available system memory. Using 80% is the recommended. - # This value should be set in according to the resources.limits.memory - # By default that setting is 4GB, and this is set to 85% of that limit - # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,837 - # MaxAlloc: 3435973836 - MaxAlloc: 0 + ########################################################### # LogrusLogger is a section of the config only used if you are using the # LogrusLogger to send all logs to STDOUT using the logrus package. @@ -272,66 +304,105 @@ config: # LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr"] LogOutput: 'stdout' - OpsRampMetrics: - # MetricsListenAddr determines the interface and port on which Prometheus will - # listen for requests for /metrics. Must be different from the main OpsRamp-Tracing-Proxy - # listener. - # Not eligible for live reload. - MetricsListenAddr: 'localhost:2112' - - # OpsRampMetricsAPI is the URL for the upstream OpsRamp API. - # Not Eligible for live reload. - OpsRampMetricsAPI: '' + MetricsConfig: + # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp + Enable: true - # OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. - OpsRampTenantID: '' - - # OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. - # This is separate from the APIKeys used to authenticate regular - # traffic. - # Not Eligible for live reload. - OpsRampMetricsAPIKey: '' + # ListenAddr determines the interface and port on which Prometheus will + # listen for requests for /metrics. Must be different from the main Trace Proxy + # listener. 
+ ListenAddr: '0.0.0.0:2112' - # OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. - # This is separate from the APISecret used to authenticate regular - # traffic. - # Not Eligible for live reload. - OpsRampMetricsAPISecret: '' + # OpsRampAPI is the URL for the upstream OpsRamp API. + OpsRampAPI: "" - # OpsRampMetricsReportingInterval is frequency specified in seconds at which + # ReportingInterval is the frequency specified in seconds at which # the metrics are collected and sent to OpsRamp - # Not Eligible for live reload. - OpsRampMetricsReportingInterval: 10 + ReportingInterval: 10 # OpsRampMetricsRetryCount is the number of times we retry incase the send fails - # Not Eligible for live reload. - OpsRampMetricsRetryCount: 2 - - # ProxyProtocol accepts http and https - # Not Eligible for live reload. - ProxyProtocol: '' - - # ProxyServer takes the proxy server address - # Not Eligible for live reload. - ProxyServer: '' - - # ProxyPort takes the proxy server port - # Not Eligible for live reload. - ProxyPort: 3128 - - # ProxyUserName takes the proxy username - # Not Eligible for live reload. - ProxyUserName: '' - - # ProxyPassword takes the proxy password - # Not Eligible for live reload. - ProxyPassword: '' + RetryCount: 2 - # OpsRampMetricsList is a list of regular expressions which match the metric + # MetricsList is a list of regular expressions which match the metric # names. Keep the list as small as possible since too many regular expressions can lead to bad performance. - # Internally all the regex in the list are concatinated using '|' to make the computation little faster. - # Not Eligible for live reload - OpsRampMetricsList: [ ".*" ] + # Internally, all the items in the list are concatenated using '|' to make the computation faster. + MetricsList: [ ".*" ] + + GRPCServerParameters: + # MaxConnectionIdle is a duration for the amount of time after which an + # idle connection would be closed by sending a GoAway. 
Idleness duration is + # defined since the most recent time the number of outstanding RPCs became + # zero or the connection establishment. + # 0s sets duration to infinity which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219 + # MaxConnectionIdle: "1m" + + # MaxConnectionAge is a duration for the maximum amount of time a + # connection may exist before it will be closed by sending a GoAway. A + # random jitter of +/-10% will be added to MaxConnectionAge to spread out + # connection storms. + # 0s sets duration to infinity which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 + # MaxConnectionAge: "0s" + + # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + # which the connection will be forcibly closed. + # 0s sets duration to infinity which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 + # MaxConnectionAgeGrace: "0s" + + # After a duration of this time if the server doesn't see any activity it + # pings the client to see if the transport is still alive. + # If set below 1s, a minimum value of 1s will be used instead. + # 0s sets duration to 2 hours which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 + # Time: "10s" + + # After having pinged for keepalive check, the server waits for a duration + # of Timeout and if no activity is seen even after that the connection is + # closed. 
+ # 0s sets duration to 20 seconds which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 + # Timeout: "2s" + + ################################ + ## Sample Cache Configuration ## + ################################ + + # Sample Cache Configuration controls the sample cache used to retain information about trace + # status after the sampling decision has been made. + SampleCacheConfig: + # Type controls the type of sample cache used. + # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is + # 5x the size of the trace cache. This is Refinery's original sample cache strategy. + # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember + # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. + # It is also more configurable. The cuckoo filter is recommended for most installations. + # Default is "legacy". + # Type: "cuckoo" + + # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. + # Refinery keeps a record of each trace that was kept and sent to Honeycomb, along with some + # statistical information. This is most useful in cases where the trace was sent before sending + # the root span, so that the root span can be decorated with accurate metadata. + # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). + # It Does not apply to the "legacy" type of cache. + # KeptSize: 10_000 + + # DroppedSize controls the size of the cuckoo dropped traces cache. + # This cache consumes 4-6 bytes per trace at a scale of millions of traces. + # Changing its size with live reload sets a future limit, but does not have an immediate effect. + # Default is 1_000_000 traces. + # It Does not apply to the "legacy" type of cache. 
+ # DroppedSize: 1_000_000 + + # SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates + # the remaining capacity of its dropped traces cache and possibly cycles it. + # This cache is quite resilient so it doesn't need to happen very often, but the + # operation is also inexpensive. + # Default is 10 seconds. + # It Does not apply to the "legacy" type of cache. + # SizeCheckInterval: "10s" rules: From b2d268883bb3e5f2db54bd5343ee107c4d80783c Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Fri, 31 Mar 2023 12:06:07 +0530 Subject: [PATCH 297/351] fix duplicate go and process metrics --- metrics/opsramp.go | 71 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 55 insertions(+), 16 deletions(-) diff --git a/metrics/opsramp.go b/metrics/opsramp.go index b980aa8c19..32d1a1fe0c 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -2,10 +2,12 @@ package metrics import ( "bytes" + "context" "encoding/json" "fmt" "github.com/golang/snappy" "github.com/gorilla/mux" + "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" io_prometheus_client "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" @@ -30,7 +32,14 @@ const ( missingMetricsWriteScope = "auth token provided not not have metrics:write scope" ) -var metricsServer sync.Once +var ( + muxer *mux.Router + server *http.Server +) + +func init() { + muxer = mux.NewRouter() +} type OpsRampMetrics struct { Config config.Config `inject:""` @@ -52,6 +61,8 @@ type OpsRampMetrics struct { apiKey string apiSecret string oAuthToken *OpsRampAuthTokenResponse + + promRegistry *prometheus.Registry } func (p *OpsRampMetrics) Start() error { @@ -60,6 +71,48 @@ func (p *OpsRampMetrics) Start() error { metricsConfig := p.Config.GetMetricsConfig() + p.metrics = make(map[string]interface{}) + + // Create non-global 
registry. + p.promRegistry = prometheus.NewRegistry() + + // Add go runtime metrics and process collectors to default metrics prefix + if p.prefix == "" { + p.promRegistry.MustRegister( + collectors.NewGoCollector(), + collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), + ) + } + + listenURI := "/metrics" + if p.prefix != "" { + listenURI = fmt.Sprintf("/metrics/%s", strings.TrimSpace(p.prefix)) + } + muxer.Handle(listenURI, promhttp.HandlerFor( + p.promRegistry, + promhttp.HandlerOpts{Registry: p.promRegistry, Timeout: 10 * time.Second}, + ), + ) + p.Logger.Info().Logf("registered metrics at %s for prefix: %s", listenURI, p.prefix) + + if server != nil { + err := server.Shutdown(context.Background()) + if err != nil { + p.Logger.Error().Logf("metrics server shutdown: %v", err) + } + } + server = &http.Server{ + Addr: metricsConfig.ListenAddr, + Handler: muxer, + ReadHeaderTimeout: 10 * time.Second, + } + go func() { + err := server.ListenAndServe() + if err != nil { + p.Logger.Error().Logf("failed to start metrics server: %v", err) + } + }() + if p.Config.GetSendMetricsToOpsRamp() { go func() { metricsTicker := time.NewTicker(time.Duration(metricsConfig.ReportingInterval) * time.Second) @@ -88,20 +141,6 @@ func (p *OpsRampMetrics) Start() error { }() } - p.metrics = make(map[string]interface{}) - - metricsServer.Do(func() { - muxer := mux.NewRouter() - muxer.Handle("/metrics", promhttp.Handler()) - - go func() { - err := http.ListenAndServe(metricsConfig.ListenAddr, muxer) - if err != nil { - p.Logger.Error().Logf("failed to create /metrics server Error: %v", err) - } - }() - }) - return nil } @@ -336,7 +375,7 @@ func (p *OpsRampMetrics) Populate() { } func (p *OpsRampMetrics) PushMetrics() (int, error) { - metricFamilySlice, err := prometheus.DefaultGatherer.Gather() + metricFamilySlice, err := p.promRegistry.Gather() if err != nil { return -1, err } From 4b8a7143d3a4808f9f48dd08457a65837363f4bf Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: 
Fri, 31 Mar 2023 12:25:59 +0530 Subject: [PATCH 298/351] use custom prom registry instead of default registry --- metrics/opsramp.go | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/metrics/opsramp.go b/metrics/opsramp.go index 32d1a1fe0c..d6e8fa7c57 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -107,10 +107,7 @@ func (p *OpsRampMetrics) Start() error { ReadHeaderTimeout: 10 * time.Second, } go func() { - err := server.ListenAndServe() - if err != nil { - p.Logger.Error().Logf("failed to start metrics server: %v", err) - } + server.ListenAndServe() }() if p.Config.GetSendMetricsToOpsRamp() { @@ -165,21 +162,21 @@ func (p *OpsRampMetrics) Register(name string, metricType string) { switch metricType { case "counter": - newMetric = promauto.NewCounter(prometheus.CounterOpts{ + newMetric = promauto.With(p.promRegistry).NewCounter(prometheus.CounterOpts{ Name: name, Namespace: p.prefix, Help: name, ConstLabels: constantLabels, }) case "gauge": - newMetric = promauto.NewGauge(prometheus.GaugeOpts{ + newMetric = promauto.With(p.promRegistry).NewGauge(prometheus.GaugeOpts{ Name: name, Namespace: p.prefix, Help: name, ConstLabels: constantLabels, }) case "histogram": - newMetric = promauto.NewHistogram(prometheus.HistogramOpts{ + newMetric = promauto.With(p.promRegistry).NewHistogram(prometheus.HistogramOpts{ Name: name, Namespace: p.prefix, Help: name, @@ -207,20 +204,19 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s } hostMap := make(map[string]string) if hostname, err := os.Hostname(); err == nil && hostname != "" { - hostMap["hostname"] = hostname } switch metricType { case "counter": - newMetric = promauto.NewCounterVec(prometheus.CounterOpts{ + newMetric = promauto.With(p.promRegistry).NewCounterVec(prometheus.CounterOpts{ Name: name, Namespace: p.prefix, Help: desc, ConstLabels: hostMap, }, labels) case "gauge": - newMetric = promauto.NewGaugeVec( + newMetric = 
promauto.With(p.promRegistry).NewGaugeVec( prometheus.GaugeOpts{ Name: name, Namespace: p.prefix, @@ -229,7 +225,7 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s }, labels) case "histogram": - newMetric = promauto.NewHistogramVec(prometheus.HistogramOpts{ + newMetric = promauto.With(p.promRegistry).NewHistogramVec(prometheus.HistogramOpts{ Name: name, Namespace: p.prefix, Help: desc, From 9b601cea7a4510444db2895e2e2acf364f9c3731 Mon Sep 17 00:00:00 2001 From: saikalyan-bhagavathula <103252261+saikalyan-bhagavathula@users.noreply.github.com> Date: Mon, 3 Apr 2023 14:33:50 +0530 Subject: [PATCH 299/351] packaging related changes (#5) --- build/config_complete.yaml | 403 ++++++++++++++++++ build/configure.go | 44 ++ build/rules_complete.yaml | 211 +++++++++ build/tracing-deb/script.sh | 6 +- build/tracing-deb/tracing/DEBIAN/conffiles | 4 +- .../etc/systemd/system/tracing-proxy.service | 2 +- .../etc/systemd/system/tracing-proxy.service | 2 +- build/tracing-rpm/script.sh | 6 +- build/tracing-rpm/tracing-proxy.spec | 4 +- config/file_config.go | 2 +- 10 files changed, 671 insertions(+), 13 deletions(-) create mode 100644 build/config_complete.yaml create mode 100644 build/configure.go create mode 100644 build/rules_complete.yaml diff --git a/build/config_complete.yaml b/build/config_complete.yaml new file mode 100644 index 0000000000..d4fa9201f3 --- /dev/null +++ b/build/config_complete.yaml @@ -0,0 +1,403 @@ +######################## +## Trace Proxy Config ## +######################## + +# ListenAddr is the IP and port on which to listen for incoming events. Incoming +# traffic is expected to be HTTP, so if using SSL put something like nginx in +# front to do the TLS Termination. +ListenAddr: 0.0.0.0:8082 + +# GRPCListenAddr is the IP and port on which to listen for incoming events over +# gRPC. 
Incoming traffic is expected to be unencrypted, so if using SSL put something like nginx in +# front to do the TLS Termination. +GRPCListenAddr: 0.0.0.0:9090 + +# PeerListenAddr is the IP and port on which to listen for traffic being +# rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL +# put something like nginx in front to do the decryption. Must be different from +# ListenAddr +PeerListenAddr: 0.0.0.0:8083 + +GRPCPeerListenAddr: 0.0.0.0:8084 + +# CompressPeerCommunication determines whether to compress span data +# it forwards to peers. If it costs money to transmit data between different +# instances (e.g. they're spread across AWS availability zones), then you +# almost certainly want compression enabled to reduce your bill. The option to +# disable it is provided as an escape hatch for deployments that value lower CPU +# utilization over data transfer costs. +CompressPeerCommunication: true + +# OpsrampAPI is the URL for the upstream Opsramp API. +OpsrampAPI: "" + +# Dataset you want to use for sampling +Dataset: "ds" + +#Tls Options +UseTls: true +UseTlsInsecure: false + +# LoggingLevel valid options are "debug", "info", "error", and "panic". +LoggingLevel: error + +# SendDelay is a short timer that will be triggered when a trace is complete. +# Trace Proxy will wait for this duration before actually sending the trace. The +# reason for this short delay is to allow for small network delays or clock +# jitters to elapse and any final spans to arrive before actually sending the +# trace. This supports duration strings with supplied units. Set to 0 for +# immediate sends. +SendDelay: 2s + +# BatchTimeout dictates how frequently to send unfulfilled batches. By default +# this will use the DefaultBatchTimeout in libtrace as its value, which is 100ms. +# Eligible for live reload. +BatchTimeout: 1s + +# TraceTimeout is a long timer; it represents the outside boundary of how long +# to wait before sending an incomplete trace. 
Normally traces are sent when the +# root span arrives. Sometimes the root span never arrives (due to crashes or +# whatever), and this timer will send a trace even without having received the +# root span. If you have particularly long-lived traces you should increase this +# timer. This supports duration strings with supplied units. +TraceTimeout: 60s + +# MaxBatchSize is the number of events to be included in the batch for sending +MaxBatchSize: 500 + +# SendTicker is a short timer; it determines the duration to use to check for traces to send +SendTicker: 100ms + +# UpstreamBufferSize and PeerBufferSize control how large of an event queue to use +# when buffering events that will be forwarded to peers or the upstream API. +UpstreamBufferSize: 1000 +PeerBufferSize: 1000 + +# AddHostMetadataToTrace determines whether to add information about +# the host that Tracing-Proxy is running on to the spans that it processes. +# If enabled, information about the host will be added to each span with the +# prefix `meta.tracing_proxy.`. +# Currently, the only value added is 'meta.tracing_proxy.local_hostname'. +AddHostMetadataToTrace: false + +# EnvironmentCacheTTL is the amount of time a cache entry will live that associates +# an API key with an environment name. +# Cache misses lookup the environment name using OpsRampAPI config value. +# Default is 1 hour ("1h"). +EnvironmentCacheTTL: "1h" + +# QueryAuthToken, if specified, provides a token that must be specified with +# the header "X-OpsRamp-Tracing-Proxy-Query" in order for a /query request to succeed. +# These /query requests are intended for debugging OpsRamp-Tracing-Proxy installations and +# are not typically needed in normal operation. +# Can be specified in the environment as TRACING_PROXY_QUERY_AUTH_TOKEN. +# If left unspecified, the /query endpoints are inaccessible. 
+# QueryAuthToken: "some-random-value" + +# AddRuleReasonToTrace causes traces that are sent to OpsRamp to include a field which +# contains text indicating which rule was evaluated that caused the trace to be included. +AddRuleReasonToTrace: true + +# AdditionalErrorFields should be a list of span fields that should be included when logging +# errors that happen during ingestion of events (for example, the span too large error). +# This is primarily useful in trying to track down misbehaving senders in a large installation. +# The fields `dataset`, `apihost`, and `environment` are always included. +# If a field is not present in the span, it will not be present in the error log. +# Default is ["trace.span_id"]. +AdditionalErrorFields: + - trace.span_id + +# AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate +# the number of child spans on the trace at the time the sampling decision was made. +# This value is available to the rules-based sampler, making it possible to write rules that +# are dependent upon the number of spans in the trace. +# Default is false. +AddSpanCountToRoot: false + +# CacheOverrunStrategy controls the cache management behavior under memory pressure. +# "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, +# which is generally not helpful unless it occurs because of a permanent change in traffic patterns. +# In the "impact" strategy, the items having the most impact on the cache size are +# ejected from the cache earlier than normal but the cache is not resized. +# In all cases, it only applies if MaxAlloc is nonzero. +# Default is "resize" for compatibility but "impact" is recommended for most installations. 
+CacheOverrunStrategy: "impact" + +######################### +## Proxy Configuration ## +######################### +ProxyConfiguration: + # Protocol accepts http and https + Protocol: "http" + # Host takes the proxy server address + Host: "" + # Port takes the proxy server port + Port: 3128 + # UserName takes the proxy username + Username: "" + # Password takes the proxy password + Password: "" + +################################## +## Authentication Configuration ## +################################## +AuthConfiguration: + # SkipAuth - skips authentication while sending requests (only to be used for debugging) + SkipAuth: false + + # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made + Endpoint: "" + # Key - authentication key provided in OpsRamp Portal + Key: "" + # Secret - authentication Secret provided in OpsRamp Portal + Secret: "" + # TenantId - tenant/client id to which the traces are to be posted + TenantId: "" + +############################ +## Implementation Choices ## +############################ +# Each of the config options below chooses an implementation of a Trace Proxy +# component to use. Depending on the choice, there may be more configuration +# required below in the section for that choice. Changing implementation choices +# requires a process restart. +# Collector describes which collector to use for collecting traces. The only +# current valid option is "InMemCollector". More can be added by adding +# implementations of the Collector interface. +Collector: "InMemCollector" + +# InMemCollector brings together all the settings that are relevant to +# collecting spans together to make traces. +InMemCollector: + + # The collection cache is used to collect all spans into a trace as well as + # remember the sampling decision for any spans that might come in after the + # trace has been marked "complete" (either by timing out or seeing the root + # span). 
The number of traces in the cache should be many multiples (100x to
+  # 1000x) of the total number of concurrently active traces (trace throughput *
+  # trace duration).
+  CacheCapacity: 1000
+
+  # MaxAlloc is optional. If set, it must be an integer >= 0.
+  # If set to a non-zero value, once per tick (see SendTicker) the collector
+  # will compare total allocated bytes to this value. If allocation is too
+  # high, cache capacity will be reduced and an error will be logged.
+  # Useful values for this setting are generally in the range of 75%-90% of
+  # available system memory. Using 80% is recommended.
+  # This value should be set according to the resources.limits.memory
+  # By default that setting is 4GB, and this is set to 80% of that limit
+  # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,836
+  # MaxAlloc: 3435973836
+  MaxAlloc: 0
+
+#####################
+## Peer Management ##
+#####################
+
+# Configure how OpsRamp-Tracing-Proxy peers are discovered and managed
+PeerManagement:
+  # Strategy controls the way that traces are assigned to Trace Proxy nodes.
+  # The "legacy" strategy uses a simple algorithm that unfortunately causes
+  # 1/2 of the in-flight traces to be assigned to a different node whenever the
+  # number of nodes changes.
+  # The legacy strategy is deprecated and is intended to be removed in a future release.
+  # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the
+  # number of nodes) are disrupted when the node count changes.
+  # Not eligible for live reload.
+  Strategy: "hash"
+
+  ###########################################################
+  ###### File (Suitable only for VM based deployments) ######
+  ###########################################################
+  Type: "file"
+
+  # Peers is the list of all servers participating in this proxy cluster. Events
+  # will be sharded evenly across all peers based on the Trace ID.
Values here + # should be the base URL used to access the peer, and should include scheme, + # hostname (or ip address) and port. All servers in the cluster should be in + # this list, including this host. + Peers: [ + "http://127.0.0.1:8084", #only grpc peer listener used + # "http://127.0.0.1:8083", + # "http://10.1.2.3.4:8080", + # "http://tracing-proxy-1231:8080", + # "http://peer-3.fqdn" // assumes port 80 + ] + ########################################################### + + ########################################################### + ###### Redis (Suitable for all types of deployments) ###### + ########################################################### + # The type should always be redis when deployed to Kubernetes environments +# Type: "redis" +# +# # RedisHost is used to connect to redis for peer cluster membership management. +# # Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes +# # precedence and this value is ignored. +# # Not eligible for live reload. +# # RedisHost will default to the name used for the release or name overrides depending on what is used, +# # but can be overriden to a specific value. +# RedisHost: 0.0.0.0:22122 +# +# # RedisUsername is the username used to connect to redis for peer cluster membership management. +# # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes +# # precedence and this value is ignored. +# # Not eligible for live reload. +# RedisUsername: "" +# +# # RedisPassword is the password used to connect to redis for peer cluster membership management. +# # If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes +# # precedence and this value is ignored. +# # Not eligible for live reload. +# RedisPassword: "" +# +# # RedisPrefix is a string used as a prefix for the keys in redis while storing +# # the peer membership. 
It might be useful to set this in any situation where +# # multiple trace-proxy clusters or multiple applications want to share a single +# # Redis instance. It may not be blank. +# RedisPrefix: "tracing-proxy" +# +# # RedisDatabase is an integer from 0-15 indicating the database number to use +# # for the Redis instance storing the peer membership. It might be useful to set +# # this in any situation where multiple trace-proxy clusters or multiple +# # applications want to share a single Redis instance. +# RedisDatabase: 0 + + # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. + # Not eligible for live reload. + UseTLS: true + + # UseTLSInsecure disables certificate checks + # Not eligible for live reload. + UseTLSInsecure: false + + # IdentifierInterfaceName is optional. + # Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. + # When configured the pod's IP will be used in the peer list + IdentifierInterfaceName: eth0 + + # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first + # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use + # the first IPV6 unicast address found. + UseIPV6Identifier: false + ########################################################### + +# LogrusLogger is a section of the config only used if you are using the +# LogrusLogger to send all logs to STDOUT using the logrus package. +LogrusLogger: + # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] + LogFormatter: 'logfmt' + # LogOutput specifies where the logs are supposed to be written. 
Accepts one of ["stdout", "stderr", "file"]
+  LogOutput: 'file'
+
+  # specifies configs for logs when LogOutput is set to "file"
+  File:
+    # FileName specifies the location where the logs are supposed to be stored
+    FileName: "/var/log/opsramp/tracing-proxy.log"
+    # MaxSize is the maximum size in megabytes of the log file before it gets rotated.
+    MaxSize: 1
+    # MaxBackups is the maximum number of old log files to retain.
+    MaxBackups: 3
+    # Compress determines if the rotated log files should be compressed
+    # using gzip.
+    Compress: true
+
+MetricsConfig:
+  # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp
+  Enable: false
+
+  # ListenAddr determines the interface and port on which Prometheus will
+  # listen for requests for /metrics. Must be different from the main Trace Proxy
+  # listener.
+  ListenAddr: '0.0.0.0:2112'
+
+  # OpsRampAPI is the URL for the upstream OpsRamp API.
+  OpsRampAPI: ""
+
+  # ReportingInterval is the frequency specified in seconds at which
+  # the metrics are collected and sent to OpsRamp
+  ReportingInterval: 10
+
+  # OpsRampMetricsRetryCount is the number of times we retry in case the send fails
+  RetryCount: 2
+
+  # MetricsList is a list of regular expressions which match the metric
+  # names. Keep the list as small as possible since too many regular expressions can lead to bad performance.
+  # Internally, all the items in the list are concatenated using '|' to make the computation faster.
+  MetricsList: [ ".*" ]
+
+GRPCServerParameters:
+# MaxConnectionIdle is a duration for the amount of time after which an
+# idle connection would be closed by sending a GoAway. Idleness duration is
+# defined since the most recent time the number of outstanding RPCs became
+# zero or the connection establishment.
+# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219 +# MaxConnectionIdle: "1m" + +# MaxConnectionAge is a duration for the maximum amount of time a +# connection may exist before it will be closed by sending a GoAway. A +# random jitter of +/-10% will be added to MaxConnectionAge to spread out +# connection storms. +# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 +# MaxConnectionAge: "0s" + +# MaxConnectionAgeGrace is an additive period after MaxConnectionAge after +# which the connection will be forcibly closed. +# 0s sets duration to infinity which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 +# MaxConnectionAgeGrace: "0s" + +# After a duration of this time if the server doesn't see any activity it +# pings the client to see if the transport is still alive. +# If set below 1s, a minimum value of 1s will be used instead. +# 0s sets duration to 2 hours which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 +# Time: "10s" + +# After having pinged for keepalive check, the server waits for a duration +# of Timeout and if no activity is seen even after that the connection is +# closed. 
+# 0s sets duration to 20 seconds which is the default: +# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 +# Timeout: "2s" + +################################ +## Sample Cache Configuration ## +################################ + +# Sample Cache Configuration controls the sample cache used to retain information about trace +# status after the sampling decision has been made. +SampleCacheConfig: +# Type controls the type of sample cache used. +# "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is +# 5x the size of the trace cache. This is Tracing-Proxy's original sample cache strategy. +# "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember +# a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. +# It is also more configurable. The cuckoo filter is recommended for most installations. +# Default is "legacy". +# Type: "cuckoo" + +# KeptSize controls the number of traces preserved in the cuckoo kept traces cache. +# Tracing-Proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some +# statistical information. This is most useful in cases where the trace was sent before sending +# the root span, so that the root span can be decorated with accurate metadata. +# Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). +# It Does not apply to the "legacy" type of cache. +# KeptSize: 10_000 + +# DroppedSize controls the size of the cuckoo dropped traces cache. +# This cache consumes 4-6 bytes per trace at a scale of millions of traces. +# Changing its size with live reload sets a future limit, but does not have an immediate effect. +# Default is 1_000_000 traces. +# It Does not apply to the "legacy" type of cache. 
+# DroppedSize: 1_000_000 + +# SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates +# the remaining capacity of its dropped traces cache and possibly cycles it. +# This cache is quite resilient so it doesn't need to happen very often, but the +# operation is also inexpensive. +# Default is 10 seconds. +# It Does not apply to the "legacy" type of cache. +# SizeCheckInterval: "10s" diff --git a/build/configure.go b/build/configure.go new file mode 100644 index 0000000000..54b259d357 --- /dev/null +++ b/build/configure.go @@ -0,0 +1,44 @@ +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" +) + +func main() { + var configFile []byte + var fileContent string + var err error + + configFile, err = ioutil.ReadFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml") + + api := flag.String("A", "", "API To Send Data") + key := flag.String("K", "", "Opsramp Key") + secret := flag.String("S", "", "Opsramp Secret") + tenant := flag.String("T", "", "Opsramp TenantID") + metricsAPI := flag.String("M", "", "API To Send Metrics Data") + flag.Parse() + + fileContent = string(configFile) + fileContent = strings.ReplaceAll(fileContent, "", *api) + fileContent = strings.ReplaceAll(fileContent, "", *metricsAPI) + fileContent = strings.ReplaceAll(fileContent, "", *key) + fileContent = strings.ReplaceAll(fileContent, "", *secret) + fileContent = strings.ReplaceAll(fileContent, "", *tenant) + + if err = ioutil.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml", []byte(fileContent), 0666); err != nil { + fmt.Println(err) + os.Exit(1) + } + + if _, err := exec.Command("systemctl", "start", "tracing-proxy").Output(); err != nil { + fmt.Println(err) + os.Exit(1) + } + + fmt.Println("Tracing-Proxy Started Successfully") +} diff --git a/build/rules_complete.yaml b/build/rules_complete.yaml new file mode 100644 index 0000000000..aedb5ba976 --- /dev/null +++ b/build/rules_complete.yaml @@ -0,0 +1,211 @@ 
+############################ +## Sampling Rules Config ## +############################ + +# DryRun - If enabled, marks traces that would be dropped given current sampling rules, +# and sends all traces regardless +DryRun: false + +# DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to trace_proxy_kept +DryRunFieldName: trace_proxy_kept + +# DeterministicSampler is a section of the config for manipulating the +# Deterministic Sampler implementation. This is the simplest sampling algorithm +# - it is a static sample rate, choosing traces randomly to either keep or send +# (at the appropriate rate). It is not influenced by the contents of the trace. +Sampler: DeterministicSampler + +# SampleRate is the rate at which to sample. It indicates a ratio, where one +# sample trace is kept for every n traces seen. For example, a SampleRate of 30 +# will keep 1 out of every 30 traces. The choice on whether to keep any specific +# trace is random, so the rate is approximate. +# Eligible for live reload. +SampleRate: 1 + +#dataset1: +# +# # Note: If your dataset name contains a space, you will have to escape the dataset name +# # using single quotes, such as ['dataset 1'] +# +# # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler +# # implementation. This sampler collects the values of a number of fields from a +# # trace and uses them to form a key. This key is handed to the standard dynamic +# # sampler algorithm which generates a sample rate based on the frequency with +# # which that key has appeared in the previous ClearFrequencySec seconds.This +# # sampler uses the AvgSampleRate algorithm from +# # that package. +# Sampler: DynamicSampler +# +# # SampleRate is the goal rate at which to sample. It indicates a ratio, where +# # one sample trace is kept for every n traces seen. For example, a SampleRate of +# # 30 will keep 1 out of every 30 traces. 
This rate is handed to the dynamic +# # sampler, who assigns a sample rate for each trace based on the fields selected +# # from that trace. +# SampleRate: 2 +# +# # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler. +# # The combination of values from all of these fields should reflect how interesting the trace is compared to +# # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for +# # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent +# # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of +# # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is +# # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a +# # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can +# # become interesting when indicating an error) as a good set of fields since it will allowing proper sampling +# # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint. +# # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of +# # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces. +# # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore +# # interesting traces, like traces that experienced a `500`, might not be sampled. +# # Field names may come from any span in the trace. +# FieldList: +# - "" +# +# # UseTraceLength will add the number of spans in the trace in to the dynamic +# # sampler as part of the key. 
The number of spans is exact, so if there are +# # normally small variations in trace length you may want to leave this off. If +# # traces are consistent lengths and changes in trace length is a useful +# # indicator of traces you'd like to see in OpsRamp, set this to true. +# UseTraceLength: true +# +# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field +# # to the root span of the trace containing the key used by the sampler to decide +# # the sample rate. This can be helpful in understanding why the sampler is +# # making certain decisions about sample rate and help you understand how to +# # better choose the sample rate key (aka the FieldList setting above) to use. +# AddSampleRateKeyToTrace: true +# +# # AddSampleRateKeyToTraceField is the name of the field the sampler will use +# # when adding the sample rate key to the trace. This setting is only used when +# # AddSampleRateKeyToTrace is true. +# AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key +# +# # ClearFrequencySec is the name of the field the sampler will use to determine +# # the period over which it will calculate the sample rate. This setting defaults +# # to 30. +# ClearFrequencySec: 60 +#dataset2: +# +# # EMADynamicSampler is a section of the config for manipulating the Exponential +# # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, +# # it attempts to average a given sample rate, weighting rare traffic and frequent +# # traffic differently so as to end up with the correct average. +# # +# # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended +# # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs +# # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential +# # Moving Average of counts seen per key, and adjusts this average at regular intervals. 
+# # The weight applied to more recent intervals is defined by `weight`, a number between +# # (0, 1) - larger values weight the average more toward recent observations. In other words, +# # a larger weight will cause sample rates more quickly adapt to traffic patterns, +# # while a smaller weight will result in sample rates that are less sensitive to bursts or drops +# # in traffic and thus more consistent over time. +# # +# # Keys that are not found in the EMA will always have a sample +# # rate of 1. Keys that occur more frequently will be sampled on a logarithmic +# # curve. In other words, every key will be represented at least once in any +# # given window and more frequent keys will have their sample rate +# # increased proportionally to wind up with the goal sample rate. +# Sampler: EMADynamicSampler +# +# # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where +# # one sample trace is kept for every n traces seen. For example, a SampleRate of +# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic +# # sampler, who assigns a sample rate for each trace based on the fields selected +# # from that trace. +# GoalSampleRate: 2 +# +# # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler. +# # The combination of values from all of these fields should reflect how interesting the trace is compared to +# # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for +# # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent +# # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of +# # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is +# # not unique enough, you will not be guaranteed samples of the most interesting traces. 
As an example, consider a +# # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can +# # become interesting when indicating an error) as a good set of fields since it will allowing proper sampling +# # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint. +# # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of +# # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces. +# # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore +# # interesting traces, like traces that experienced a `500`, might not be sampled. +# # Field names may come from any span in the trace. +# FieldList: [] +# +# # UseTraceLength will add the number of spans in the trace in to the dynamic +# # sampler as part of the key. The number of spans is exact, so if there are +# # normally small variations in trace length you may want to leave this off. If +# # traces are consistent lengths and changes in trace length is a useful +# # indicator of traces you'd like to see in Honeycomb, set this to true. +# UseTraceLength: true +# +# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field +# # to the root span of the trace containing the key used by the sampler to decide +# # the sample rate. This can be helpful in understanding why the sampler is +# # making certain decisions about sample rate and help you understand how to +# # better choose the sample rate key (aka the FieldList setting above) to use. +# AddSampleRateKeyToTrace: true +# +# # AddSampleRateKeyToTraceField is the name of the field the sampler will use +# # when adding the sample rate key to the trace. This setting is only used when +# # AddSampleRateKeyToTrace is true. 
+# AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key
+#
+# # AdjustmentInterval defines how often (in seconds) we adjust the moving average from
+# # recent observations. Default 15s
+# AdjustmentInterval: 15
+#
+# # Weight is a value between (0, 1) indicating the weighting factor used to adjust
+# # the EMA. With larger values, newer data will influence the average more, and older
+# # values will be factored out more quickly. In mathematical literature concerning EMA,
+# # this is referred to as the `alpha` constant.
+# # Default is 0.5
+# Weight: 0.5
+#
+# # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA.
+# # Once MaxKeys is reached, new keys will not be included in the sample rate map, but
+# # existing keys will continue to be counted. You can use this to keep the sample rate
+# # map size under control.
+# MaxKeys: 0
+#
+# # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key
+# # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to
+# # decide what constitutes "zero". Keys with averages below this threshold will be removed
+# # from the EMA. Default is the same as Weight, as this prevents a key with the smallest
+# # integer value (1) from being aged out immediately. This value should generally be <= Weight,
+# # unless you have very specific reasons to set it higher.
+# AgeOutValue: 0.5
+#
+# # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define
+# # the burst detection threshold. If total counts observed for a given interval exceed the threshold
+# # EMA is updated immediately, rather than waiting on the AdjustmentInterval.
+# # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles,
+# # burst detection will kick in.
+# BurstMultiple: 2 +# +# # BurstDetectionDelay indicates the number of intervals to run after Start is called before +# # burst detection kicks in. +# # Defaults to 3 +# BurstDetectionDelay: 3 +#dataset3: +# Sampler: DeterministicSampler +# SampleRate: 10 +#dataset4: +# Sampler: RulesBasedSampler +# CheckNestedFields: false +# rule: +# # Rule name +# - name: "" +# # Drop Condition (examples: true, false) +# drop: +# condition: +# # Field Name (example: status_code) +# - field: "" +# # Operator Value (example: =) +# operator: "" +# # Field Value (example: 500) +# value: "" +#dataset5: +# Sampler: TotalThroughputSampler +# GoalThroughputPerSec: 100 +# FieldList: '' diff --git a/build/tracing-deb/script.sh b/build/tracing-deb/script.sh index 5a97cf8371..6785673e43 100644 --- a/build/tracing-deb/script.sh +++ b/build/tracing-deb/script.sh @@ -13,11 +13,11 @@ sed -i "/^Architecture/s/:.*$/: ${architecture}/g" tracing/DEBIAN/control # Updating the files mkdir -p tracing/opt/opsramp/tracing-proxy/bin mkdir -p tracing/opt/opsramp/tracing-proxy/conf -cp config_complete.toml tracing/opt/opsramp/tracing-proxy/conf/config_complete.toml -cp rules_complete.toml tracing/opt/opsramp/tracing-proxy/conf/rules_complete.toml +cp ../config_complete.yaml tracing/opt/opsramp/tracing-proxy/conf/config_complete.yaml +cp ../rules_complete.yaml tracing/opt/opsramp/tracing-proxy/conf/rules_complete.yaml go build -o ../../cmd/tracing-proxy/main ../../cmd/tracing-proxy/main.go cp ../../cmd/tracing-proxy/main tracing/opt/opsramp/tracing-proxy/bin/tracing-proxy -go build configure.go +go build ../configure.go cp configure tracing/opt/opsramp/tracing-proxy/bin/configure dpkg -b tracing diff --git a/build/tracing-deb/tracing/DEBIAN/conffiles b/build/tracing-deb/tracing/DEBIAN/conffiles index aee0592e98..2efffe9130 100644 --- a/build/tracing-deb/tracing/DEBIAN/conffiles +++ b/build/tracing-deb/tracing/DEBIAN/conffiles @@ -1,2 +1,2 @@ -/opt/opsramp/tracing-proxy/conf/config_complete.toml 
-/opt/opsramp/tracing-proxy/conf/rules_complete.toml +/opt/opsramp/tracing-proxy/conf/config_complete.yaml +/opt/opsramp/tracing-proxy/conf/rules_complete.yaml diff --git a/build/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service b/build/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service index c233840ab3..9225670d0a 100644 --- a/build/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service +++ b/build/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service @@ -3,7 +3,7 @@ Description=tracing-proxy OpsRamp Trace-Aware Sampling Proxy After=network.target [Service] -ExecStart=/opt/opsramp/tracing-proxy/bin/tracing-proxy -c /opt/opsramp/tracing-proxy/conf/config_complete.toml -r /opt/opsramp/tracing-proxy/conf/rules_complete.toml +ExecStart=/opt/opsramp/tracing-proxy/bin/tracing-proxy -c /opt/opsramp/tracing-proxy/conf/config_complete.yaml -r /opt/opsramp/tracing-proxy/conf/rules_complete.yaml KillMode=process Restart=on-failure LimitNOFILE=infinity diff --git a/build/tracing-rpm/etc/systemd/system/tracing-proxy.service b/build/tracing-rpm/etc/systemd/system/tracing-proxy.service index c233840ab3..9225670d0a 100644 --- a/build/tracing-rpm/etc/systemd/system/tracing-proxy.service +++ b/build/tracing-rpm/etc/systemd/system/tracing-proxy.service @@ -3,7 +3,7 @@ Description=tracing-proxy OpsRamp Trace-Aware Sampling Proxy After=network.target [Service] -ExecStart=/opt/opsramp/tracing-proxy/bin/tracing-proxy -c /opt/opsramp/tracing-proxy/conf/config_complete.toml -r /opt/opsramp/tracing-proxy/conf/rules_complete.toml +ExecStart=/opt/opsramp/tracing-proxy/bin/tracing-proxy -c /opt/opsramp/tracing-proxy/conf/config_complete.yaml -r /opt/opsramp/tracing-proxy/conf/rules_complete.yaml KillMode=process Restart=on-failure LimitNOFILE=infinity diff --git a/build/tracing-rpm/script.sh b/build/tracing-rpm/script.sh index 603002859d..72b45bf3ab 100644 --- a/build/tracing-rpm/script.sh +++ b/build/tracing-rpm/script.sh @@ -11,10 +11,10 @@ sed -i 
"/^\%define version/s/^.*$/\%define version ${Version}/g" tracing-pro # Updating the files mkdir -p opt/opsramp/tracing-proxy/conf mkdir -p opt/opsramp/tracing-proxy/bin -cp .config_complete.toml opt/opsramp/tracing-proxy/conf/config_complete.toml -cp rules_complete.toml opt/opsramp/tracing-proxy/conf/rules_complete.toml +cp ../config_complete.yaml opt/opsramp/tracing-proxy/conf/config_complete.yaml +cp ../rules_complete.yaml opt/opsramp/tracing-proxy/conf/rules_complete.yaml go build -o ../../cmd/tracing-proxy/main ../../cmd/tracing-proxy/main.go -go build configure.go +go build ../configure.go cp ../../cmd/tracing-proxy/main opt/opsramp/tracing-proxy/bin/tracing-proxy cp configure opt/opsramp/tracing-proxy/bin/configure diff --git a/build/tracing-rpm/tracing-proxy.spec b/build/tracing-rpm/tracing-proxy.spec index 1d7f6cf50f..2805088ce4 100644 --- a/build/tracing-rpm/tracing-proxy.spec +++ b/build/tracing-rpm/tracing-proxy.spec @@ -26,8 +26,8 @@ install -p -d -m 0755 %{buildroot}/opt/opsramp/tracing-proxy/conf install -p -d -m 0755 %{buildroot}/etc/systemd/system install -m 0775 opt/opsramp/tracing-proxy/bin/tracing-proxy %{buildroot}/opt/opsramp/tracing-proxy/bin/ install -m 0775 opt/opsramp/tracing-proxy/bin/configure %{buildroot}/opt/opsramp/tracing-proxy/bin -install -m 0644 opt/opsramp/tracing-proxy/conf/config_complete.toml %{buildroot}/opt/opsramp/tracing-proxy/conf/ -install -m 0644 opt/opsramp/tracing-proxy/conf/rules_complete.toml %{buildroot}/opt/opsramp/tracing-proxy/conf/ +install -m 0644 opt/opsramp/tracing-proxy/conf/config_complete.yaml %{buildroot}/opt/opsramp/tracing-proxy/conf/ +install -m 0644 opt/opsramp/tracing-proxy/conf/rules_complete.yaml %{buildroot}/opt/opsramp/tracing-proxy/conf/ install -m 0644 etc/systemd/system/tracing-proxy.service %{buildroot}/etc/systemd/system %clean diff --git a/config/file_config.go b/config/file_config.go index 7135471556..e6cc7084d8 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -110,7 
+110,7 @@ type LogrusLoggerConfig struct { } type MetricsConfig struct { - Enable bool `validate:"required"` + Enable bool ListenAddr string `validate:"required"` OpsRampAPI string ReportingInterval int64 From 07e74f1833096c6fcfeba404c971c93a370ad0da Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Thu, 13 Apr 2023 10:54:44 +0530 Subject: [PATCH 300/351] * adding support for retry backoff * adding availability status * bumping up versions for deps with vulnerabilities --- cmd/tracing-proxy/main.go | 46 ++++++++++++++++++++++++++++----------- config/config.go | 2 ++ config/file_config.go | 28 ++++++++++++++++++++++++ config_complete.yaml | 22 +++++++++++++++++-- docker-compose.yml | 10 ++++----- go.mod | 8 +++---- go.sum | 16 +++++++------- internal/peer/redis.go | 8 +++++++ route/route.go | 4 ---- 9 files changed, 107 insertions(+), 37 deletions(-) diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index c0660c53df..485ef17ffe 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -103,16 +103,6 @@ func main() { os.Exit(1) } - ctx, cancel := context.WithTimeout(context.Background(), c.GetPeerTimeout()) - defer cancel() - done := make(chan struct{}) - peers, err := peer.NewPeers(ctx, c, done) - - if err != nil { - fmt.Printf("unable to load peers: %+v\n", err) - os.Exit(1) - } - // upstreamTransport is the http transport used to send things on to OpsRamp upstreamTransport := &http.Transport{ Proxy: http.ProxyFromEnvironment, @@ -135,10 +125,15 @@ func main() { peerMetricsConfig := metrics.GetMetricsImplementation("libtrace_peer") authConfig := c.GetAuthConfig() - opsrampapi, err := c.GetOpsrampAPI() + opsrampAPI, err := c.GetOpsrampAPI() + if err != nil { + logrusLogger.Fatal(err) + } + dataset, err := c.GetDataset() if err != nil { logrusLogger.Fatal(err) } + retryConfig := c.GetRetryConfig() userAgentAddition := "tracing-proxy/" + version upstreamClient, err := libtrace.NewClient(libtrace.ClientConfig{ @@ -157,8 +152,16 @@ func 
main() { AuthTokenEndpoint: authConfig.Endpoint, AuthTokenKey: authConfig.Key, AuthTokenSecret: authConfig.Secret, - ApiHost: opsrampapi, + ApiHost: opsrampAPI, TenantId: authConfig.TenantId, + Dataset: dataset, + RetrySettings: &transmission.RetrySettings{ + InitialInterval: retryConfig.InitialInterval, + RandomizationFactor: retryConfig.RandomizationFactor, + Multiplier: retryConfig.Multiplier, + MaxInterval: retryConfig.MaxInterval, + MaxElapsedTime: retryConfig.MaxElapsedTime, + }, }, }) if err != nil { @@ -180,8 +183,16 @@ func main() { AuthTokenEndpoint: authConfig.Endpoint, AuthTokenKey: authConfig.Key, AuthTokenSecret: authConfig.Secret, - ApiHost: opsrampapi, + ApiHost: opsrampAPI, TenantId: authConfig.TenantId, + Dataset: dataset, + RetrySettings: &transmission.RetrySettings{ + InitialInterval: retryConfig.InitialInterval, + RandomizationFactor: retryConfig.RandomizationFactor, + Multiplier: retryConfig.Multiplier, + MaxInterval: retryConfig.MaxInterval, + MaxElapsedTime: retryConfig.MaxElapsedTime, + }, }, }) if err != nil { @@ -189,6 +200,15 @@ func main() { os.Exit(1) } + ctx, cancel := context.WithTimeout(context.Background(), c.GetPeerTimeout()) + defer cancel() + done := make(chan struct{}) + peers, err := peer.NewPeers(ctx, c, done) + if err != nil { + fmt.Printf("unable to load peers: %+v\n", err) + os.Exit(1) + } + var g inject.Graph err = g.Provide( &inject.Object{Value: c}, diff --git a/config/config.go b/config/config.go index 702fe40df6..e5789ac4a4 100644 --- a/config/config.go +++ b/config/config.go @@ -159,6 +159,8 @@ type Config interface { // GetAuthConfig return the authentication configuration GetAuthConfig() AuthConfiguration + GetRetryConfig() *RetryConfiguration + GetTenantId() (string, error) GetDataset() (string, error) diff --git a/config/file_config.go b/config/file_config.go index e6cc7084d8..5b3d5a8dcd 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "errors" "fmt" + 
"github.com/opsramp/libtrace-go/transmission" "io" "net" "net/url" @@ -74,6 +75,15 @@ type configContents struct { ProxyConfiguration AuthConfiguration MetricsConfig + RetryConfiguration *RetryConfiguration +} + +type RetryConfiguration struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + MaxElapsedTime time.Duration } type ProxyConfiguration struct { @@ -583,6 +593,24 @@ func (f *fileConfig) GetAuthConfig() AuthConfiguration { return f.conf.AuthConfiguration } +func (f *fileConfig) GetRetryConfig() *RetryConfiguration { + f.mux.RLock() + defer f.mux.RUnlock() + + if f.conf.RetryConfiguration == nil { + defaultConfig := transmission.NewDefaultRetrySettings() + return &RetryConfiguration{ + InitialInterval: defaultConfig.InitialInterval, + RandomizationFactor: defaultConfig.RandomizationFactor, + Multiplier: defaultConfig.Multiplier, + MaxInterval: defaultConfig.MaxInterval, + MaxElapsedTime: defaultConfig.MaxElapsedTime, + } + } + + return f.conf.RetryConfiguration +} + func (f *fileConfig) GetDataset() (string, error) { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config_complete.yaml b/config_complete.yaml index 11de94b45f..7fa64065c7 100644 --- a/config_complete.yaml +++ b/config_complete.yaml @@ -123,6 +123,24 @@ AddSpanCountToRoot: false # Default is "resize" for compatibility but "impact" is recommended for most installations. CacheOverrunStrategy: "impact" +######################### +## Retry Configuration ## +######################### +RetryConfiguration: + # InitialInterval the time to wait after the first failure before retrying. + InitialInterval: 500ms + # RandomizationFactor is a random factor used to calculate next backoff + # Randomized interval = RetryInterval * (1 ± RandomizationFactor) + RandomizationFactor: 0.5 + # Multiplier is the value multiplied by the backoff interval bounds + Multiplier: 1.5 + # MaxInterval is the upper bound on backoff interval. 
Once this value is reached, the delay between + # consecutive retries will always be `MaxInterval`. + MaxInterval: 60s + # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request. + # Once this value is reached, the data is discarded. + MaxElapsedTime: 15m + ######################### ## Proxy Configuration ## ######################### @@ -237,7 +255,7 @@ PeerManagement: # Not eligible for live reload. # RedisHost will default to the name used for the release or name overrides depending on what is used, # but can be overriden to a specific value. - RedisHost: 0.0.0.0:22122 + RedisHost: localhost:6379 # RedisUsername is the username used to connect to redis for peer cluster membership management. # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes @@ -274,7 +292,7 @@ PeerManagement: # IdentifierInterfaceName is optional. # Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. # When configured the pod's IP will be used in the peer list - IdentifierInterfaceName: eth0 + # IdentifierInterfaceName: eth0 # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first # IPv4 unicast address it finds for the specified interface. 
If UseIPV6Identifier is used, will use diff --git a/docker-compose.yml b/docker-compose.yml index c0f1033c6e..03f217764c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,19 +2,17 @@ version: '3.8' services: redis: - hostname: redis - image: dynomitedb/redis - expose: - - "22122" + image: redis + ports: + - "6379:6379" profiles: - redis - all redis-commander: image: rediscommander/redis-commander:latest - restart: always environment: - - REDIS_HOSTS=local:redis:22122 + - REDIS_HOSTS=local:redis:6379 ports: - "4042:8081" depends_on: diff --git a/go.mod b/go.mod index 1cd12b0c43..808e0d1d8e 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 github.com/opsramp/husky v0.0.0-20230330140711-461fc680ffa0 - github.com/opsramp/libtrace-go v0.0.0-20230330140347-a09e0a6b627f + github.com/opsramp/libtrace-go v0.0.0-20230413051817-f2f9e8a750b4 github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 @@ -68,9 +68,9 @@ require ( github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect + golang.org/x/net v0.5.0 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/text v0.6.0 // indirect google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/alexcesaro/statsd.v2 v2.0.0 // indirect diff --git a/go.sum b/go.sum index 37ca30a94d..cc871a1eae 100644 --- a/go.sum +++ b/go.sum @@ -581,8 +581,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G 
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/opsramp/husky v0.0.0-20230330140711-461fc680ffa0 h1:qj7a1B/GFWxFVWvpzTV2V0bbxGNFI8bGM+ElTxwJP20= github.com/opsramp/husky v0.0.0-20230330140711-461fc680ffa0/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230330140347-a09e0a6b627f h1:UxD+NprBlcKATAwSSWlfHWYeDXQZruMdGFx9EUFif3w= -github.com/opsramp/libtrace-go v0.0.0-20230330140347-a09e0a6b627f/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/libtrace-go v0.0.0-20230413051817-f2f9e8a750b4 h1:Kd2iNoQJGPSXMN/2mzhe1qHBcpmPgKFPbXEVfFmPx08= +github.com/opsramp/libtrace-go v0.0.0-20230413051817-f2f9e8a750b4/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= @@ -755,8 +755,8 @@ golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -865,8 +865,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -881,8 +881,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/internal/peer/redis.go b/internal/peer/redis.go index a530c0650b..3531793ab6 100644 --- a/internal/peer/redis.go +++ b/internal/peer/redis.go @@ -5,6 +5,7 @@ import ( "crypto/tls" "errors" "fmt" + "github.com/opsramp/libtrace-go/transmission" "net" "os" "sort" @@ -101,6 +102,10 @@ func newRedisPeers(ctx context.Context, c config.Config, done chan struct{}) (Pe } // register myself once + for !transmission.DefaultAvailability.Status() { + logrus.Info("peer is not available yet") + time.Sleep(5 * time.Second) + } err = peers.store.Register(ctx, address, peerEntryTimeout) if err != nil { logrus.WithError(err).Errorf("failed to register self with redis peer store") @@ -141,6 +146,9 @@ func (p *redisPeers) registerSelf(done chan struct{}) { for { select { case <-tk.C: + if !transmission.DefaultAvailability.Status() { + continue + } ctx, cancel := context.WithTimeout(context.Background(), p.c.GetPeerTimeout()) // every interval, insert a timeout record. we ignore the error // here since Register() logs the error for us. diff --git a/route/route.go b/route/route.go index 294b187f94..d6d75e524a 100644 --- a/route/route.go +++ b/route/route.go @@ -113,10 +113,6 @@ func (r *Router) SetVersion(ver string) { r.versionStr = ver } -type server struct { - proxypb.TraceProxyServiceServer -} - // LnS spins up the Listen and Serve portion of the router. A router is // initialized as being for either incoming traffic from clients or traffic from // a peer. 
They listen on different addresses so peer traffic can be From c64d8716dd955cd70f74fafdd96274a175a8f251 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Thu, 13 Apr 2023 11:33:01 +0530 Subject: [PATCH 301/351] removing retry for metrics --- config/file_config.go | 5 ----- config_complete.yaml | 3 --- metrics/opsramp.go | 15 ++------------- 3 files changed, 2 insertions(+), 21 deletions(-) diff --git a/config/file_config.go b/config/file_config.go index 5b3d5a8dcd..8f7664fcb2 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -124,7 +124,6 @@ type MetricsConfig struct { ListenAddr string `validate:"required"` OpsRampAPI string ReportingInterval int64 - RetryCount int64 MetricsList []string } @@ -210,7 +209,6 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("MetricsConfig.Enable", false) c.SetDefault("MetricsConfig.ListenAddr", "0.0.0.0:2112") c.SetDefault("MetricsConfig.ReportingInterval", 10) - c.SetDefault("MetricsConfig.RetryCount", 2) c.SetDefault("MetricsConfig.MetricsList", []string{".*"}) c.SetConfigFile(config) @@ -323,9 +321,6 @@ func (f *fileConfig) validateGeneralConfigs() error { // validate metrics config metricsConfig := f.GetMetricsConfig() - if metricsConfig.RetryCount < 0 || metricsConfig.RetryCount > 10 { - return fmt.Errorf("metrics retry count %d invalid, must be in range 1-10", metricsConfig.RetryCount) - } if metricsConfig.ReportingInterval < 10 { return fmt.Errorf("mertics reporting interval %d not allowed, must be >= 10", metricsConfig.ReportingInterval) } diff --git a/config_complete.yaml b/config_complete.yaml index 7fa64065c7..615bf1682c 100644 --- a/config_complete.yaml +++ b/config_complete.yaml @@ -336,9 +336,6 @@ MetricsConfig: # the metrics are collected and sent to OpsRamp ReportingInterval: 10 - # OpsRampMetricsRetryCount is the number of times we retry incase the send fails - RetryCount: 2 - # MetricsList is a list of regular expressions which match the metric # names. 
Keep the list as small as possible since too many regular expressions can lead to bad performance. # Internally, all the items in the list are concatenated using '|' to make the computation faster. diff --git a/metrics/opsramp.go b/metrics/opsramp.go index d6e8fa7c57..674564eb57 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -53,7 +53,6 @@ type OpsRampMetrics struct { apiEndpoint string tenantID string - retryCount int64 re *regexp.Regexp prefix string @@ -317,7 +316,6 @@ func (p *OpsRampMetrics) Populate() { proxyConfig := p.Config.GetProxyConfig() p.apiEndpoint = metricsConfig.OpsRampAPI - p.retryCount = metricsConfig.RetryCount p.authTokenEndpoint = authConfig.Endpoint p.apiKey = authConfig.Key @@ -544,7 +542,7 @@ func (p *OpsRampMetrics) PushMetrics() (int, error) { } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", p.oAuthToken.AccessToken)) - resp, err := p.SendWithRetry(req) + resp, err := p.Send(req) if err != nil { return -1, err } @@ -596,7 +594,7 @@ func (p *OpsRampMetrics) RenewOAuthToken() error { return nil } -func (p *OpsRampMetrics) SendWithRetry(request *http.Request) (*http.Response, error) { +func (p *OpsRampMetrics) Send(request *http.Request) (*http.Response, error) { response, err := p.Client.Do(request) if err == nil && response != nil && (response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted) { return response, nil @@ -604,19 +602,10 @@ func (p *OpsRampMetrics) SendWithRetry(request *http.Request) (*http.Response, e if response != nil && response.StatusCode == http.StatusProxyAuthRequired { // OpsRamp uses this for bad auth token p.RenewOAuthToken() request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", p.oAuthToken.AccessToken)) - } - - // retry if the error is not nil - for retries := p.retryCount; retries > 0; retries-- { response, err = p.Client.Do(request) if err == nil && response != nil && (response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted) { 
return response, nil } - if response != nil && response.StatusCode == http.StatusProxyAuthRequired { // OpsRamp uses this for bad auth token - p.RenewOAuthToken() - request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", p.oAuthToken.AccessToken)) - } } - return response, err } From 649e96f83d5a30f3ac0adca9e77e33b55098fb7e Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Thu, 13 Apr 2023 15:44:23 +0530 Subject: [PATCH 302/351] add job prefix label to prevent conflict of promhttp_metric_handler_errors metrics from differnt gatherers --- cmd/tracing-proxy/main.go | 4 +- metrics/opsramp.go | 118 ++++++++++++++++++++++++++------------ 2 files changed, 82 insertions(+), 40 deletions(-) diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index 485ef17ffe..78960707bc 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -121,8 +121,8 @@ func main() { TLSHandshakeTimeout: 1200 * time.Millisecond, } - upstreamMetricsConfig := metrics.GetMetricsImplementation("libtrace_upstream") - peerMetricsConfig := metrics.GetMetricsImplementation("libtrace_peer") + upstreamMetricsConfig := metrics.GetMetricsImplementation("upstream") + peerMetricsConfig := metrics.GetMetricsImplementation("peer") authConfig := c.GetAuthConfig() opsrampAPI, err := c.GetOpsrampAPI() diff --git a/metrics/opsramp.go b/metrics/opsramp.go index 674564eb57..259c622787 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -122,7 +122,7 @@ func (p *OpsRampMetrics) Start() error { } for range metricsTicker.C { - statusCode, err := p.PushMetrics() + statusCode, err := p.Push() if err != nil { p.Logger.Error().Logf("error while pushing metrics with statusCode: %d and Error: %v", statusCode, err) if err.Error() == missingMetricsWriteScope { @@ -201,9 +201,9 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s if exists { return } - hostMap := make(map[string]string) + constantLabels := make(map[string]string) if hostname, err := 
os.Hostname(); err == nil && hostname != "" { - hostMap["hostname"] = hostname + constantLabels["hostname"] = hostname } switch metricType { @@ -212,7 +212,7 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s Name: name, Namespace: p.prefix, Help: desc, - ConstLabels: hostMap, + ConstLabels: constantLabels, }, labels) case "gauge": newMetric = promauto.With(p.promRegistry).NewGaugeVec( @@ -220,7 +220,7 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s Name: name, Namespace: p.prefix, Help: desc, - ConstLabels: hostMap, + ConstLabels: constantLabels, }, labels) case "histogram": @@ -228,12 +228,11 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s Name: name, Namespace: p.prefix, Help: desc, - ConstLabels: hostMap, + ConstLabels: constantLabels, // This is an attempt at a usable set of buckets for a wide range of metrics // 16 buckets, first upper bound of 1, each following upper bound is 4x the previous Buckets: prometheus.ExponentialBuckets(1, 4, 16), }, labels) - } p.metrics[name] = newMetric @@ -310,7 +309,6 @@ type OpsRampAuthTokenResponse struct { } func (p *OpsRampMetrics) Populate() { - metricsConfig := p.Config.GetMetricsConfig() authConfig := p.Config.GetAuthConfig() proxyConfig := p.Config.GetProxyConfig() @@ -332,11 +330,11 @@ func (p *OpsRampMetrics) Populate() { } p.re = regexp.MustCompile(regexString) - proxyUrl := "" + proxyURL := "" if proxyConfig.Host != "" && proxyConfig.Protocol != "" { - proxyUrl = fmt.Sprintf("%s://%s:%d/", proxyConfig.Protocol, proxyConfig.Host, proxyConfig.Port) + proxyURL = fmt.Sprintf("%s://%s:%d/", proxyConfig.Protocol, proxyConfig.Host, proxyConfig.Port) if proxyConfig.Username != "" && proxyConfig.Password != "" { - proxyUrl = fmt.Sprintf("%s://%s:%s@%s:%d", proxyConfig.Protocol, proxyConfig.Username, proxyConfig.Password, proxyConfig.Host, proxyConfig.Port) + proxyURL = fmt.Sprintf("%s://%s:%s@%s:%d", 
proxyConfig.Protocol, proxyConfig.Username, proxyConfig.Password, proxyConfig.Host, proxyConfig.Port) p.Logger.Debug().Logf("Using Authentication for ProxyConfiguration Communication for Metrics") } } @@ -350,8 +348,8 @@ func (p *OpsRampMetrics) Populate() { }, Timeout: time.Duration(240) * time.Second, } - if proxyUrl != "" { - proxyURL, err := url.Parse(proxyUrl) + if proxyURL != "" { + proxyURL, err := url.Parse(proxyURL) if err != nil { p.Logger.Error().Logf("skipping proxy err: %v", err) } else { @@ -368,7 +366,7 @@ func (p *OpsRampMetrics) Populate() { } } -func (p *OpsRampMetrics) PushMetrics() (int, error) { +func (p *OpsRampMetrics) Push() (int, error) { metricFamilySlice, err := p.promRegistry.Gather() if err != nil { return -1, err @@ -394,10 +392,16 @@ func (p *OpsRampMetrics) PushMetrics() (int, error) { switch metricFamily.GetType() { case io_prometheus_client.MetricType_COUNTER: timeSeries = append(timeSeries, prompb.TimeSeries{ - Labels: append(labels, prompb.Label{ - Name: model.MetricNameLabel, - Value: metricFamily.GetName(), - }), + Labels: append(labels, []prompb.Label{ + { + Name: model.MetricNameLabel, + Value: metricFamily.GetName(), + }, + { + Name: model.JobLabel, + Value: p.prefix, + }, + }...), Samples: []prompb.Sample{ { Value: metric.GetCounter().GetValue(), @@ -407,10 +411,16 @@ func (p *OpsRampMetrics) PushMetrics() (int, error) { }) case io_prometheus_client.MetricType_GAUGE: timeSeries = append(timeSeries, prompb.TimeSeries{ - Labels: append(labels, prompb.Label{ - Name: model.MetricNameLabel, - Value: metricFamily.GetName(), - }), + Labels: append(labels, []prompb.Label{ + { + Name: model.MetricNameLabel, + Value: metricFamily.GetName(), + }, + { + Name: model.JobLabel, + Value: p.prefix, + }, + }...), Samples: []prompb.Sample{ { Value: metric.GetGauge().GetValue(), @@ -427,6 +437,10 @@ func (p *OpsRampMetrics) PushMetrics() (int, error) { Name: model.MetricNameLabel, Value: metricFamily.GetName(), }, + { + Name: model.JobLabel, + 
Value: p.prefix, + }, { Name: model.BucketLabel, Value: fmt.Sprintf("%v", bucket.GetUpperBound()), @@ -442,10 +456,16 @@ func (p *OpsRampMetrics) PushMetrics() (int, error) { } // samples for count and sum timeSeries = append(timeSeries, prompb.TimeSeries{ - Labels: append(labels, prompb.Label{ - Name: model.MetricNameLabel, - Value: fmt.Sprintf("%s_sum", metricFamily.GetName()), - }), + Labels: append(labels, []prompb.Label{ + { + Name: model.MetricNameLabel, + Value: fmt.Sprintf("%s_sum", metricFamily.GetName()), + }, + { + Name: model.JobLabel, + Value: p.prefix, + }, + }...), Samples: []prompb.Sample{ { Value: metric.GetHistogram().GetSampleSum(), @@ -454,10 +474,16 @@ func (p *OpsRampMetrics) PushMetrics() (int, error) { }, }) timeSeries = append(timeSeries, prompb.TimeSeries{ - Labels: append(labels, prompb.Label{ - Name: model.MetricNameLabel, - Value: fmt.Sprintf("%s_count", metricFamily.GetName()), - }), + Labels: append(labels, []prompb.Label{ + { + Name: model.MetricNameLabel, + Value: fmt.Sprintf("%s_count", metricFamily.GetName()), + }, + { + Name: model.JobLabel, + Value: p.prefix, + }, + }...), Samples: []prompb.Sample{ { Value: float64(metric.GetHistogram().GetSampleCount()), @@ -474,6 +500,10 @@ func (p *OpsRampMetrics) PushMetrics() (int, error) { Name: model.MetricNameLabel, Value: metricFamily.GetName(), }, + { + Name: model.JobLabel, + Value: p.prefix, + }, { Name: model.QuantileLabel, Value: fmt.Sprintf("%v", quantile.GetQuantile()), @@ -489,10 +519,16 @@ func (p *OpsRampMetrics) PushMetrics() (int, error) { } // samples for count and sum timeSeries = append(timeSeries, prompb.TimeSeries{ - Labels: append(labels, prompb.Label{ - Name: model.MetricNameLabel, - Value: fmt.Sprintf("%s_sum", metricFamily.GetName()), - }), + Labels: append(labels, []prompb.Label{ + { + Name: model.MetricNameLabel, + Value: fmt.Sprintf("%s_sum", metricFamily.GetName()), + }, + { + Name: model.JobLabel, + Value: p.prefix, + }, + }...), Samples: []prompb.Sample{ { 
Value: metric.GetSummary().GetSampleSum(), @@ -501,10 +537,16 @@ func (p *OpsRampMetrics) PushMetrics() (int, error) { }, }) timeSeries = append(timeSeries, prompb.TimeSeries{ - Labels: append(labels, prompb.Label{ - Name: model.MetricNameLabel, - Value: fmt.Sprintf("%s_count", metricFamily.GetName()), - }), + Labels: append(labels, []prompb.Label{ + { + Name: model.MetricNameLabel, + Value: fmt.Sprintf("%s_count", metricFamily.GetName()), + }, + { + Name: model.JobLabel, + Value: p.prefix, + }, + }...), Samples: []prompb.Sample{ { Value: float64(metric.GetSummary().GetSampleCount()), From 9bc18d269f4778a28cb8362a84bf9bd31bc4987a Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Thu, 13 Apr 2023 19:36:39 +0530 Subject: [PATCH 303/351] updating helm values.yaml with latest config_complete.yaml content --- build/opsramp-tracing-proxy/values.yaml | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/build/opsramp-tracing-proxy/values.yaml b/build/opsramp-tracing-proxy/values.yaml index f0430db3d1..a73dab8436 100644 --- a/build/opsramp-tracing-proxy/values.yaml +++ b/build/opsramp-tracing-proxy/values.yaml @@ -156,6 +156,24 @@ config: # Default is "resize" for compatibility but "impact" is recommended for most installations. CacheOverrunStrategy: "impact" + ######################### + ## Retry Configuration ## + ######################### + RetryConfiguration: + # InitialInterval the time to wait after the first failure before retrying. + InitialInterval: 500ms + # RandomizationFactor is a random factor used to calculate next backoff + # Randomized interval = RetryInterval * (1 ± RandomizationFactor) + RandomizationFactor: 0.5 + # Multiplier is the value multiplied by the backoff interval bounds + Multiplier: 1.5 + # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between + # consecutive retries will always be `MaxInterval`. 
+ MaxInterval: 60s + # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request. + # Once this value is reached, the data is discarded. + MaxElapsedTime: 15m + ######################### ## Proxy Configuration ## ######################### @@ -320,9 +338,6 @@ config: # the metrics are collected and sent to OpsRamp ReportingInterval: 10 - # OpsRampMetricsRetryCount is the number of times we retry incase the send fails - RetryCount: 2 - # MetricsList is a list of regular expressions which match the metric # names. Keep the list as small as possible since too many regular expressions can lead to bad performance. # Internally, all the items in the list are concatenated using '|' to make the computation faster. From 6236fdbe992b31eb77c098e74b70b21a443c281e Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Fri, 14 Apr 2023 11:24:16 +0530 Subject: [PATCH 304/351] adding hostname and prefix label to all metrics being exporter out to opsramp --- metrics/opsramp.go | 158 ++++++++++++++++----------------------------- 1 file changed, 55 insertions(+), 103 deletions(-) diff --git a/metrics/opsramp.go b/metrics/opsramp.go index 259c622787..99133a4b58 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -33,12 +33,15 @@ const ( ) var ( - muxer *mux.Router - server *http.Server + muxer *mux.Router + server *http.Server + hostname string ) func init() { muxer = mux.NewRouter() + + hostname, _ = os.Hostname() } type OpsRampMetrics struct { @@ -153,26 +156,18 @@ func (p *OpsRampMetrics) Register(name string, metricType string) { return } - constantLabels := make(map[string]string) - - if hostname, err := os.Hostname(); err == nil && hostname != "" { - constantLabels["hostname"] = hostname - } - switch metricType { case "counter": newMetric = promauto.With(p.promRegistry).NewCounter(prometheus.CounterOpts{ - Name: name, - Namespace: p.prefix, - Help: name, - ConstLabels: constantLabels, + Name: name, + Namespace: p.prefix, + Help: name, }) 
case "gauge": newMetric = promauto.With(p.promRegistry).NewGauge(prometheus.GaugeOpts{ - Name: name, - Namespace: p.prefix, - Help: name, - ConstLabels: constantLabels, + Name: name, + Namespace: p.prefix, + Help: name, }) case "histogram": newMetric = promauto.With(p.promRegistry).NewHistogram(prometheus.HistogramOpts{ @@ -181,8 +176,7 @@ func (p *OpsRampMetrics) Register(name string, metricType string) { Help: name, // This is an attempt at a usable set of buckets for a wide range of metrics // 16 buckets, first upper bound of 1, each following upper bound is 4x the previous - Buckets: prometheus.ExponentialBuckets(1, 4, 16), - ConstLabels: constantLabels, + Buckets: prometheus.ExponentialBuckets(1, 4, 16), }) } @@ -201,34 +195,27 @@ func (p *OpsRampMetrics) RegisterWithDescriptionLabels(name string, metricType s if exists { return } - constantLabels := make(map[string]string) - if hostname, err := os.Hostname(); err == nil && hostname != "" { - constantLabels["hostname"] = hostname - } switch metricType { case "counter": newMetric = promauto.With(p.promRegistry).NewCounterVec(prometheus.CounterOpts{ - Name: name, - Namespace: p.prefix, - Help: desc, - ConstLabels: constantLabels, + Name: name, + Namespace: p.prefix, + Help: desc, }, labels) case "gauge": newMetric = promauto.With(p.promRegistry).NewGaugeVec( prometheus.GaugeOpts{ - Name: name, - Namespace: p.prefix, - Help: desc, - ConstLabels: constantLabels, + Name: name, + Namespace: p.prefix, + Help: desc, }, labels) case "histogram": newMetric = promauto.With(p.promRegistry).NewHistogramVec(prometheus.HistogramOpts{ - Name: name, - Namespace: p.prefix, - Help: desc, - ConstLabels: constantLabels, + Name: name, + Namespace: p.prefix, + Help: desc, // This is an attempt at a usable set of buckets for a wide range of metrics // 16 buckets, first upper bound of 1, each following upper bound is 4x the previous Buckets: prometheus.ExponentialBuckets(1, 4, 16), @@ -381,7 +368,16 @@ func (p *OpsRampMetrics) Push() 
(int, error) { continue } for _, metric := range metricFamily.GetMetric() { - var labels []prompb.Label + labels := []prompb.Label{ + { + Name: model.JobLabel, + Value: p.prefix, + }, + { + Name: "hostname", + Value: hostname, + }, + } for _, label := range metric.GetLabel() { labels = append(labels, prompb.Label{ Name: label.GetName(), @@ -392,16 +388,10 @@ func (p *OpsRampMetrics) Push() (int, error) { switch metricFamily.GetType() { case io_prometheus_client.MetricType_COUNTER: timeSeries = append(timeSeries, prompb.TimeSeries{ - Labels: append(labels, []prompb.Label{ - { - Name: model.MetricNameLabel, - Value: metricFamily.GetName(), - }, - { - Name: model.JobLabel, - Value: p.prefix, - }, - }...), + Labels: append(labels, prompb.Label{ + Name: model.MetricNameLabel, + Value: metricFamily.GetName(), + }), Samples: []prompb.Sample{ { Value: metric.GetCounter().GetValue(), @@ -411,16 +401,10 @@ func (p *OpsRampMetrics) Push() (int, error) { }) case io_prometheus_client.MetricType_GAUGE: timeSeries = append(timeSeries, prompb.TimeSeries{ - Labels: append(labels, []prompb.Label{ - { - Name: model.MetricNameLabel, - Value: metricFamily.GetName(), - }, - { - Name: model.JobLabel, - Value: p.prefix, - }, - }...), + Labels: append(labels, prompb.Label{ + Name: model.MetricNameLabel, + Value: metricFamily.GetName(), + }), Samples: []prompb.Sample{ { Value: metric.GetGauge().GetValue(), @@ -437,10 +421,6 @@ func (p *OpsRampMetrics) Push() (int, error) { Name: model.MetricNameLabel, Value: metricFamily.GetName(), }, - { - Name: model.JobLabel, - Value: p.prefix, - }, { Name: model.BucketLabel, Value: fmt.Sprintf("%v", bucket.GetUpperBound()), @@ -456,16 +436,10 @@ func (p *OpsRampMetrics) Push() (int, error) { } // samples for count and sum timeSeries = append(timeSeries, prompb.TimeSeries{ - Labels: append(labels, []prompb.Label{ - { - Name: model.MetricNameLabel, - Value: fmt.Sprintf("%s_sum", metricFamily.GetName()), - }, - { - Name: model.JobLabel, - Value: p.prefix, 
- }, - }...), + Labels: append(labels, prompb.Label{ + Name: model.MetricNameLabel, + Value: fmt.Sprintf("%s_sum", metricFamily.GetName()), + }), Samples: []prompb.Sample{ { Value: metric.GetHistogram().GetSampleSum(), @@ -474,16 +448,10 @@ func (p *OpsRampMetrics) Push() (int, error) { }, }) timeSeries = append(timeSeries, prompb.TimeSeries{ - Labels: append(labels, []prompb.Label{ - { - Name: model.MetricNameLabel, - Value: fmt.Sprintf("%s_count", metricFamily.GetName()), - }, - { - Name: model.JobLabel, - Value: p.prefix, - }, - }...), + Labels: append(labels, prompb.Label{ + Name: model.MetricNameLabel, + Value: fmt.Sprintf("%s_count", metricFamily.GetName()), + }), Samples: []prompb.Sample{ { Value: float64(metric.GetHistogram().GetSampleCount()), @@ -500,10 +468,6 @@ func (p *OpsRampMetrics) Push() (int, error) { Name: model.MetricNameLabel, Value: metricFamily.GetName(), }, - { - Name: model.JobLabel, - Value: p.prefix, - }, { Name: model.QuantileLabel, Value: fmt.Sprintf("%v", quantile.GetQuantile()), @@ -519,16 +483,10 @@ func (p *OpsRampMetrics) Push() (int, error) { } // samples for count and sum timeSeries = append(timeSeries, prompb.TimeSeries{ - Labels: append(labels, []prompb.Label{ - { - Name: model.MetricNameLabel, - Value: fmt.Sprintf("%s_sum", metricFamily.GetName()), - }, - { - Name: model.JobLabel, - Value: p.prefix, - }, - }...), + Labels: append(labels, prompb.Label{ + Name: model.MetricNameLabel, + Value: fmt.Sprintf("%s_sum", metricFamily.GetName()), + }), Samples: []prompb.Sample{ { Value: metric.GetSummary().GetSampleSum(), @@ -537,16 +495,10 @@ func (p *OpsRampMetrics) Push() (int, error) { }, }) timeSeries = append(timeSeries, prompb.TimeSeries{ - Labels: append(labels, []prompb.Label{ - { - Name: model.MetricNameLabel, - Value: fmt.Sprintf("%s_count", metricFamily.GetName()), - }, - { - Name: model.JobLabel, - Value: p.prefix, - }, - }...), + Labels: append(labels, prompb.Label{ + Name: model.MetricNameLabel, + Value: 
fmt.Sprintf("%s_count", metricFamily.GetName()), + }), Samples: []prompb.Sample{ { Value: float64(metric.GetSummary().GetSampleCount()), From c337f3fe29f049e6b41ce5c8fd46cb1379f9fb11 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Tue, 18 Apr 2023 09:17:51 +0530 Subject: [PATCH 305/351] update libtrace-go --- config/file_config.go | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/config/file_config.go b/config/file_config.go index 8f7664fcb2..2b22c62001 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -573,12 +573,12 @@ func (f *fileConfig) GetOpsrampAPI() (string, error) { f.mux.RLock() defer f.mux.RUnlock() - u, err := url.Parse(f.conf.OpsrampAPI) + _, err := url.Parse(f.conf.OpsrampAPI) if err != nil { return "", err } - return fmt.Sprintf("%s://%s", u.Scheme, u.Hostname()), nil + return f.conf.OpsrampAPI, nil } func (f *fileConfig) GetAuthConfig() AuthConfiguration { diff --git a/go.mod b/go.mod index 808e0d1d8e..03b9272ce5 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 github.com/opsramp/husky v0.0.0-20230330140711-461fc680ffa0 - github.com/opsramp/libtrace-go v0.0.0-20230413051817-f2f9e8a750b4 + github.com/opsramp/libtrace-go v0.0.0-20230417062015-cad5afe8ebcc github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index cc871a1eae..a8ab5153f3 100644 --- a/go.sum +++ b/go.sum @@ -581,8 +581,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/opsramp/husky v0.0.0-20230330140711-461fc680ffa0 h1:qj7a1B/GFWxFVWvpzTV2V0bbxGNFI8bGM+ElTxwJP20= 
github.com/opsramp/husky v0.0.0-20230330140711-461fc680ffa0/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230413051817-f2f9e8a750b4 h1:Kd2iNoQJGPSXMN/2mzhe1qHBcpmPgKFPbXEVfFmPx08= -github.com/opsramp/libtrace-go v0.0.0-20230413051817-f2f9e8a750b4/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/libtrace-go v0.0.0-20230417062015-cad5afe8ebcc h1:k25sLiGMrwyJdWvSZ8hOuzIheWebby3DF3Ny+jE71xg= +github.com/opsramp/libtrace-go v0.0.0-20230417062015-cad5afe8ebcc/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= From af04447bc689e729bb4199297901cc9098d7f18b Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Thu, 20 Apr 2023 10:15:28 +0530 Subject: [PATCH 306/351] updating k8s deployment yaml --- .../helm}/opsramp-tracing-proxy/.helmignore | 0 .../helm}/opsramp-tracing-proxy/Chart.yaml | 0 .../templates/_helpers.tpl | 0 .../templates/deployment-redis.yaml | 0 .../templates/deployment.yaml | 0 .../templates/k8s-config-cm.yaml | 0 .../templates/k8s-rules-cm.yaml | 0 .../templates/service-redis.yaml | 0 .../templates/service.yaml | 0 .../helm}/opsramp-tracing-proxy/values.yaml | 0 build/kubernetes/yaml/k8s-config-cm.yaml | 417 ++++++++++++++++++ .../kubernetes/yaml}/k8s-deployment.yaml | 10 +- build/kubernetes/yaml/k8s-rules-cm.yaml | 225 ++++++++++ deployment/kubernetes/k8s-config-cm.yaml | 320 -------------- deployment/kubernetes/k8s-rules-cm.yaml | 266 ----------- 15 files changed, 647 insertions(+), 591 deletions(-) rename build/{ => kubernetes/helm}/opsramp-tracing-proxy/.helmignore (100%) rename build/{ => 
kubernetes/helm}/opsramp-tracing-proxy/Chart.yaml (100%) rename build/{ => kubernetes/helm}/opsramp-tracing-proxy/templates/_helpers.tpl (100%) rename build/{ => kubernetes/helm}/opsramp-tracing-proxy/templates/deployment-redis.yaml (100%) rename build/{ => kubernetes/helm}/opsramp-tracing-proxy/templates/deployment.yaml (100%) rename build/{ => kubernetes/helm}/opsramp-tracing-proxy/templates/k8s-config-cm.yaml (100%) rename build/{ => kubernetes/helm}/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml (100%) rename build/{ => kubernetes/helm}/opsramp-tracing-proxy/templates/service-redis.yaml (100%) rename build/{ => kubernetes/helm}/opsramp-tracing-proxy/templates/service.yaml (100%) rename build/{ => kubernetes/helm}/opsramp-tracing-proxy/values.yaml (100%) create mode 100644 build/kubernetes/yaml/k8s-config-cm.yaml rename {deployment/kubernetes => build/kubernetes/yaml}/k8s-deployment.yaml (83%) create mode 100644 build/kubernetes/yaml/k8s-rules-cm.yaml delete mode 100644 deployment/kubernetes/k8s-config-cm.yaml delete mode 100644 deployment/kubernetes/k8s-rules-cm.yaml diff --git a/build/opsramp-tracing-proxy/.helmignore b/build/kubernetes/helm/opsramp-tracing-proxy/.helmignore similarity index 100% rename from build/opsramp-tracing-proxy/.helmignore rename to build/kubernetes/helm/opsramp-tracing-proxy/.helmignore diff --git a/build/opsramp-tracing-proxy/Chart.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml similarity index 100% rename from build/opsramp-tracing-proxy/Chart.yaml rename to build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml diff --git a/build/opsramp-tracing-proxy/templates/_helpers.tpl b/build/kubernetes/helm/opsramp-tracing-proxy/templates/_helpers.tpl similarity index 100% rename from build/opsramp-tracing-proxy/templates/_helpers.tpl rename to build/kubernetes/helm/opsramp-tracing-proxy/templates/_helpers.tpl diff --git a/build/opsramp-tracing-proxy/templates/deployment-redis.yaml 
b/build/kubernetes/helm/opsramp-tracing-proxy/templates/deployment-redis.yaml similarity index 100% rename from build/opsramp-tracing-proxy/templates/deployment-redis.yaml rename to build/kubernetes/helm/opsramp-tracing-proxy/templates/deployment-redis.yaml diff --git a/build/opsramp-tracing-proxy/templates/deployment.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/templates/deployment.yaml similarity index 100% rename from build/opsramp-tracing-proxy/templates/deployment.yaml rename to build/kubernetes/helm/opsramp-tracing-proxy/templates/deployment.yaml diff --git a/build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/templates/k8s-config-cm.yaml similarity index 100% rename from build/opsramp-tracing-proxy/templates/k8s-config-cm.yaml rename to build/kubernetes/helm/opsramp-tracing-proxy/templates/k8s-config-cm.yaml diff --git a/build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml similarity index 100% rename from build/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml rename to build/kubernetes/helm/opsramp-tracing-proxy/templates/k8s-rules-cm.yaml diff --git a/build/opsramp-tracing-proxy/templates/service-redis.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/templates/service-redis.yaml similarity index 100% rename from build/opsramp-tracing-proxy/templates/service-redis.yaml rename to build/kubernetes/helm/opsramp-tracing-proxy/templates/service-redis.yaml diff --git a/build/opsramp-tracing-proxy/templates/service.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/templates/service.yaml similarity index 100% rename from build/opsramp-tracing-proxy/templates/service.yaml rename to build/kubernetes/helm/opsramp-tracing-proxy/templates/service.yaml diff --git a/build/opsramp-tracing-proxy/values.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml similarity index 100% rename from build/opsramp-tracing-proxy/values.yaml 
rename to build/kubernetes/helm/opsramp-tracing-proxy/values.yaml diff --git a/build/kubernetes/yaml/k8s-config-cm.yaml b/build/kubernetes/yaml/k8s-config-cm.yaml new file mode 100644 index 0000000000..7179a026c0 --- /dev/null +++ b/build/kubernetes/yaml/k8s-config-cm.yaml @@ -0,0 +1,417 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: opsramp-tracing-proxy +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: opsramp-tracing-proxy-config + labels: + name: opsramp-tracing-proxy-config + namespace: opsramp-tracing-proxy +data: + config.yaml: |- + ######################## + ## Trace Proxy Config ## + ######################## + + # ListenAddr is the IP and port on which to listen for incoming events. Incoming + # traffic is expected to be HTTP, so if using SSL put something like nginx in + # front to do the TLS Termination. + ListenAddr: 0.0.0.0:8082 + + # GRPCListenAddr is the IP and port on which to listen for incoming events over + # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put something like nginx in + # front to do the TLS Termination. + GRPCListenAddr: 0.0.0.0:9090 + + # PeerListenAddr is the IP and port on which to listen for traffic being + # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL + # put something like nginx in front to do the decryption. Must be different from + # ListenAddr + PeerListenAddr: 0.0.0.0:8083 + + GRPCPeerListenAddr: 0.0.0.0:8084 + + # CompressPeerCommunication determines whether to compress span data + # it forwards to peers. If it costs money to transmit data between different + # instances (e.g. they're spread across AWS availability zones), then you + # almost certainly want compression enabled to reduce your bill. The option to + # disable it is provided as an escape hatch for deployments that value lower CPU + # utilization over data transfer costs. + CompressPeerCommunication: true + + # OpsrampAPI is the URL for the upstream Opsramp API. 
+ OpsrampAPI: "" + + # Dataset you want to use for sampling + Dataset: "ds" + + #Tls Options + UseTls: true + UseTlsInsecure: false + + # LoggingLevel valid options are "debug", "info", "error", and "panic". + LoggingLevel: error + + # SendDelay is a short timer that will be triggered when a trace is complete. + # Trace Proxy will wait for this duration before actually sending the trace. The + # reason for this short delay is to allow for small network delays or clock + # jitters to elapse and any final spans to arrive before actually sending the + # trace. This supports duration strings with supplied units. Set to 0 for + # immediate sends. + SendDelay: 2s + + # BatchTimeout dictates how frequently to send unfulfilled batches. By default + # this will use the DefaultBatchTimeout in libtrace as its value, which is 100ms. + # Eligible for live reload. + BatchTimeout: 1s + + # TraceTimeout is a long timer; it represents the outside boundary of how long + # to wait before sending an incomplete trace. Normally traces are sent when the + # root span arrives. Sometimes the root span never arrives (due to crashes or + # whatever), and this timer will send a trace even without having received the + # root span. If you have particularly long-lived traces you should increase this + # timer. This supports duration strings with supplied units. + TraceTimeout: 60s + + # MaxBatchSize is the number of events to be included in the batch for sending + MaxBatchSize: 500 + + # SendTicker is a short timer; it determines the duration to use to check for traces to send + SendTicker: 100ms + + # UpstreamBufferSize and PeerBufferSize control how large of an event queue to use + # when buffering events that will be forwarded to peers or the upstream API. + UpstreamBufferSize: 1000 + PeerBufferSize: 1000 + + # AddHostMetadataToTrace determines whether to add information about + # the host that Refinery is running on to the spans that it processes. 
+ # If enabled, information about the host will be added to each span with the + # prefix `meta.refinery.`. + # Currently, the only value added is 'meta.refinery.local_hostname'. + AddHostMetadataToTrace: false + + # EnvironmentCacheTTL is the amount of time a cache entry will live that associates + # an API key with an environment name. + # Cache misses lookup the environment name using OpsRampAPI config value. + # Default is 1 hour ("1h"). + EnvironmentCacheTTL: "1h" + + # QueryAuthToken, if specified, provides a token that must be specified with + # the header "X-OpsRamp-Tracing-Proxy-Query" in order for a /query request to succeed. + # These /query requests are intended for debugging OpsRamp-Tracing-Proxy installations and + # are not typically needed in normal operation. + # Can be specified in the environment as TRACING_PROXY_QUERY_AUTH_TOKEN. + # If left unspecified, the /query endpoints are inaccessible. + # QueryAuthToken: "some-random-value" + + # AddRuleReasonToTrace causes traces that are sent to OpsRamp to include a field which + # contains text indicating which rule was evaluated that caused the trace to be included. + AddRuleReasonToTrace: true + + # AdditionalErrorFields should be a list of span fields that should be included when logging + # errors that happen during ingestion of events (for example, the span too large error). + # This is primarily useful in trying to track down misbehaving senders in a large installation. + # The fields `dataset`, `apihost`, and `environment` are always included. + # If a field is not present in the span, it will not be present in the error log. + # Default is ["trace.span_id"]. + AdditionalErrorFields: + - trace.span_id + + # AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate + # the number of child spans on the trace at the time the sampling decision was made. 
+ # This value is available to the rules-based sampler, making it possible to write rules that + # are dependent upon the number of spans in the trace. + # Default is false. + AddSpanCountToRoot: false + + # CacheOverrunStrategy controls the cache management behavior under memory pressure. + # "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, + # which is generally not helpful unless it occurs because of a permanent change in traffic patterns. + # In the "impact" strategy, the items having the most impact on the cache size are + # ejected from the cache earlier than normal but the cache is not resized. + # In all cases, it only applies if MaxAlloc is nonzero. + # Default is "resize" for compatibility but "impact" is recommended for most installations. + CacheOverrunStrategy: "impact" + + ######################### + ## Retry Configuration ## + ######################### + RetryConfiguration: + # InitialInterval the time to wait after the first failure before retrying. + InitialInterval: 500ms + # RandomizationFactor is a random factor used to calculate next backoff + # Randomized interval = RetryInterval * (1 ± RandomizationFactor) + RandomizationFactor: 0.5 + # Multiplier is the value multiplied by the backoff interval bounds + Multiplier: 1.5 + # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between + # consecutive retries will always be `MaxInterval`. + MaxInterval: 60s + # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request. + # Once this value is reached, the data is discarded. 
+ MaxElapsedTime: 15m + + ######################### + ## Proxy Configuration ## + ######################### + ProxyConfiguration: + # Protocol accepts http and https + Protocol: "http" + # Host takes the proxy server address + Host: "" + # Port takes the proxy server port + Port: 3128 + # UserName takes the proxy username + Username: "" + # Password takes the proxy password + Password: "" + + ################################## + ## Authentication Configuration ## + ################################## + AuthConfiguration: + # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made + Endpoint: "" + # Key - authentication key provided in OpsRamp Portal + Key: "" + # Secret - authentication Secret provided in OpsRamp Portal + Secret: "" + # TenantId - tenant/client id to which the traces are to be posted + TenantId: "" + + ############################ + ## Implementation Choices ## + ############################ + # Each of the config options below chooses an implementation of a Trace Proxy + # component to use. Depending on the choice, there may be more configuration + # required below in the section for that choice. Changing implementation choices + # requires a process restart. + # Collector describes which collector to use for collecting traces. The only + # current valid option is "InMemCollector". More can be added by adding + # implementations of the Collector interface. + Collector: "InMemCollector" + + # InMemCollector brings together all the settings that are relevant to + # collecting spans together to make traces. + InMemCollector: + + # The collection cache is used to collect all spans into a trace as well as + # remember the sampling decision for any spans that might come in after the + # trace has been marked "complete" (either by timing out or seeing the root + # span). 
The number of traces in the cache should be many multiples (100x to + # 1000x) of the total number of concurrently active traces (trace throughput * + # trace duration). + CacheCapacity: 1000 + + # MaxAlloc is optional. If set, it must be an integer >= 0. + # If set to a non-zero value, once per tick (see SendTicker) the collector + # will compare total allocated bytes to this value. If allocation is too + # high, cache capacity will be reduced and an error will be logged. + # Useful values for this setting are generally in the range of 75%-90% of + # available system memory. Using 80% is the recommended. + # This value should be set in according to the resources.limits.memory + # By default that setting is 4GB, and this is set to 85% of that limit + # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,837 + # MaxAlloc: 3435973836 + MaxAlloc: 0 + + ##################### + ## Peer Management ## + ##################### + + # Configure how OpsRamp-Tracing-Proxy peers are discovered and managed + PeerManagement: + # Strategy controls the way that traces are assigned to Trace Proxy nodes. + # The "legacy" strategy uses a simple algorithm that unfortunately causes + # 1/2 of the in-flight traces to be assigned to a different node whenever the + # number of nodes changes. + # The legacy strategy is deprecated and is intended to be removed in a future release. + # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the + # number of nodes) are disrupted when the node count changes. + # Not eligible for live reload. + Strategy: "hash" + + ########################################################### + ###### File (Suitable only for VM based deployments) ###### + ########################################################### + Type: "file" + + # Peers is the list of all servers participating in this proxy cluster. Events + # will be sharded evenly across all peers based on the Trace ID. 
Values here + # should be the base URL used to access the peer, and should include scheme, + # hostname (or ip address) and port. All servers in the cluster should be in + # this list, including this host. + Peers: [ + "http://127.0.0.1:8084", #only grpc peer listener used + # "http://127.0.0.1:8083", + # "http://10.1.2.3.4:8080", + # "http://refinery-1231:8080", + # "http://peer-3.fqdn" // assumes port 80 + ] + ########################################################### + + ########################################################### + ###### Redis (Suitable for all types of deployments) ###### + ########################################################### + # # The type should always be redis when deployed to Kubernetes environments + # Type: "redis" + # + # # RedisHost is used to connect to redis for peer cluster membership management. + # # Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes + # # precedence and this value is ignored. + # # Not eligible for live reload. + # # RedisHost will default to the name used for the release or name overrides depending on what is used, + # # but can be overriden to a specific value. + # RedisHost: localhost:6379 + # + # # RedisUsername is the username used to connect to redis for peer cluster membership management. + # # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes + # # precedence and this value is ignored. + # # Not eligible for live reload. + # RedisUsername: "" + # + # # RedisPassword is the password used to connect to redis for peer cluster membership management. + # # If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes + # # precedence and this value is ignored. + # # Not eligible for live reload. + # RedisPassword: "" + # + # # RedisPrefix is a string used as a prefix for the keys in redis while storing + # # the peer membership. 
It might be useful to set this in any situation where + # # multiple trace-proxy clusters or multiple applications want to share a single + # # Redis instance. It may not be blank. + # RedisPrefix: "tracing-proxy" + # + # # RedisDatabase is an integer from 0-15 indicating the database number to use + # # for the Redis instance storing the peer membership. It might be useful to set + # # this in any situation where multiple trace-proxy clusters or multiple + # # applications want to share a single Redis instance. + # RedisDatabase: 0 + # + # # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. + # # Not eligible for live reload. + # UseTLS: false + # + # # UseTLSInsecure disables certificate checks + # # Not eligible for live reload. + # UseTLSInsecure: false + # + # # IdentifierInterfaceName is optional. + # # Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. + # # When configured the pod's IP will be used in the peer list + # # IdentifierInterfaceName: eth0 + # + # # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first + # # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use + # # the first IPV6 unicast address found. + # UseIPV6Identifier: false + ########################################################### + + # LogrusLogger is a section of the config only used if you are using the + # LogrusLogger to send all logs to STDOUT using the logrus package. + LogrusLogger: + # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] + LogFormatter: 'json' + # LogOutput specifies where the logs are supposed to be written. 
Accpets one of ["stdout", "stderr", "file"] + LogOutput: 'stdout' + + MetricsConfig: + # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp + Enable: true + + # ListenAddr determines the interface and port on which Prometheus will + # listen for requests for /metrics. Must be different from the main Trace Proxy + # listener. + ListenAddr: '0.0.0.0:2112' + + # OpsRampAPI is the URL for the upstream OpsRamp API. + OpsRampAPI: "" + + # ReportingInterval is the frequency specified in seconds at which + # the metrics are collected and sent to OpsRamp + ReportingInterval: 10 + + # MetricsList is a list of regular expressions which match the metric + # names. Keep the list as small as possible since too many regular expressions can lead to bad performance. + # Internally, all the items in the list are concatenated using '|' to make the computation faster. + MetricsList: [ ".*" ] + + GRPCServerParameters: + # MaxConnectionIdle is a duration for the amount of time after which an + # idle connection would be closed by sending a GoAway. Idleness duration is + # defined since the most recent time the number of outstanding RPCs became + # zero or the connection establishment. + # 0s sets duration to infinity which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219 + # MaxConnectionIdle: "1m" + + # MaxConnectionAge is a duration for the maximum amount of time a + # connection may exist before it will be closed by sending a GoAway. A + # random jitter of +/-10% will be added to MaxConnectionAge to spread out + # connection storms. 
+ # 0s sets duration to infinity which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 + # MaxConnectionAge: "0s" + + # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + # which the connection will be forcibly closed. + # 0s sets duration to infinity which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 + # MaxConnectionAgeGrace: "0s" + + # After a duration of this time if the server doesn't see any activity it + # pings the client to see if the transport is still alive. + # If set below 1s, a minimum value of 1s will be used instead. + # 0s sets duration to 2 hours which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 + # Time: "10s" + + # After having pinged for keepalive check, the server waits for a duration + # of Timeout and if no activity is seen even after that the connection is + # closed. + # 0s sets duration to 20 seconds which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 + # Timeout: "2s" + + ################################ + ## Sample Cache Configuration ## + ################################ + + # Sample Cache Configuration controls the sample cache used to retain information about trace + # status after the sampling decision has been made. + SampleCacheConfig: + # Type controls the type of sample cache used. + # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is + # 5x the size of the trace cache. This is Refinery's original sample cache strategy. 
+ # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember + # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. + # It is also more configurable. The cuckoo filter is recommended for most installations. + # Default is "legacy". + # Type: "cuckoo" + + # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. + # Refinery keeps a record of each trace that was kept and sent to Honeycomb, along with some + # statistical information. This is most useful in cases where the trace was sent before sending + # the root span, so that the root span can be decorated with accurate metadata. + # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). + # It Does not apply to the "legacy" type of cache. + # KeptSize: 10_000 + + # DroppedSize controls the size of the cuckoo dropped traces cache. + # This cache consumes 4-6 bytes per trace at a scale of millions of traces. + # Changing its size with live reload sets a future limit, but does not have an immediate effect. + # Default is 1_000_000 traces. + # It Does not apply to the "legacy" type of cache. + # DroppedSize: 1_000_000 + + # SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates + # the remaining capacity of its dropped traces cache and possibly cycles it. + # This cache is quite resilient so it doesn't need to happen very often, but the + # operation is also inexpensive. + # Default is 10 seconds. + # It Does not apply to the "legacy" type of cache. 
+ # SizeCheckInterval: "10s" diff --git a/deployment/kubernetes/k8s-deployment.yaml b/build/kubernetes/yaml/k8s-deployment.yaml similarity index 83% rename from deployment/kubernetes/k8s-deployment.yaml rename to build/kubernetes/yaml/k8s-deployment.yaml index 610ef92090..960013ff69 100644 --- a/deployment/kubernetes/k8s-deployment.yaml +++ b/build/kubernetes/yaml/k8s-deployment.yaml @@ -22,7 +22,7 @@ spec: spec: containers: - name: opsramp-tracing-proxy - image: lokeshopsramp/tracing-proxy + image: us-docker.pkg.dev/opsramp-registry/agent-images/trace-proxy imagePullPolicy: Always ports: - containerPort: 9090 @@ -35,12 +35,12 @@ spec: cpu: "4" volumeMounts: - name: opsramp-tracing-rules - mountPath: /etc/tracing-proxy/rules.toml - subPath: rules.toml + mountPath: /etc/tracing-proxy/rules.yaml + subPath: rules.yaml readOnly: true - name: opsramp-tracing-config - mountPath: /etc/tracing-proxy/config.toml - subPath: config.toml + mountPath: /etc/tracing-proxy/config.yaml + subPath: config.yaml readOnly: true volumes: - configMap: diff --git a/build/kubernetes/yaml/k8s-rules-cm.yaml b/build/kubernetes/yaml/k8s-rules-cm.yaml new file mode 100644 index 0000000000..05450fd3fb --- /dev/null +++ b/build/kubernetes/yaml/k8s-rules-cm.yaml @@ -0,0 +1,225 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: opsramp-tracing-proxy +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: opsramp-tracing-proxy-rules + labels: + name: opsramp-tracing-proxy-rules + namespace: opsramp-tracing-proxy +data: + rules.yaml: |- + ############################ + ## Sampling Rules Config ## + ############################ + + # DryRun - If enabled, marks traces that would be dropped given current sampling rules, + # and sends all traces regardless + DryRun: true + + # DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to trace_proxy_kept + DryRunFieldName: trace_proxy_kept + + # DeterministicSampler is a section of the config for manipulating 
the + # Deterministic Sampler implementation. This is the simplest sampling algorithm + # - it is a static sample rate, choosing traces randomly to either keep or send + # (at the appropriate rate). It is not influenced by the contents of the trace. + Sampler: DeterministicSampler + + # SampleRate is the rate at which to sample. It indicates a ratio, where one + # sample trace is kept for every n traces seen. For example, a SampleRate of 30 + # will keep 1 out of every 30 traces. The choice on whether to keep any specific + # trace is random, so the rate is approximate. + # Eligible for live reload. + SampleRate: 1 + + #dataset1: + # + # # Note: If your dataset name contains a space, you will have to escape the dataset name + # # using single quotes, such as ['dataset 1'] + # + # # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler + # # implementation. This sampler collects the values of a number of fields from a + # # trace and uses them to form a key. This key is handed to the standard dynamic + # # sampler algorithm which generates a sample rate based on the frequency with + # # which that key has appeared in the previous ClearFrequencySec seconds.This + # # sampler uses the AvgSampleRate algorithm from + # # that package. + # Sampler: DynamicSampler + # + # # SampleRate is the goal rate at which to sample. It indicates a ratio, where + # # one sample trace is kept for every n traces seen. For example, a SampleRate of + # # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # # sampler, who assigns a sample rate for each trace based on the fields selected + # # from that trace. + # SampleRate: 2 + # + # # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler. + # # The combination of values from all of these fields should reflect how interesting the trace is compared to + # # another. 
A good field selection has consistent values for high-frequency, boring traffic, and unique values for + # # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent + # # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of + # # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is + # # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a + # # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can + # # become interesting when indicating an error) as a good set of fields since it will allowing proper sampling + # # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint. + # # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of + # # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces. + # # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore + # # interesting traces, like traces that experienced a `500`, might not be sampled. + # # Field names may come from any span in the trace. + # FieldList: + # - "" + # + # # UseTraceLength will add the number of spans in the trace in to the dynamic + # # sampler as part of the key. The number of spans is exact, so if there are + # # normally small variations in trace length you may want to leave this off. If + # # traces are consistent lengths and changes in trace length is a useful + # # indicator of traces you'd like to see in OpsRamp, set this to true. 
+ # UseTraceLength: true + # + # # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # # to the root span of the trace containing the key used by the sampler to decide + # # the sample rate. This can be helpful in understanding why the sampler is + # # making certain decisions about sample rate and help you understand how to + # # better choose the sample rate key (aka the FieldList setting above) to use. + # AddSampleRateKeyToTrace: true + # + # # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # # when adding the sample rate key to the trace. This setting is only used when + # # AddSampleRateKeyToTrace is true. + # AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key + # + # # ClearFrequencySec is the name of the field the sampler will use to determine + # # the period over which it will calculate the sample rate. This setting defaults + # # to 30. + # ClearFrequencySec: 60 + #dataset2: + # + # # EMADynamicSampler is a section of the config for manipulating the Exponential + # # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, + # # it attempts to average a given sample rate, weighting rare traffic and frequent + # # traffic differently so as to end up with the correct average. + # # + # # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended + # # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs + # # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential + # # Moving Average of counts seen per key, and adjusts this average at regular intervals. + # # The weight applied to more recent intervals is defined by `weight`, a number between + # # (0, 1) - larger values weight the average more toward recent observations. 
In other words, + # # a larger weight will cause sample rates more quickly adapt to traffic patterns, + # # while a smaller weight will result in sample rates that are less sensitive to bursts or drops + # # in traffic and thus more consistent over time. + # # + # # Keys that are not found in the EMA will always have a sample + # # rate of 1. Keys that occur more frequently will be sampled on a logarithmic + # # curve. In other words, every key will be represented at least once in any + # # given window and more frequent keys will have their sample rate + # # increased proportionally to wind up with the goal sample rate. + # Sampler: EMADynamicSampler + # + # # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where + # # one sample trace is kept for every n traces seen. For example, a SampleRate of + # # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # # sampler, who assigns a sample rate for each trace based on the fields selected + # # from that trace. + # GoalSampleRate: 2 + # + # # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler. + # # The combination of values from all of these fields should reflect how interesting the trace is compared to + # # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for + # # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent + # # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of + # # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is + # # not unique enough, you will not be guaranteed samples of the most interesting traces. 
As an example, consider a + # # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can + # # become interesting when indicating an error) as a good set of fields since it will allowing proper sampling + # # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint. + # # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of + # # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces. + # # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore + # # interesting traces, like traces that experienced a `500`, might not be sampled. + # # Field names may come from any span in the trace. + # FieldList: [] + # + # # UseTraceLength will add the number of spans in the trace in to the dynamic + # # sampler as part of the key. The number of spans is exact, so if there are + # # normally small variations in trace length you may want to leave this off. If + # # traces are consistent lengths and changes in trace length is a useful + # # indicator of traces you'd like to see in Honeycomb, set this to true. + # UseTraceLength: true + # + # # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # # to the root span of the trace containing the key used by the sampler to decide + # # the sample rate. This can be helpful in understanding why the sampler is + # # making certain decisions about sample rate and help you understand how to + # # better choose the sample rate key (aka the FieldList setting above) to use. + # AddSampleRateKeyToTrace: true + # + # # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # # when adding the sample rate key to the trace. This setting is only used when + # # AddSampleRateKeyToTrace is true. 
+ # AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key
+ #
+ # # AdjustmentInterval defines how often (in seconds) we adjust the moving average from
+ # # recent observations. Default 15s
+ # AdjustmentInterval: 15
+ #
+ # # Weight is a value between (0, 1) indicating the weighting factor used to adjust
+ # # the EMA. With larger values, newer data will influence the average more, and older
+ # # values will be factored out more quickly. In mathematical literature concerning EMA,
+ # # this is referred to as the `alpha` constant.
+ # # Default is 0.5
+ # Weight: 0.5
+ #
+ # # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA.
+ # # Once MaxKeys is reached, new keys will not be included in the sample rate map, but
+ # # existing keys will continue to be counted. You can use this to keep the sample rate
+ # # map size under control.
+ # MaxKeys: 0
+ #
+ # # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key
+ # # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to
+ # # decide what constitutes "zero". Keys with averages below this threshold will be removed
+ # # from the EMA. Default is the same as Weight, as this prevents a key with the smallest
+ # # integer value (1) from being aged out immediately. This value should generally be <= Weight,
+ # # unless you have very specific reasons to set it higher.
+ # AgeOutValue: 0.5
+ #
+ # # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define
+ # # the burst detection threshold. If total counts observed for a given interval exceed the threshold
+ # # EMA is updated immediately, rather than waiting on the AdjustmentInterval.
+ # # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles,
+ # # burst detection will kick in. 
+ # BurstMultiple: 2 + # + # # BurstDetectionDelay indicates the number of intervals to run after Start is called before + # # burst detection kicks in. + # # Defaults to 3 + # BurstDetectionDelay: 3 + #dataset3: + # Sampler: DeterministicSampler + # SampleRate: 10 + #dataset4: + # Sampler: RulesBasedSampler + # CheckNestedFields: false + # rule: + # # Rule name + # - name: "" + # # Drop Condition (examples: true, false) + # drop: + # condition: + # # Field Name (example: status_code) + # - field: "" + # # Operator Value (example: =) + # operator: "" + # # Field Value (example: 500) + # value: "" + #dataset5: + # Sampler: TotalThroughputSampler + # GoalThroughputPerSec: 100 + # FieldList: '' diff --git a/deployment/kubernetes/k8s-config-cm.yaml b/deployment/kubernetes/k8s-config-cm.yaml deleted file mode 100644 index ad8fd4a4f2..0000000000 --- a/deployment/kubernetes/k8s-config-cm.yaml +++ /dev/null @@ -1,320 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: opsramp-tracing-proxy-config - namespace: opsramp-tracing-proxy ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: opsramp-tracing-proxy-config - labels: - name: opsramp-tracing-proxy-config - namespace: opsramp-tracing-proxy -data: - config.toml: |- - ##################### - ## Refinery Config ## - ##################### - - # ListenAddr is the IP and port on which to listen for incoming events. Incoming - # traffic is expected to be HTTP, so if using SSL put something like nginx in - # front to do the decryption. - # Should be of the form 0.0.0.0:8080 - # Not eligible for live reload. - ListenAddr = "0.0.0.0:8082" - - # GRPCListenAddr is the IP and port on which to listen for incoming events over - # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put - # something like nginx in front to do the decryption. - # Should be of the form 0.0.0.0:9090 - # Not eligible for live reload. 
- GRPCListenAddr = "0.0.0.0:9090" - - # PeerListenAddr is the IP and port on which to listen for traffic being - # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL - # put something like nginx in front to do the decryption. Must be different from - # ListenAddr - # Should be of the form 0.0.0.0:8081 - # Not eligible for live reload. - PeerListenAddr = "0.0.0.0:8083" - - # CompressPeerCommunication determines whether refinery will compress span data - # it forwards to peers. If it costs money to transmit data between refinery - # instances (e.g. they're spread across AWS availability zones), then you - # almost certainly want compression enabled to reduce your bill. The option to - # disable it is provided as an escape hatch for deployments that value lower CPU - # utilization over data transfer costs. - CompressPeerCommunication = true - - # APIKeys is a list of Honeycomb API keys that the proxy will accept. This list - # only applies to events - other Honeycomb API actions will fall through to the - # upstream API directly. - # Adding keys here causes events arriving with API keys not in this list to be - # rejected with an HTTP 401 error If an API key that is a literal '*' is in the - # list, all API keys are accepted. - # Eligible for live reload. - APIKeys = [ - # "replace-me", - # "more-optional-keys", - "*", # wildcard accept all keys - ] - - # HoneycombAPI is the URL for the upstream Honeycomb API. - # Eligible for live reload. - #HoneycombAPI = "localhost:50052" - HoneycombAPI = "https://asura.opsramp.net" - - # SendDelay is a short timer that will be triggered when a trace is complete. - # Refinery will wait this duration before actually sending the trace. The - # reason for this short delay is to allow for small network delays or clock - # jitters to elapse and any final spans to arrive before actually sending the - # trace. This supports duration strings with supplied units. Set to 0 for - # immediate sends. 
- # Eligible for live reload. - SendDelay = "2s" - - # TraceTimeout is a long timer; it represents the outside boundary of how long - # to wait before sending an incomplete trace. Normally traces are sent when the - # root span arrives. Sometimes the root span never arrives (due to crashes or - # whatever), and this timer will send a trace even without having received the - # root span. If you have particularly long-lived traces you should increase this - # timer. This supports duration strings with supplied units. - # Eligible for live reload. - TraceTimeout = "60s" - - # MaxBatchSize is the number of events to be included in the batch for sending - MaxBatchSize = 500 - - # SendTicker is a short timer; it determines the duration to use to check for traces to send - SendTicker = "100ms" - - # LoggingLevel is the level above which we should log. Debug is very verbose, - # and should only be used in pre-production environments. Info is the - # recommended level. Valid options are "debug", "info", "error", and - # "panic" - # Not eligible for live reload. - LoggingLevel = "debug" - - # UpstreamBufferSize and PeerBufferSize control how large of an event queue to use - # when buffering events that will be forwarded to peers or the upstream API. - UpstreamBufferSize = 10000 - PeerBufferSize = 10000 - - # DebugServiceAddr sets the IP and port the debug service will run on - # The debug service will only run if the command line flag -d is specified - # The debug service runs on the first open port between localhost:6060 and :6069 by default - # DebugServiceAddr = "localhost:8085" - - # AddHostMetadataToTrace determines whether or not to add information about - # the host that Refinery is running on to the spans that it processes. - # If enabled, information about the host will be added to each span with the - # prefix `meta.refinery.`. - # Currently the only value added is 'meta.refinery.local_hostname'. 
- # Not eligible for live reload - AddHostMetadataToTrace = false - - ############################ - ## Implementation Choices ## - ############################ - - # Each of the config options below chooses an implementation of a Refinery - # component to use. Depending on the choice there may be more configuration - # required below in the section for that choice. Changing implementation choices - # requires a process restart; these changes will not be picked up by a live - # config reload. (Individual config options for a given implementation may be - # eligible for live reload). - - # Collector describes which collector to use for collecting traces. The only - # current valid option is "InMemCollector".. More can be added by adding - # implementations of the Collector interface. - Collector = "InMemCollector" - - # Logger describes which logger to use for Refinery logs. Valid options are - # "logrus" and "honeycomb". The logrus option will write logs to STDOUT and the - # honeycomb option will send them to a Honeycomb dataset. - Logger = "logrus" - - # Metrics describes which service to use for Refinery metrics. Valid options are - # "prometheus" and "honeycomb". The prometheus option starts a listener that - # will reply to a request for /metrics. The honeycomb option will send summary - # metrics to a Honeycomb dataset. - Metrics = "prometheus" - - ######################### - ## Peer Management ## - ######################### - - [PeerManagement] - Type = "file" - # Peers is the list of all servers participating in this proxy cluster. Events - # will be sharded evenly across all peers based on the Trace ID. Values here - # should be the base URL used to access the peer, and should include scheme, - # hostname (or ip address) and port. All servers in the cluster should be in - # this list, including this host. 
- Peers = [ - "http://127.0.0.1:8083", - # "http://127.0.0.1:8083", - # "http://10.1.2.3.4:8080", - # "http://refinery-1231:8080", - # "http://peer-3.fqdn" // assumes port 80 - ] - - # [PeerManagement] - # Type = "redis" - # RedisHost is is used to connect to redis for peer cluster membership management. - # Further, if the environment variable 'REFINERY_REDIS_HOST' is set it takes - # precedence and this value is ignored. - # Not eligible for live reload. - # RedisHost = "localhost:6379" - - # RedisPassword is the password used to connect to redis for peer cluster membership management. - # If the environment variable 'REFINERY_REDIS_PASSWORD' is set it takes - # precedence and this value is ignored. - # Not eligible for live reload. - # RedisPassword = "" - - # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. - # Not eligible for live reload. - # UseTLS = false - - # UseTLSInsecure disables certificate checks - # Not eligible for live reload. - # UseTLSInsecure = false - - # IdentifierInterfaceName is optional. By default, when using RedisHost, Refinery will use - # the local hostname to identify itself to other peers in Redis. If your environment - # requires that you use IPs as identifiers (for example, if peers can't resolve eachother - # by name), you can specify the network interface that Refinery is listening on here. - # Refinery will use the first unicast address that it finds on the specified network - # interface as its identifier. - # Not eligible for live reload. - # IdentifierInterfaceName = "eth0" - - # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Refinery will default to the first - # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use - # the first IPV6 unicast address found. - # UseIPV6Identifier = false - - # RedisIdentifier is optional. 
By default, when using RedisHost, Refinery will use - # the local hostname to identify itself to other peers in Redis. If your environment - # requires that you use IPs as identifiers (for example, if peers can't resolve eachother - # by name), you can specify the exact identifier (IP address, etc) to use here. - # Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set. - # RedisIdentifier = "192.168.1.1" - - ######################### - ## In-Memory Collector ## - ######################### - - # InMemCollector brings together all the settings that are relevant to - # collecting spans together to make traces. - [InMemCollector] - - # The collection cache is used to collect all spans into a trace as well as - # remember the sampling decision for any spans that might come in after the - # trace has been marked "complete" (either by timing out or seeing the root - # span). The number of traces in the cache should be many multiples (100x to - # 1000x) of the total number of concurrently active traces (trace throughput * - # trace duration). - # Eligible for live reload. Growing the cache capacity with a live config reload - # is fine. Avoid shrinking it with a live reload (you can, but it may cause - # temporary odd sampling decisions). - CacheCapacity = 1000 - - # MaxAlloc is optional. If set, it must be an integer >= 0. 64-bit values are - # supported. - # If set to a non-zero value, once per tick (see SendTicker) the collector - # will compare total allocated bytes to this value. If allocation is too - # high, cache capacity will be reduced and an error will be logged. - # Useful values for this setting are generally in the range of 75%-90% of - # available system memory. - MaxAlloc = 0 - - ################### - ## Logrus Logger ## - ################### - - # LogrusLogger is a section of the config only used if you are using the - # LogrusLogger to send all logs to STDOUT using the logrus package. 
If you are - # using a different logger (eg honeycomb logger) you can leave all this - # commented out. - [LogrusLogger] - - # logrus logger currently has no options! - - ###################### - ## Honeycomb Logger ## - ###################### - - # HoneycombLogger is a section of the config only used if you are using the - # HoneycombLogger to send all logs to a Honeycomb Dataset. If you are using a - # different logger (eg file-based logger) you can leave all this commented out. - - [HoneycombLogger] - - # LoggerHoneycombAPI is the URL for the upstream Honeycomb API. - # Eligible for live reload. - LoggerHoneycombAPI = "https://api.honeycomb.io" - - # LoggerAPIKey is the API key to use to send log events to the Honeycomb logging - # dataset. This is separate from the APIKeys used to authenticate regular - # traffic. - # Eligible for live reload. - LoggerAPIKey = "abcd1234" - - # LoggerDataset is the name of the dataset to which to send Refinery logs - # Eligible for live reload. - LoggerDataset = "Refinery Logs" - - # LoggerSamplerEnabled enables a PerKeyThroughput dynamic sampler for log messages. - # This will sample log messages based on [log level:message] key on a per second throughput basis. - # Not eligible for live reload. - LoggerSamplerEnabled = true - - # LoggerSamplerThroughput is the per key per second throughput for the log message dynamic sampler. - # Not eligible for live reload. - LoggerSamplerThroughput = 10 - - ####################### - ## Honeycomb Metrics ## - ####################### - - # HoneycombMetrics is a section of the config only used if you are using the - # HoneycombMetrics to send all metrics to a Honeycomb Dataset. If you are using a - # different metrics service (eg prometheus or metricsd) you can leave all this - # commented out. - - [HoneycombMetrics] - - # MetricsHoneycombAPI is the URL for the upstream Honeycomb API. - # Eligible for live reload. 
- MetricsHoneycombAPI = "https://api.honeycomb.io" - - # MetricsAPIKey is the API key to use to send log events to the Honeycomb logging - # dataset. This is separate from the APIKeys used to authenticate regular - # traffic. - # Eligible for live reload. - MetricsAPIKey = "abcd1234" - - # MetricsDataset is the name of the dataset to which to send Refinery metrics - # Eligible for live reload. - MetricsDataset = "Refinery Metrics" - - # MetricsReportingInterval is the frequency (in seconds) to send metric events - # to Honeycomb. Between 1 and 60 is recommended. - # Not eligible for live reload. - MetricsReportingInterval = 3 - - - #####################@## - ## Prometheus Metrics ## - #####################@## - - [PrometheusMetrics] - - # MetricsListenAddr determines the interface and port on which Prometheus will - # listen for requests for /metrics. Must be different from the main Refinery - # listener. - # Not eligible for live reload. - MetricsListenAddr = "localhost:2112" diff --git a/deployment/kubernetes/k8s-rules-cm.yaml b/deployment/kubernetes/k8s-rules-cm.yaml deleted file mode 100644 index 463cc6bc6f..0000000000 --- a/deployment/kubernetes/k8s-rules-cm.yaml +++ /dev/null @@ -1,266 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: opsramp-tracing-proxy-rules - namespace: opsramp-tracing-proxy ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: opsramp-tracing-proxy-rules - labels: - name: opsramp-tracing-proxy-rules - namespace: opsramp-tracing-proxy -data: - rules.toml: |- - ############################ - ## Sampling Rules Config ## - ############################ - - # DryRun - If enabled, marks traces that would be dropped given current sampling rules, - # and sends all traces regardless - DryRun = true - - # DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to refinery_kept - DryRunFieldName = "fromProxy" - - # DeterministicSampler is a section of the config for manipulating the - # 
Deterministic Sampler implementation. This is the simplest sampling algorithm - # - it is a static sample rate, choosing traces randomly to either keep or send - # (at the appropriate rate). It is not influenced by the contents of the trace. - Sampler = "DeterministicSampler" - - # SampleRate is the rate at which to sample. It indicates a ratio, where one - # sample trace is kept for every n traces seen. For example, a SampleRate of 30 - # will keep 1 out of every 30 traces. The choice on whether to keep any specific - # trace is random, so the rate is approximate. - # Eligible for live reload. - SampleRate = 1 - - [dataset1] - - # Note: If your dataset name contains a space, you will have to escape the dataset name - # using single quotes, such as ['dataset 1'] - - # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler - # implementation. This sampler collects the values of a number of fields from a - # trace and uses them to form a key. This key is handed to the standard dynamic - # sampler algorithm which generates a sample rate based on the frequency with - # which that key has appeared in the previous ClearFrequencySec seconds. See - # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics - # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from - # that package. - Sampler = "DynamicSampler" - - # SampleRate is the goal rate at which to sample. It indicates a ratio, where - # one sample trace is kept for every n traces seen. For example, a SampleRate of - # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic - # sampler, who assigns a sample rate for each trace based on the fields selected - # from that trace. - # Eligible for live reload. - SampleRate = 2 - - # FieldList is a list of all the field names to use to form the key that will be - # handed to the dynamic sampler. 
The cardinality of the combination of values - # from all of these keys should be reasonable in the face of the frequency of - # those keys. If the combination of fields in these keys essentially makes them - # unique, the dynamic sampler will do no sampling. If the keys have too few - # values, you won't get samples of the most interesting traces. A good key - # selection will have consistent values for high frequency boring traffic and - # unique values for outliers and interesting traffic. Including an error field - # (or something like HTTP status code) is an excellent choice. As an example, - # assuming 30 or so endpoints, a combination of HTTP endpoint and status code - # would be a good set of keys in order to let you see accurately use of all - # endpoints and call out when there is failing traffic to any endpoint. Field - # names may come from any span in the trace. - # Eligible for live reload. - FieldList = ["request.method","response.status_code"] - - # UseTraceLength will add the number of spans in the trace in to the dynamic - # sampler as part of the key. The number of spans is exact, so if there are - # normally small variations in trace length you may want to leave this off. If - # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Opsramp, set this to true. - # Eligible for live reload. - UseTraceLength = true - - # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field - # to the root span of the trace containing the key used by the sampler to decide - # the sample rate. This can be helpful in understanding why the sampler is - # making certain decisions about sample rate and help you understand how to - # better choose the sample rate key (aka the FieldList setting above) to use. - AddSampleRateKeyToTrace = true - - # AddSampleRateKeyToTraceField is the name of the field the sampler will use - # when adding the sample rate key to the trace. 
This setting is only used when - # AddSampleRateKeyToTrace is true. - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - # ClearFrequencySec is the name of the field the sampler will use to determine - # the period over which it will calculate the sample rate. This setting defaults - # to 30. - # Eligible for live reload. - ClearFrequencySec = 60 - - [dataset2] - - # EMADynamicSampler is a section of the config for manipulating the Exponential - # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, - # it attempts to average a given sample rate, weighting rare traffic and frequent - # traffic differently so as to end up with the correct average. - # - # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended - # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs - # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential - # Moving Average of counts seen per key, and adjusts this average at regular intervals. - # The weight applied to more recent intervals is defined by `weight`, a number between - # (0, 1) - larger values weight the average more toward recent observations. In other words, - # a larger weight will cause sample rates more quickly adapt to traffic patterns, - # while a smaller weight will result in sample rates that are less sensitive to bursts or drops - # in traffic and thus more consistent over time. - # - # Keys that are not found in the EMA will always have a sample - # rate of 1. Keys that occur more frequently will be sampled on a logarithmic - # curve. In other words, every key will be represented at least once in any - # given window and more frequent keys will have their sample rate - # increased proportionally to wind up with the goal sample rate. - Sampler = "EMADynamicSampler" - - # GoalSampleRate is the goal rate at which to sample. 
It indicates a ratio, where - # one sample trace is kept for every n traces seen. For example, a SampleRate of - # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic - # sampler, who assigns a sample rate for each trace based on the fields selected - # from that trace. - # Eligible for live reload. - GoalSampleRate = 2 - - # FieldList is a list of all the field names to use to form the key that will be - # handed to the dynamic sampler. The cardinality of the combination of values - # from all of these keys should be reasonable in the face of the frequency of - # those keys. If the combination of fields in these keys essentially makes them - # unique, the dynamic sampler will do no sampling. If the keys have too few - # values, you won't get samples of the most interesting traces. A good key - # selection will have consistent values for high frequency boring traffic and - # unique values for outliers and interesting traffic. Including an error field - # (or something like HTTP status code) is an excellent choice. As an example, - # assuming 30 or so endpoints, a combination of HTTP endpoint and status code - # would be a good set of keys in order to let you see accurately use of all - # endpoints and call out when there is failing traffic to any endpoint. Field - # names may come from any span in the trace. - # Eligible for live reload. - FieldList = ["request.method","response.status_code"] - - # UseTraceLength will add the number of spans in the trace in to the dynamic - # sampler as part of the key. The number of spans is exact, so if there are - # normally small variations in trace length you may want to leave this off. If - # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Opsramp, set this to true. - # Eligible for live reload. 
- UseTraceLength = true - - # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field - # to the root span of the trace containing the key used by the sampler to decide - # the sample rate. This can be helpful in understanding why the sampler is - # making certain decisions about sample rate and help you understand how to - # better choose the sample rate key (aka the FieldList setting above) to use. - AddSampleRateKeyToTrace = true - - # AddSampleRateKeyToTraceField is the name of the field the sampler will use - # when adding the sample rate key to the trace. This setting is only used when - # AddSampleRateKeyToTrace is true. - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - # AdjustmentInterval defines how often (in seconds) we adjust the moving average from - # recent observations. Default 15s - # Eligible for live reload. - AdjustmentInterval = 15 - - # Weight is a value between (0, 1) indicating the weighting factor used to adjust - # the EMA. With larger values, newer data will influence the average more, and older - # values will be factored out more quickly. In mathematical literature concerning EMA, - # this is referred to as the `alpha` constant. - # Default is 0.5 - # Eligible for live reload. - Weight = 0.5 - - # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. - # Once MaxKeys is reached, new keys will not be included in the sample rate map, but - # existing keys will continue to be be counted. You can use this to keep the sample rate - # map size under control. - # Eligible for live reload - MaxKeys = 0 - - # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key - # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to - # decide what constitutes "zero". Keys with averages below this threshold will be removed - # from the EMA. 
Default is the same as Weight, as this prevents a key with the smallest - # integer value (1) from being aged out immediately. This value should generally be <= Weight, - # unless you have very specific reasons to set it higher. - # Eligible for live reload - AgeOutValue = 0.5 - - # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define - # the burst detection threshold. If total counts observed for a given interval exceed the threshold - # EMA is updated immediately, rather than waiting on the AdjustmentInterval. - # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, - # burst detection will kick in. - # Eligible for live reload - BurstMultiple = 2.0 - - # BurstDetectionDelay indicates the number of intervals to run after Start is called before - # burst detection kicks in. - # Defaults to 3 - # Eligible for live reload - BurstDetectionDelay = 3 - - [dataset3] - - Sampler = "DeterministicSampler" - SampleRate = 10 - - [dataset4] - - Sampler = "RulesBasedSampler" - - [[dataset4.rule]] - name = "drop healtchecks" - drop = true - [[dataset4.rule.condition]] - field = "http.route" - operator = "=" - value = "/health-check" - - [[dataset4.rule]] - name = "keep slow 500 errors" - SampleRate = 1 - [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = 500 - [[dataset4.rule.condition]] - field = "duration_ms" - operator = ">=" - value = 1000.789 - - [[dataset4.rule]] - name = "dynamically sample 200 responses" - [[dataset4.rule.condition]] - field = "status_code" - operator = "=" - value = 200 - [dataset4.rule.sampler.EMADynamicSampler] - Sampler = "EMADynamicSampler" - GoalSampleRate = 15 - FieldList = ["request.method", "request.route"] - AddSampleRateKeyToTrace = true - AddSampleRateKeyToTraceField = "meta.refinery.dynsampler_key" - - [[dataset4.rule]] - SampleRate = 10 # default when no rules match, if missing defaults to 10 - - [dataset5] - - Sampler = 
"TotalThroughputSampler" - GoalThroughputPerSec = 100 - FieldList = "[request.method]" - From 7c339ea56b8689a9354aeb4cb9af80522a2aed84 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Thu, 20 Apr 2023 10:28:57 +0530 Subject: [PATCH 307/351] removing old references --- build/kubernetes/helm/opsramp-tracing-proxy/values.yaml | 2 +- build/kubernetes/yaml/k8s-config-cm.yaml | 2 +- build/kubernetes/yaml/k8s-rules-cm.yaml | 2 +- config_complete.yaml | 2 +- rules_complete.yaml | 9 +++------ 5 files changed, 7 insertions(+), 10 deletions(-) diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml index a73dab8436..9b164ea0be 100644 --- a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml +++ b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml @@ -397,7 +397,7 @@ config: # Type: "cuckoo" # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. - # Refinery keeps a record of each trace that was kept and sent to Honeycomb, along with some + # Refinery keeps a record of each trace that was kept and sent to OpsRamp, along with some # statistical information. This is most useful in cases where the trace was sent before sending # the root span, so that the root span can be decorated with accurate metadata. # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). diff --git a/build/kubernetes/yaml/k8s-config-cm.yaml b/build/kubernetes/yaml/k8s-config-cm.yaml index 7179a026c0..34a431ec4e 100644 --- a/build/kubernetes/yaml/k8s-config-cm.yaml +++ b/build/kubernetes/yaml/k8s-config-cm.yaml @@ -394,7 +394,7 @@ data: # Type: "cuckoo" # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. - # Refinery keeps a record of each trace that was kept and sent to Honeycomb, along with some + # Refinery keeps a record of each trace that was kept and sent to OpsRamp, along with some # statistical information. 
This is most useful in cases where the trace was sent before sending # the root span, so that the root span can be decorated with accurate metadata. # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). diff --git a/build/kubernetes/yaml/k8s-rules-cm.yaml b/build/kubernetes/yaml/k8s-rules-cm.yaml index 05450fd3fb..14df8f1cd4 100644 --- a/build/kubernetes/yaml/k8s-rules-cm.yaml +++ b/build/kubernetes/yaml/k8s-rules-cm.yaml @@ -150,7 +150,7 @@ data: # # sampler as part of the key. The number of spans is exact, so if there are # # normally small variations in trace length you may want to leave this off. If # # traces are consistent lengths and changes in trace length is a useful - # # indicator of traces you'd like to see in Honeycomb, set this to true. + # # indicator of traces you'd like to see in OpsRamp, set this to true. # UseTraceLength: true # # # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field diff --git a/config_complete.yaml b/config_complete.yaml index 615bf1682c..ebebfebc70 100644 --- a/config_complete.yaml +++ b/config_complete.yaml @@ -395,7 +395,7 @@ SampleCacheConfig: # Type: "cuckoo" # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. -# Refinery keeps a record of each trace that was kept and sent to Honeycomb, along with some +# Refinery keeps a record of each trace that was kept and sent to OpsRamp, along with some # statistical information. This is most useful in cases where the trace was sent before sending # the root span, so that the root span can be decorated with accurate metadata. # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). diff --git a/rules_complete.yaml b/rules_complete.yaml index 79932e515d..6003cdeb78 100644 --- a/rules_complete.yaml +++ b/rules_complete.yaml @@ -31,10 +31,7 @@ dataset1: # implementation. This sampler collects the values of a number of fields from a # trace and uses them to form a key. 
This key is handed to the standard dynamic # sampler algorithm which generates a sample rate based on the frequency with - # which that key has appeared in the previous ClearFrequencySec seconds. See - # https://github.com/honeycombio/dynsampler-go for more detail on the mechanics - # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from - # that package. + # which that key has appeared in the previous ClearFrequencySec seconds. Sampler: DynamicSampler # SampleRate is the goal rate at which to sample. It indicates a ratio, where @@ -68,7 +65,7 @@ dataset1: # sampler as part of the key. The number of spans is exact, so if there are # normally small variations in trace length you may want to leave this off. If # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Honeycomb, set this to true. + # indicator of traces you'd like to see in OpsRamp, set this to true. UseTraceLength: true # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field @@ -142,7 +139,7 @@ dataset2: # sampler as part of the key. The number of spans is exact, so if there are # normally small variations in trace length you may want to leave this off. If # traces are consistent lengths and changes in trace length is a useful - # indicator of traces you'd like to see in Honeycomb, set this to true. + # indicator of traces you'd like to see in OpsRamp, set this to true. 
UseTraceLength: true # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field From 1bdaf5213ff344e5dac318708a3b52e7cc36a54e Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Thu, 20 Apr 2023 10:36:28 +0530 Subject: [PATCH 308/351] replacing refinery text in configs --- .../helm/opsramp-tracing-proxy/values.yaml | 10 +++++----- build/kubernetes/yaml/k8s-config-cm.yaml | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml index 9b164ea0be..6f323f274b 100644 --- a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml +++ b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml @@ -107,10 +107,10 @@ config: PeerBufferSize: 1000 # AddHostMetadataToTrace determines whether to add information about - # the host that Refinery is running on to the spans that it processes. + # the host that tracing-proxy is running on to the spans that it processes. # If enabled, information about the host will be added to each span with the - # prefix `meta.refinery.`. - # Currently, the only value added is 'meta.refinery.local_hostname'. + # prefix `meta.tracing-proxy.`. + # Currently, the only value added is 'meta.tracing-proxy.local_hostname'. AddHostMetadataToTrace: false # EnvironmentCacheTTL is the amount of time a cache entry will live that associates @@ -389,7 +389,7 @@ config: SampleCacheConfig: # Type controls the type of sample cache used. # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is - # 5x the size of the trace cache. This is Refinery's original sample cache strategy. + # 5x the size of the trace cache. This is tracing-proxy's original sample cache strategy. 
# "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. # It is also more configurable. The cuckoo filter is recommended for most installations. @@ -397,7 +397,7 @@ config: # Type: "cuckoo" # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. - # Refinery keeps a record of each trace that was kept and sent to OpsRamp, along with some + # tracing-proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some # statistical information. This is most useful in cases where the trace was sent before sending # the root span, so that the root span can be decorated with accurate metadata. # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). diff --git a/build/kubernetes/yaml/k8s-config-cm.yaml b/build/kubernetes/yaml/k8s-config-cm.yaml index 34a431ec4e..59c92462af 100644 --- a/build/kubernetes/yaml/k8s-config-cm.yaml +++ b/build/kubernetes/yaml/k8s-config-cm.yaml @@ -88,10 +88,10 @@ data: PeerBufferSize: 1000 # AddHostMetadataToTrace determines whether to add information about - # the host that Refinery is running on to the spans that it processes. + # the host that tracing-proxy is running on to the spans that it processes. # If enabled, information about the host will be added to each span with the - # prefix `meta.refinery.`. - # Currently, the only value added is 'meta.refinery.local_hostname'. + # prefix `meta.tracing-proxy.`. + # Currently, the only value added is 'meta.tracing-proxy.local_hostname'. 
AddHostMetadataToTrace: false # EnvironmentCacheTTL is the amount of time a cache entry will live that associates @@ -249,7 +249,7 @@ data: "http://127.0.0.1:8084", #only grpc peer listener used # "http://127.0.0.1:8083", # "http://10.1.2.3.4:8080", - # "http://refinery-1231:8080", + # "http://tracing-proxy-1231:8080", # "http://peer-3.fqdn" // assumes port 80 ] ########################################################### @@ -386,7 +386,7 @@ data: SampleCacheConfig: # Type controls the type of sample cache used. # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is - # 5x the size of the trace cache. This is Refinery's original sample cache strategy. + # 5x the size of the trace cache. This is tracing-proxy's original sample cache strategy. # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. # It is also more configurable. The cuckoo filter is recommended for most installations. @@ -394,7 +394,7 @@ data: # Type: "cuckoo" # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. - # Refinery keeps a record of each trace that was kept and sent to OpsRamp, along with some + # tracing-proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some # statistical information. This is most useful in cases where the trace was sent before sending # the root span, so that the root span can be decorated with accurate metadata. # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). 
From 17afd62bfa3df588ca416833fb1c61734a74e5b0 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Thu, 20 Apr 2023 10:37:23 +0530 Subject: [PATCH 309/351] replacing refinery text in configs --- config_complete.yaml | 12 ++++++------ rules_complete.yaml | 8 ++++---- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/config_complete.yaml b/config_complete.yaml index ebebfebc70..9372eb3623 100644 --- a/config_complete.yaml +++ b/config_complete.yaml @@ -74,10 +74,10 @@ UpstreamBufferSize: 1000 PeerBufferSize: 1000 # AddHostMetadataToTrace determines whether to add information about -# the host that Refinery is running on to the spans that it processes. +# the host that tracing proxy is running on to the spans that it processes. # If enabled, information about the host will be added to each span with the -# prefix `meta.refinery.`. -# Currently, the only value added is 'meta.refinery.local_hostname'. +# prefix `meta.tracing proxy.`. +# Currently, the only value added is 'meta.tracing proxy.local_hostname'. AddHostMetadataToTrace: false # EnvironmentCacheTTL is the amount of time a cache entry will live that associates @@ -238,7 +238,7 @@ PeerManagement: # "http://127.0.0.1:8084", #only grpc peer listener used # # "http://127.0.0.1:8083", # # "http://10.1.2.3.4:8080", - # # "http://refinery-1231:8080", + # # "http://tracing proxy-1231:8080", # # "http://peer-3.fqdn" // assumes port 80 # ] ########################################################### @@ -387,7 +387,7 @@ GRPCServerParameters: SampleCacheConfig: # Type controls the type of sample cache used. # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is -# 5x the size of the trace cache. This is Refinery's original sample cache strategy. +# 5x the size of the trace cache. This is tracing proxy's original sample cache strategy. 
# "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. # It is also more configurable. The cuckoo filter is recommended for most installations. @@ -395,7 +395,7 @@ SampleCacheConfig: # Type: "cuckoo" # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. -# Refinery keeps a record of each trace that was kept and sent to OpsRamp, along with some +# tracing proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some # statistical information. This is most useful in cases where the trace was sent before sending # the root span, so that the root span can be decorated with accurate metadata. # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). diff --git a/rules_complete.yaml b/rules_complete.yaml index 6003cdeb78..99cdc4ae03 100644 --- a/rules_complete.yaml +++ b/rules_complete.yaml @@ -78,7 +78,7 @@ dataset1: # AddSampleRateKeyToTraceField is the name of the field the sampler will use # when adding the sample rate key to the trace. This setting is only used when # AddSampleRateKeyToTrace is true. - AddSampleRateKeyToTraceField: meta.refinery.dynsampler_key + AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key # ClearFrequencySec is the name of the field the sampler will use to determine # the period over which it will calculate the sample rate. This setting defaults @@ -152,7 +152,7 @@ dataset2: # AddSampleRateKeyToTraceField is the name of the field the sampler will use # when adding the sample rate key to the trace. This setting is only used when # AddSampleRateKeyToTrace is true. - AddSampleRateKeyToTraceField: meta.refinery.dynsampler_key + AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key # AdjustmentInterval defines how often (in seconds) we adjust the moving average from # recent observations. 
Default 15s @@ -225,7 +225,7 @@ dataset4: - request.method - request.route AddSampleRateKeyToTrace: true - AddSampleRateKeyToTraceField: meta.refinery.dynsampler_key + AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key - name: dynamically sample 200 string responses condition: - field: status_code @@ -240,7 +240,7 @@ dataset4: - request.method - request.route AddSampleRateKeyToTrace: true - AddSampleRateKeyToTraceField: meta.refinery.dynsampler_key + AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key - name: sample traces originating from a service Scope: span SampleRate: 5 From 34e8c727342d3608cef0939244fb832d52990253 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Thu, 20 Apr 2023 17:29:03 +0530 Subject: [PATCH 310/351] adding health check for file based cluster --- build/tracing-deb/config_complete.toml | 511 ------------------ build/tracing-deb/configure.go | 57 -- build/tracing-deb/rules_complete.toml | 229 -------- build/tracing-rpm/config_complete.toml | 511 ------------------ build/tracing-rpm/configure.go | 57 -- build/tracing-rpm/rules_complete.toml | 229 -------- build/{ => vm}/config_complete.yaml | 33 +- build/{ => vm}/configure.go | 7 +- build/{ => vm}/rules_complete.yaml | 0 build/{ => vm}/tracing-deb/script.sh | 0 .../tracing-deb/tracing/DEBIAN/conffiles | 0 .../tracing-deb/tracing/DEBIAN/control | 0 .../tracing-deb/tracing/DEBIAN/postinst | 0 .../{ => vm}/tracing-deb/tracing/DEBIAN/prerm | 1 + .../etc/systemd/system/tracing-proxy.service | 0 .../etc/systemd/system/tracing-proxy.service | 0 build/{ => vm}/tracing-rpm/script.sh | 0 build/{ => vm}/tracing-rpm/tracing-proxy.spec | 1 + go.mod | 4 +- go.sum | 8 +- internal/peer/file.go | 35 +- route/otlp_trace.go | 9 +- 22 files changed, 68 insertions(+), 1624 deletions(-) delete mode 100644 build/tracing-deb/config_complete.toml delete mode 100644 build/tracing-deb/configure.go delete mode 100644 build/tracing-deb/rules_complete.toml delete mode 100644 
build/tracing-rpm/config_complete.toml delete mode 100644 build/tracing-rpm/configure.go delete mode 100644 build/tracing-rpm/rules_complete.toml rename build/{ => vm}/config_complete.yaml (94%) rename build/{ => vm}/configure.go (74%) rename build/{ => vm}/rules_complete.yaml (100%) rename build/{ => vm}/tracing-deb/script.sh (100%) rename build/{ => vm}/tracing-deb/tracing/DEBIAN/conffiles (100%) rename build/{ => vm}/tracing-deb/tracing/DEBIAN/control (100%) rename build/{ => vm}/tracing-deb/tracing/DEBIAN/postinst (100%) rename build/{ => vm}/tracing-deb/tracing/DEBIAN/prerm (91%) rename build/{ => vm}/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service (100%) rename build/{ => vm}/tracing-rpm/etc/systemd/system/tracing-proxy.service (100%) rename build/{ => vm}/tracing-rpm/script.sh (100%) rename build/{ => vm}/tracing-rpm/tracing-proxy.spec (98%) diff --git a/build/tracing-deb/config_complete.toml b/build/tracing-deb/config_complete.toml deleted file mode 100644 index dcbf861bd4..0000000000 --- a/build/tracing-deb/config_complete.toml +++ /dev/null @@ -1,511 +0,0 @@ -##################### -## Tracing-proxy Config ## -##################### - -# ListenAddr is the IP and port on which to listen for incoming events. Incoming -# traffic is expected to be HTTP, so if using SSL put something like nginx in -# front to do the decryption. -# Should be of the form 0.0.0.0:8082 -# Not eligible for live reload. -ListenAddr = "0.0.0.0:8082" - -# GRPCListenAddr is the IP and port on which to listen for incoming events over -# gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put -# something like nginx in front to do the decryption. -# Should be of the form 0.0.0.0:9090 -# Not eligible for live reload. -GRPCListenAddr = "0.0.0.0:9090" - -# PeerListenAddr is the IP and port on which to listen for traffic being -# rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL -# put something like nginx in front to do the decryption. 
Must be different from -# ListenAddr -# Should be of the form 0.0.0.0:8081 -# Not eligible for live reload. -PeerListenAddr = "0.0.0.0:8083" -GRPCPeerListenAddr = "0.0.0.0:8084" - -# ProxyProtocol accepts http and https -# Not Eligible for live reload. -ProxyProtocol = "" - -# ProxyServer takes the proxy server address -# Not Eligible for live reload. -ProxyServer = "" - -# ProxyPort takes the proxy server port -# Not Eligible for live reload. -ProxyPort = 0 - -# ProxyUserName takes the proxy username -# Not Eligible for live reload. -ProxyUserName = "" - -# ProxyPassword takes the proxy password -# Not Eligible for live reload. -ProxyPassword = "" - -# CompressPeerCommunication determines whether tracing-proxy will compress span data -# it forwards to peers. If it costs money to transmit data between tracing-proxy -# instances (e.g. they're spread across AWS availability zones), then you -# almost certainly want compression enabled to reduce your bill. The option to -# disable it is provided as an escape hatch for deployments that value lower CPU -# utilization over data transfer costs. -CompressPeerCommunication = true - -# OpsrampAPI is the URL for the upstream Opsramp API. -# Eligible for live reload. -OpsrampAPI = - -# OpsrampKey is used to get the OauthToken -OpsrampKey = - -# OpsrampSecret is used to get the OauthToken -OpsrampSecret = - -# Traces are sent to the client with given tenantid -TenantId = - -# Dataset you want to use for sampling -Dataset = "ds" - -#Tls Options -UseTls = true -UseTlsInsecure = false - -# SendDelay is a short timer that will be triggered when a trace is complete. -# Tracing-proxy will wait this duration before actually sending the trace. The -# reason for this short delay is to allow for small network delays or clock -# jitters to elapse and any final spans to arrive before actually sending the -# trace. This supports duration strings with supplied units. Set to 0 for -# immediate sends. -# Eligible for live reload.
-SendDelay = "2s" - -# BatchTimeout dictates how frequently to send unfulfilled batches. By default -# this will use the DefaultBatchTimeout in libhoney as its value, which is 100ms. -# Eligible for live reload. -BatchTimeout = "1s" - -# TraceTimeout is a long timer; it represents the outside boundary of how long -# to wait before sending an incomplete trace. Normally traces are sent when the -# root span arrives. Sometimes the root span never arrives (due to crashes or -# whatever), and this timer will send a trace even without having received the -# root span. If you have particularly long-lived traces you should increase this -# timer. This supports duration strings with supplied units. -# Eligible for live reload. -TraceTimeout = "60s" - -# MaxBatchSize is the number of events to be included in the batch for sending -MaxBatchSize = 500 - -# SendTicker is a short timer; it determines the duration to use to check for traces to send -SendTicker = "100ms" - -# LoggingLevel is the level above which we should log. Debug is very verbose, -# and should only be used in pre-production environments. Info is the -# recommended level. Valid options are "debug", "info", "error", and -# "panic" -# Not eligible for live reload. -LoggingLevel = "error" - -# UpstreamBufferSize and PeerBufferSize control how large of an event queue to use -# when buffering events that will be forwarded to peers or the upstream API. -UpstreamBufferSize = 10000 -PeerBufferSize = 10000 - -# DebugServiceAddr sets the IP and port the debug service will run on -# The debug service will only run if the command line flag -d is specified -# The debug service runs on the first open port between localhost:6060 and :6069 by default -# DebugServiceAddr = "localhost:8085" - -# AddHostMetadataToTrace determines whether or not to add information about -# the host that Tracing-proxy is running on to the spans that it processes. 
-# If enabled, information about the host will be added to each span with the -# prefix `meta.tracing-proxy.`. -# Currently the only value added is 'meta.tracing-proxy.local_hostname'. -# Not eligible for live reload -AddHostMetadataToTrace = false - -# EnvironmentCacheTTL is the amount of time a cache entry will live that associates -# an API key with an environment name. -# Cache misses lookup the environment name using OpsrampAPI config value. -# Default is 1 hour ("1h"). -# Not eligible for live reload. -EnvironmentCacheTTL = "1h" - -# QueryAuthToken, if specified, provides a token that must be specified with -# the header "X-Opsramp-Tracing-proxy-Query" in order for a /query request to succeed. -# These /query requests are intended for debugging tracing-proxy installations and -# are not typically needed in normal operation. -# Can be specified in the environment as tracing-proxy_QUERY_AUTH_TOKEN. -# If left unspecified, the /query endpoints are inaccessible. -# Not eligible for live reload. -# QueryAuthToken = "some-random-value" - -# AddRuleReasonToTrace causes traces that are sent to Opsramp to include the field `meta.tracing-proxy.reason`. -# This field contains text indicating which rule was evaluated that caused the trace to be included. -# Eligible for live reload. -# AddRuleReasonToTrace = true - -# AdditionalErrorFields should be a list of span fields that should be included when logging -# errors that happen during ingestion of events (for example, the span too large error). -# This is primarily useful in trying to track down misbehaving senders in a large installation. -# The fields `dataset`, `apihost`, and `environment` are always included. -# If a field is not present in the span, it will not be present in the error log. -# Default is ["trace.span_id"]. -# Eligible for live reload.
-AdditionalErrorFields = [ - "trace.span_id" -] - -# AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate -# the number of child spans on the trace at the time the sampling decision was made. -# This value is available to the rules-based sampler, making it possible to write rules that -# are dependent upon the number of spans in the trace. -# Default is false. -# Eligible for live reload. -# AddSpanCountToRoot = true - -# CacheOverrunStrategy controls the cache management behavior under memory pressure. -# "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, -# which is generally not helpful unless it occurs because of a permanent change in traffic patterns. -# In the "impact" strategy, the items having the most impact on the cache size are -# ejected from the cache earlier than normal but the cache is not resized. -# In all cases, it only applies if MaxAlloc is nonzero. -# Default is "resize" for compatibility but "impact" is recommended for most installations. -# Eligible for live reload. -# CacheOverrunStrategy = "impact" - -# Metrics are sent to OpsRamp (The collection happens based on configuration specifie -# in OpsRampMetrics and only works when the Metrics is set to "prometheus") -SendMetricsToOpsRamp = false - -############################ -## Implementation Choices ## -############################ - -# Each of the config options below chooses an implementation of a Tracing-proxy -# component to use. Depending on the choice there may be more configuration -# required below in the section for that choice. Changing implementation choices -# requires a process restart; these changes will not be picked up by a live -# config reload. (Individual config options for a given implementation may be -# eligible for live reload). - -# Collector describes which collector to use for collecting traces. The only -# current valid option is "InMemCollector".. 
More can be added by adding -# implementations of the Collector interface. -Collector = "InMemCollector" - -######################### -## Peer Management ## -######################### - -[PeerManagement] -Type = "file" -## Peers is the list of all servers participating in this proxy cluster. Events -## will be sharded evenly across all peers based on the Trace ID. Values here -## should be the base URL used to access the peer, and should include scheme, -## hostname (or ip address) and port. All servers in the cluster should be in -## this list, including this host. -Peers = [ - "http://127.0.0.1:8084", #only grpc peer listener used -# # "http://127.0.0.1:8083", -# # "http://10.1.2.3.4:8080", -# # "http://tracin-1231:8080", -# # "http://peer-3.fqdn" // assumes port 80 -] - -#[PeerManagement] -#Type = "redis" -# RedisHost is is used to connect to redis for peer cluster membership management. -# Further, if the environment variable 'Tracing_Proxy_REDIS_HOST' is set it takes -# precedence and this value is ignored. -# Not eligible for live reload. -#RedisHost = "redis:22122" - -# RedisUsername is the username used to connect to redis for peer cluster membership management. -# If the environment variable 'Tracing_Proxy_REDIS_USERNAME' is set it takes -# precedence and this value is ignored. -# Not eligible for live reload. -# RedisUsername = "" - -# RedisPassword is the password used to connect to redis for peer cluster membership management. -# If the environment variable 'Tracing_Proxy_REDIS_PASSWORD' is set it takes -# precedence and this value is ignored. -# Not eligible for live reload. -# RedisPassword = "" - -# RedisPrefix is a string used as a prefix for the keys in redis while storing -# the peer membership. It might be useful to set this in any situation where -# multiple tracing-proxy clusters or multiple applications want to share a single -# Redis instance. 
If not set then "tracing-proxy" is used as prefix -# RedisPrefix = "customPrefix" - -# RedisDatabase is an integer from 0-15 indicating the database number to use -# for the Redis instance storing the peer membership. It might be useful to set -# this in any situation where multiple trace-proxy clusters or multiple -# applications want to share a single Redis instance. if not set Default = 0 -# RedisDatabase = 1 - -# UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. -# Not eligible for live reload. -# UseTLS = false - -# UseTLSInsecure disables certificate checks -# Not eligible for live reload. -# UseTLSInsecure = false - -# IdentifierInterfaceName is optional. By default, when using RedisHost, Tracing-proxy will use -# the local hostname to identify itself to other peers in Redis. If your environment -# requires that you use IPs as identifiers (for example, if peers can't resolve eachother -# by name), you can specify the network interface that Tracing-proxy is listening on here. -# Tracing-proxy will use the first unicast address that it finds on the specified network -# interface as its identifier. -# Not eligible for live reload. -# IdentifierInterfaceName = "eth0" - -# UseIPV6Identifier is optional. If using IdentifierInterfaceName, Tracing-proxy will default to the first -# IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use -# the first IPV6 unicast address found. -# UseIPV6Identifier = false - -# RedisIdentifier is optional. By default, when using RedisHost, Tracing-proxy will use -# the local hostname to id788714cd-a17a-4d7e-9bac-c35131f4bcc2entify itself to other peers in Redis. If your environment -# requires that you use IPs as identifiers (for example, if peers can't resolve eachother -# by name), you can specify the exact identifier (IP address, etc) to use here. -# Not eligible for live reload. 
Overrides IdentifierInterfaceName, if both are set. -# RedisIdentifier = "192.168.1.1" - -# Timeout is optional. By default, when using RedisHost, Tracing-proxy will timeout -# after 5s when communicating with Redis. -# Timeout = "5s" - -# Strategy controls the way that traces are assigned to tracin nodes. -# The "legacy" strategy uses a simple algorithm that unfortunately causes -# 1/2 of the in-flight traces to be assigned to a different node whenever the -# number of nodes changes. -# The legacy strategy is deprecated and is intended to be removed in a future release. -# The "hash" strategy is strongly recommended, as only 1/N traces (where N is the -# number of nodes) are disrupted when the node count changes. -# Not eligible for live reload. -Strategy = "hash" - -######################### -## In-Memory Collector ## -######################### - -# InMemCollector brings together all the settings that are relevant to -# collecting spans together to make traces. -[InMemCollector] - -# The collection cache is used to collect all spans into a trace as well as -# remember the sampling decision for any spans that might come in after the -# trace has been marked "complete" (either by timing out or seeing the root -# span). The number of traces in the cache should be many multiples (100x to -# 1000x) of the total number of concurrently active traces (trace throughput * -# trace duration). -# Eligible for live reload. Growing the cache capacity with a live config reload -# is fine. Avoid shrinking it with a live reload (you can, but it may cause -# temporary odd sampling decisions). -CacheCapacity = 1000 - -# MaxAlloc is optional. If set, it must be an integer >= 0. 64-bit values are -# supported. -# If set to a non-zero value, once per tick (see SendTicker) the collector -# will compare total allocated bytes to this value. If allocation is too -# high, cache capacity will be adjusted according to the setting for -# CacheOverrunStrategy. 
-# Useful values for this setting are generally in the range of 75%-90% of -# available system memory. -MaxAlloc = 0 - -################### -## Logrus Logger ## -################### - -# LogrusLogger is a section of the config only used if you are using the -# LogrusLogger to send all logs to STDOUT using the logrus package. If you are -# using a different logger (eg Opsramp logger) you can leave all this -# commented out. -[LogrusLogger] - -# LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] -LogFormatter = "logfmt" - -# LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr", "file"] -LogOutput = "file" - -## LogrusLogger.File - specifies configs for logs when LogOutput is set to "file" -[LogrusLogger.File] - -# FileName specifies the location where the logs are supposed be stored -FileName = "/var/log/opsramp/tracing-proxy.log" - -# MaxSize is the maximum size in megabytes of the log file before it gets rotated. -MaxSize = 1 - -# MaxBackups is the maximum number of old log files to retain. -MaxBackups = 3 - -# Compress determines if the rotated log files should be compressed -# using gzip. -Compress = true - - -####################### -## Prometheus Metrics ## -####################### - -[OpsRampMetrics] -# MetricsListenAddr determines the interface and port on which Prometheus will -# listen for requests for /metrics. Must be different from the main Tracing-proxy -# listener. -# Not eligible for live reload. -MetricsListenAddr = "localhost:2112" - -# OpsRampMetricsAPI is the URL for the upstream OpsRamp API. -# Not Eligible for live reload. -OpsRampMetricsAPI = - -# OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. -# Not Eligible for live reload. -OpsRampTenantID = - -# OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. -# This is separate from the APIKeys used to authenticate regular -# traffic. 
-# Not Eligible for live reload. -OpsRampMetricsAPIKey = - -# OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. -# This is separate from the APISecret used to authenticate regular -# traffic. -# Not Eligible for live reload. -OpsRampMetricsAPISecret = - -# OpsRampMetricsReportingInterval is frequency specified in seconds at which -# the metrics are collected and sent to OpsRamp -# Not Eligible for live reload. -OpsRampMetricsReportingInterval = 10 - -# OpsRampMetricsRetryCount is the number of times we retry incase the send fails -# Not Eligible for live reload. -OpsRampMetricsRetryCount = 2 - -# ProxyProtocol accepts http and https -# Not Eligible for live reload. -ProxyProtocol = "" - -# ProxyServer takes the proxy server address -# Not Eligible for live reload. -ProxyServer = "" - -# ProxyPort takes the proxy server port -# Not Eligible for live reload. -ProxyPort = 3128 - -# ProxyUserName takes the proxy username -# Not Eligible for live reload. -ProxyUserName = "" - -# ProxyPassword takes the proxy password -# Not Eligible for live reload. -ProxyPassword = "" - -# OpsRampMetricsList is a list of regular expressions which match the metric -# names. Keep the list as small as possible since too many regular expressions can lead to bad performance. -# Internally all the regex in the list are concatinated using '|' to make the computation little faster. -# Not Eligible for live reload -OpsRampMetricsList = [".*"] - - -[GRPCServerParameters] - -# MaxConnectionIdle is a duration for the amount of time after which an -# idle connection would be closed by sending a GoAway. Idleness duration is -# defined since the most recent time the number of outstanding RPCs became -# zero or the connection establishment. -# 0s sets duration to infinity which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219 -# Not eligible for live reload. 
-# MaxConnectionIdle = "1m" - -# MaxConnectionAge is a duration for the maximum amount of time a -# connection may exist before it will be closed by sending a GoAway. A -# random jitter of +/-10% will be added to MaxConnectionAge to spread out -# connection storms. -# 0s sets duration to infinity which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 -# Not eligible for live reload. -# MaxConnectionAge = "0s" - -# MaxConnectionAgeGrace is an additive period after MaxConnectionAge after -# which the connection will be forcibly closed. -# 0s sets duration to infinity which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 -# Not eligible for live reload. -# MaxConnectionAgeGrace = "0s" - -# After a duration of this time if the server doesn't see any activity it -# pings the client to see if the transport is still alive. -# If set below 1s, a minimum value of 1s will be used instead. -# 0s sets duration to 2 hours which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 -# Not eligible for live reload. -# Time = "10s" - -# After having pinged for keepalive check, the server waits for a duration -# of Timeout and if no activity is seen even after that the connection is -# closed. -# 0s sets duration to 20 seconds which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 -# Not eligible for live reload. 
-# Timeout = "2s" - - - -################################ -## Sample Cache Configuration ## -################################ - -# Sample Cache Configuration controls the sample cache used to retain information about trace -# status after the sampling decision has been made. - -[SampleCacheConfig] - -# Type controls the type of sample cache used. -# "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is -# 5x the size of the trace cache. This is Tracing-proxy's original sample cache strategy. -# "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember -# a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. -# It is also more configurable. The cuckoo filter is recommended for most installations. -# Default is "legacy". -# Not eligible for live reload (you cannot change the type of cache with reload). -# Type = "cuckoo" - -# KeptSize controls the number of traces preserved in the cuckoo kept traces cache. -# Tracing-proxy keeps a record of each trace that was kept and sent to Opsramp, along with some -# statistical information. This is most useful in cases where the trace was sent before sending -# the root span, so that the root span can be decorated with accurate metadata. -# Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). -# Does not apply to the "legacy" type of cache. -# Eligible for live reload. -# KeptSize = 10_000 - -# DroppedSize controls the size of the cuckoo dropped traces cache. -# This cache consumes 4-6 bytes per trace at a scale of millions of traces. -# Changing its size with live reload sets a future limit, but does not have an immediate effect. -# Default is 1_000_000 traces. -# Does not apply to the "legacy" type of cache. -# Eligible for live reload. 
-# DroppedSize = 1_000_000 - -# SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates -# the remaining capacity of its dropped traces cache and possibly cycles it. -# This cache is quite resilient so it doesn't need to happen very often, but the -# operation is also inexpensive. -# Default is 10 seconds. -# Does not apply to the "legacy" type of cache. -# Eligible for live reload. -# SizeCheckInterval = "10s" diff --git a/build/tracing-deb/configure.go b/build/tracing-deb/configure.go deleted file mode 100644 index 1daecd2039..0000000000 --- a/build/tracing-deb/configure.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "io/ioutil" - "os" - "os/exec" -) - -func main() { - var configFile, updatedConfigFile []byte - var err error - configFile, err = ioutil.ReadFile("/opt/opsramp/tracing-proxy/conf/config_complete.toml") - - api := flag.String("A", "", "API To Send Data") - key := flag.String("K", "", "Opsramp Key") - secret := flag.String("S", "", "Opsramp Secret") - tenant := flag.String("T", "", "Opsramp TenantID") - flag.Parse() - - opsrampApiHost := "OpsrampAPI = \"" + *api + "\"" - opsrampMetricsApiHost := "OpsRampMetricsAPI = \"" + *api + "\"" - - updatedConfigFile = bytes.Replace(configFile, []byte("OpsrampAPI ="), []byte(opsrampApiHost), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPI ="), []byte(opsrampMetricsApiHost), 1) - - opsrampKey := "OpsrampKey = \"" + *key + "\"" - opsrampMetricsApiKey := "OpsRampMetricsAPIKey = \"" + *key + "\"" - - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampKey ="), []byte(opsrampKey), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPIKey ="), []byte(opsrampMetricsApiKey), 1) - - OpsrampSecret := "OpsrampSecret = \"" + *secret + "\"" - OpsRampMetricsAPISecret := "OpsRampMetricsAPISecret = \"" + *secret + "\"" - - updatedConfigFile = bytes.Replace(updatedConfigFile, 
[]byte("OpsrampSecret ="), []byte(OpsrampSecret), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPISecret ="), []byte(OpsRampMetricsAPISecret), 1) - - opsrampTenantID := "OpsRampTenantID = \"" + *tenant + "\"" - TenantId := "TenantId = \"" + *tenant + "\"" - - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampTenantID ="), []byte(opsrampTenantID), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("TenantId ="), []byte(TenantId), 1) - - if err = ioutil.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.toml", updatedConfigFile, 0666); err != nil { - fmt.Println(err) - os.Exit(1) - } - - if _, err := exec.Command("systemctl", "start", "tracing-proxy").Output(); err != nil { - fmt.Println(err) - os.Exit(1) - } - fmt.Println("Tracing-Proxy Started Successfully") -} diff --git a/build/tracing-deb/rules_complete.toml b/build/tracing-deb/rules_complete.toml deleted file mode 100644 index 1ff80e7295..0000000000 --- a/build/tracing-deb/rules_complete.toml +++ /dev/null @@ -1,229 +0,0 @@ -############################ -## Sampling Rules Config ## -############################ - -# DryRun - If enabled, marks traces that would be dropped given current sampling rules, -# and sends all traces regardless -DryRun = true - -# DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to tracing_proxy_kept -DryRunFieldName = "fromProxy" - -# DeterministicSampler is a section of the config for manipulating the -# Deterministic Sampler implementation. This is the simplest sampling algorithm -# - it is a static sample rate, choosing traces randomly to either keep or send -# (at the appropriate rate). It is not influenced by the contents of the trace. -Sampler = "DeterministicSampler" - -# SampleRate is the rate at which to sample. It indicates a ratio, where one -# sample trace is kept for every n traces seen. 
For example, a SampleRate of 30 -# will keep 1 out of every 30 traces. The choice on whether to keep any specific -# trace is random, so the rate is approximate. -# Eligible for live reload. -SampleRate = 1 - -# [dataset1] - -# # Note: If your dataset name contains a space, you will have to escape the dataset name -# # using single quotes, such as ['dataset 1'] - -# # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler -# # implementation. This sampler collects the values of a number of fields from a -# # trace and uses them to form a key. This key is handed to the standard dynamic -# # sampler algorithm which generates a sample rate based on the frequency with -# # which that key has appeared in the previous ClearFrequencySec seconds. See -# # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics -# # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from -# # that package. -# Sampler = "DynamicSampler" - -# # SampleRate is the goal rate at which to sample. It indicates a ratio, where -# # one sample trace is kept for every n traces seen. For example, a SampleRate of -# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic -# # sampler, who assigns a sample rate for each trace based on the fields selected -# # from that trace. -# # Eligible for live reload. -# SampleRate = 1 - -# # FieldList is a list of all the field names to use to form the key that will be -# # handed to the dynamic sampler. The cardinality of the combination of values -# # from all of these keys should be reasonable in the face of the frequency of -# # those keys. If the combination of fields in these keys essentially makes them -# # unique, the dynamic sampler will do no sampling. If the keys have too few -# # values, you won't get samples of the most interesting traces. 
A good key -# # selection will have consistent values for high frequency boring traffic and -# # unique values for outliers and interesting traffic. Including an error field -# # (or something like HTTP status code) is an excellent choice. As an example, -# # assuming 30 or so endpoints, a combination of HTTP endpoint and status code -# # would be a good set of keys in order to let you see accurately use of all -# # endpoints and call out when there is failing traffic to any endpoint. Field -# # names may come from any span in the trace. -# # Eligible for live reload. -# FieldList = [""] - -# # UseTraceLength will add the number of spans in the trace in to the dynamic -# # sampler as part of the key. The number of spans is exact, so if there are -# # normally small variations in trace length you may want to leave this off. If -# # traces are consistent lengths and changes in trace length is a useful -# # indicator of traces you'd like to see in Opsramp, set this to true. -# # Eligible for live reload. -# UseTraceLength = true - -# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field -# # to the root span of the trace containing the key used by the sampler to decide -# # the sample rate. This can be helpful in understanding why the sampler is -# # making certain decisions about sample rate and help you understand how to -# # better choose the sample rate key (aka the FieldList setting above) to use. -# AddSampleRateKeyToTrace = true - -# # AddSampleRateKeyToTraceField is the name of the field the sampler will use -# # when adding the sample rate key to the trace. This setting is only used when -# # AddSampleRateKeyToTrace is true. -# AddSampleRateKeyToTraceField = "" - -# # ClearFrequencySec is the name of the field the sampler will use to determine -# # the period over which it will calculate the sample rate. This setting defaults -# # to 30. -# # Eligible for live reload. 
-# ClearFrequencySec = 60 - -# [dataset2] - -# # EMADynamicSampler is a section of the config for manipulating the Exponential -# # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, -# # it attempts to average a given sample rate, weighting rare traffic and frequent -# # traffic differently so as to end up with the correct average. -# # -# # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended -# # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs -# # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential -# # Moving Average of counts seen per key, and adjusts this average at regular intervals. -# # The weight applied to more recent intervals is defined by `weight`, a number between -# # (0, 1) - larger values weight the average more toward recent observations. In other words, -# # a larger weight will cause sample rates more quickly adapt to traffic patterns, -# # while a smaller weight will result in sample rates that are less sensitive to bursts or drops -# # in traffic and thus more consistent over time. -# # -# # Keys that are not found in the EMA will always have a sample -# # rate of 1. Keys that occur more frequently will be sampled on a logarithmic -# # curve. In other words, every key will be represented at least once in any -# # given window and more frequent keys will have their sample rate -# # increased proportionally to wind up with the goal sample rate. -# Sampler = "EMADynamicSampler" - -# # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where -# # one sample trace is kept for every n traces seen. For example, a SampleRate of -# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic -# # sampler, who assigns a sample rate for each trace based on the fields selected -# # from that trace. -# # Eligible for live reload. 
-# GoalSampleRate = 1 - -# # FieldList is a list of all the field names to use to form the key that will be -# # handed to the dynamic sampler. The cardinality of the combination of values -# # from all of these keys should be reasonable in the face of the frequency of -# # those keys. If the combination of fields in these keys essentially makes them -# # unique, the dynamic sampler will do no sampling. If the keys have too few -# # values, you won't get samples of the most interesting traces. A good key -# # selection will have consistent values for high frequency boring traffic and -# # unique values for outliers and interesting traffic. Including an error field -# # (or something like HTTP status code) is an excellent choice. As an example, -# # assuming 30 or so endpoints, a combination of HTTP endpoint and status code -# # would be a good set of keys in order to let you see accurately use of all -# # endpoints and call out when there is failing traffic to any endpoint. Field -# # names may come from any span in the trace. -# # Eligible for live reload. -# FieldList = [""] - -# # UseTraceLength will add the number of spans in the trace in to the dynamic -# # sampler as part of the key. The number of spans is exact, so if there are -# # normally small variations in trace length you may want to leave this off. If -# # traces are consistent lengths and changes in trace length is a useful -# # indicator of traces you'd like to see in Opsramp, set this to true. -# # Eligible for live reload. -# UseTraceLength = true - -# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field -# # to the root span of the trace containing the key used by the sampler to decide -# # the sample rate. This can be helpful in understanding why the sampler is -# # making certain decisions about sample rate and help you understand how to -# # better choose the sample rate key (aka the FieldList setting above) to use. 
-# AddSampleRateKeyToTrace = true - -# # AddSampleRateKeyToTraceField is the name of the field the sampler will use -# # when adding the sample rate key to the trace. This setting is only used when -# # AddSampleRateKeyToTrace is true. -# AddSampleRateKeyToTraceField = "" - -# # AdjustmentInterval defines how often (in seconds) we adjust the moving average from -# # recent observations. Default 15s -# # Eligible for live reload. -# AdjustmentInterval = 15 - -# # Weight is a value between (0, 1) indicating the weighting factor used to adjust -# # the EMA. With larger values, newer data will influence the average more, and older -# # values will be factored out more quickly. In mathematical literature concerning EMA, -# # this is referred to as the `alpha` constant. -# # Default is 0.5 -# # Eligible for live reload. -# Weight = 0.5 - -# # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. -# # Once MaxKeys is reached, new keys will not be included in the sample rate map, but -# # existing keys will continue to be be counted. You can use this to keep the sample rate -# # map size under control. -# # Eligible for live reload -# MaxKeys = 0 - -# # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key -# # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to -# # decide what constitutes "zero". Keys with averages below this threshold will be removed -# # from the EMA. Default is the same as Weight, as this prevents a key with the smallest -# # integer value (1) from being aged out immediately. This value should generally be <= Weight, -# # unless you have very specific reasons to set it higher. -# # Eligible for live reload -# AgeOutValue = 0.5 - -# # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define -# # the burst detection threshold. 
If total counts observed for a given interval exceed the threshold -# # EMA is updated immediately, rather than waiting on the AdjustmentInterval. -# # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, -# # burst detection will kick in. -# # Eligible for live reload -# BurstMultiple = 2.0 - -# # BurstDetectionDelay indicates the number of intervals to run after Start is called before -# # burst detection kicks in. -# # Defaults to 3 -# # Eligible for live reload -# BurstDetectionDelay = 3 - -# [dataset3] - -# Sampler = "DeterministicSampler" -# SampleRate = 10 - -# [dataset4] - -# Sampler = "RulesBasedSampler" - -# [[dataset4.rule]] -# # Rule name -# name = "" -# # Drop Condition (examples: true, false) -# drop = -# [[dataset4.rule.condition]] -# # Field Name (example: status_code) -# field = "" -# # Operator Value (example: =) -# operator = "" -# # Field Value (example: 500) -# value = "" - - - -# [dataset5] - -# Sampler = "TotalThroughputSampler" -# GoalThroughputPerSec = 100 -# FieldList = "[]" diff --git a/build/tracing-rpm/config_complete.toml b/build/tracing-rpm/config_complete.toml deleted file mode 100644 index dcbf861bd4..0000000000 --- a/build/tracing-rpm/config_complete.toml +++ /dev/null @@ -1,511 +0,0 @@ -##################### -## Tracing-proxy Config ## -##################### - -# ListenAddr is the IP and port on which to listen for incoming events. Incoming -# traffic is expected to be HTTP, so if using SSL put something like nginx in -# front to do the decryption. -# Should be of the form 0.0.0.0:8082 -# Not eligible for live reload. -ListenAddr = "0.0.0.0:8082" - -# GRPCListenAddr is the IP and port on which to listen for incoming events over -# gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put -# something like nginx in front to do the decryption. -# Should be of the form 0.0.0.0:9090 -# Not eligible for live reload. 
-GRPCListenAddr = "0.0.0.0:9090" - -# PeerListenAddr is the IP and port on which to listen for traffic being -# rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL -# put something like nginx in front to do the decryption. Must be different from -# ListenAddr -# Should be of the form 0.0.0.0:8081 -# Not eligible for live reload. -PeerListenAddr = "0.0.0.0:8083" -GRPCPeerListenAddr = "0.0.0.0:8084" - -# ProxyProtocol accepts http and https -# Not Eligible for live reload. -ProxyProtocol = "" - -# ProxyServer takes the proxy server address -# Not Eligible for live reload. -ProxyServer = "" - -# ProxyPort takes the proxy server port -# Not Eligible for live reload. -ProxyPort = 0 - -# ProxyUserName takes the proxy username -# Not Eligible for live reload. -ProxyUserName = "" - -# ProxyPassword takes the proxy password -# Not Eligible for live reload. -ProxyPassword = "" - -# CompressPeerCommunication determines whether tracin will compress span data -# it forwards to peers. If it costs money to transmit data between tracin -# instances (e.g. they're spread across AWS availability zones), then you -# almost certainly want compression enabled to reduce your bill. The option to -# disable it is provided as an escape hatch for deployments that value lower CPU -# utilization over data transfer costs. -CompressPeerCommunication = true - -# OpsrampAPI is the URL for the upstream Opsramp API. -# Eligible for live reload. -OpsrampAPI = - -# OpsrampKey is used to get the OauthToken -OpsrampKey = - -# OpsrampSecret is used to get the OauthToken -OpsrampSecret = - -# Traces are send to the client with given tenantid -TenantId = - -# Dataset you want to use for sampling -Dataset = "ds" - -#Tls Options -UseTls = true -UseTlsInsecure = false - -# SendDelay is a short timer that will be triggered when a trace is complete. -# Tracing-proxy will wait this duration before actually sending the trace. 
The -# reason for this short delay is to allow for small network delays or clock -# jitters to elapse and any final spans to arrive before actually sending the -# trace. This supports duration strings with supplied units. Set to 0 for -# immediate sends. -# Eligible for live reload. -SendDelay = "2s" - -# BatchTimeout dictates how frequently to send unfulfilled batches. By default -# this will use the DefaultBatchTimeout in libhoney as its value, which is 100ms. -# Eligible for live reload. -BatchTimeout = "1s" - -# TraceTimeout is a long timer; it represents the outside boundary of how long -# to wait before sending an incomplete trace. Normally traces are sent when the -# root span arrives. Sometimes the root span never arrives (due to crashes or -# whatever), and this timer will send a trace even without having received the -# root span. If you have particularly long-lived traces you should increase this -# timer. This supports duration strings with supplied units. -# Eligible for live reload. -TraceTimeout = "60s" - -# MaxBatchSize is the number of events to be included in the batch for sending -MaxBatchSize = 500 - -# SendTicker is a short timer; it determines the duration to use to check for traces to send -SendTicker = "100ms" - -# LoggingLevel is the level above which we should log. Debug is very verbose, -# and should only be used in pre-production environments. Info is the -# recommended level. Valid options are "debug", "info", "error", and -# "panic" -# Not eligible for live reload. -LoggingLevel = "error" - -# UpstreamBufferSize and PeerBufferSize control how large of an event queue to use -# when buffering events that will be forwarded to peers or the upstream API. 
-UpstreamBufferSize = 10000 -PeerBufferSize = 10000 - -# DebugServiceAddr sets the IP and port the debug service will run on -# The debug service will only run if the command line flag -d is specified -# The debug service runs on the first open port between localhost:6060 and :6069 by default -# DebugServiceAddr = "localhost:8085" - -# AddHostMetadataToTrace determines whether or not to add information about -# the host that Tracing-proxy is running on to the spans that it processes. -# If enabled, information about the host will be added to each span with the -# prefix `meta.tracing-procy.`. -# Currently the only value added is 'meta.tracing-proxy.local_hostname'. -# Not eligible for live reload -AddHostMetadataToTrace = false - -# EnvironmentCacheTTL is the amount of time a cache entry will live that associates -# an API key with an environment name. -# Cache misses lookup the environment name using OpsrampAPI config value. -# Default is 1 hour ("1h"). -# Not eligible for live reload. -EnvironmentCacheTTL = "1h" - -# QueryAuthToken, if specified, provides a token that must be specified with -# the header "X-Opsramp-Tracing-proxy-Query" in order for a /query request to succeed. -# These /query requests are intended for debugging tracin installations and -# are not typically needed in normal operation. -# Can be specified in the environment as tracing-proxy_QUERY_AUTH_TOKEN. -# If left unspecified, the /query endpoints are inaccessible. -# Not eligible for live reload. -# QueryAuthToken = "some-random-value" - -# AddRuleReasonToTrace causes traces that are sent to Opsramp to include the field `meta.tracin.reason`. -# This field contains text indicating which rule was evaluated that caused the trace to be included. -# Eligible for live reload. -# AddRuleReasonToTrace = true - -# AdditionalErrorFields should be a list of span fields that should be included when logging -# errors that happen during ingestion of events (for example, the span too large error). 
-# This is primarily useful in trying to track down misbehaving senders in a large installation. -# The fields `dataset`, `apihost`, and `environment` are always included. -# If a field is not present in the span, it will not be present in the error log. -# Default is ["trace.span_id"]. -# Eligible for live reload. -AdditionalErrorFields = [ - "trace.span_id" -] - -# AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate -# the number of child spans on the trace at the time the sampling decision was made. -# This value is available to the rules-based sampler, making it possible to write rules that -# are dependent upon the number of spans in the trace. -# Default is false. -# Eligible for live reload. -# AddSpanCountToRoot = true - -# CacheOverrunStrategy controls the cache management behavior under memory pressure. -# "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, -# which is generally not helpful unless it occurs because of a permanent change in traffic patterns. -# In the "impact" strategy, the items having the most impact on the cache size are -# ejected from the cache earlier than normal but the cache is not resized. -# In all cases, it only applies if MaxAlloc is nonzero. -# Default is "resize" for compatibility but "impact" is recommended for most installations. -# Eligible for live reload. -# CacheOverrunStrategy = "impact" - -# Metrics are sent to OpsRamp (The collection happens based on configuration specifie -# in OpsRampMetrics and only works when the Metrics is set to "prometheus") -SendMetricsToOpsRamp = false - -############################ -## Implementation Choices ## -############################ - -# Each of the config options below chooses an implementation of a Tracing-proxy -# component to use. Depending on the choice there may be more configuration -# required below in the section for that choice. 
Changing implementation choices -# requires a process restart; these changes will not be picked up by a live -# config reload. (Individual config options for a given implementation may be -# eligible for live reload). - -# Collector describes which collector to use for collecting traces. The only -# current valid option is "InMemCollector".. More can be added by adding -# implementations of the Collector interface. -Collector = "InMemCollector" - -######################### -## Peer Management ## -######################### - -[PeerManagement] -Type = "file" -## Peers is the list of all servers participating in this proxy cluster. Events -## will be sharded evenly across all peers based on the Trace ID. Values here -## should be the base URL used to access the peer, and should include scheme, -## hostname (or ip address) and port. All servers in the cluster should be in -## this list, including this host. -Peers = [ - "http://127.0.0.1:8084", #only grpc peer listener used -# # "http://127.0.0.1:8083", -# # "http://10.1.2.3.4:8080", -# # "http://tracin-1231:8080", -# # "http://peer-3.fqdn" // assumes port 80 -] - -#[PeerManagement] -#Type = "redis" -# RedisHost is is used to connect to redis for peer cluster membership management. -# Further, if the environment variable 'Tracing_Proxy_REDIS_HOST' is set it takes -# precedence and this value is ignored. -# Not eligible for live reload. -#RedisHost = "redis:22122" - -# RedisUsername is the username used to connect to redis for peer cluster membership management. -# If the environment variable 'Tracing_Proxy_REDIS_USERNAME' is set it takes -# precedence and this value is ignored. -# Not eligible for live reload. -# RedisUsername = "" - -# RedisPassword is the password used to connect to redis for peer cluster membership management. -# If the environment variable 'Tracing_Proxy_REDIS_PASSWORD' is set it takes -# precedence and this value is ignored. -# Not eligible for live reload. 
-# RedisPassword = "" - -# RedisPrefix is a string used as a prefix for the keys in redis while storing -# the peer membership. It might be useful to set this in any situation where -# multiple tracing-proxy clusters or multiple applications want to share a single -# Redis instance. If not set then "tracing-proxy" is used as prefix -# RedisPrefix = "customPrefix" - -# RedisDatabase is an integer from 0-15 indicating the database number to use -# for the Redis instance storing the peer membership. It might be useful to set -# this in any situation where multiple trace-proxy clusters or multiple -# applications want to share a single Redis instance. if not set Default = 0 -# RedisDatabase = 1 - -# UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. -# Not eligible for live reload. -# UseTLS = false - -# UseTLSInsecure disables certificate checks -# Not eligible for live reload. -# UseTLSInsecure = false - -# IdentifierInterfaceName is optional. By default, when using RedisHost, Tracing-proxy will use -# the local hostname to identify itself to other peers in Redis. If your environment -# requires that you use IPs as identifiers (for example, if peers can't resolve eachother -# by name), you can specify the network interface that Tracing-proxy is listening on here. -# Tracing-proxy will use the first unicast address that it finds on the specified network -# interface as its identifier. -# Not eligible for live reload. -# IdentifierInterfaceName = "eth0" - -# UseIPV6Identifier is optional. If using IdentifierInterfaceName, Tracing-proxy will default to the first -# IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use -# the first IPV6 unicast address found. -# UseIPV6Identifier = false - -# RedisIdentifier is optional. 
By default, when using RedisHost, Tracing-proxy will use -# the local hostname to id788714cd-a17a-4d7e-9bac-c35131f4bcc2entify itself to other peers in Redis. If your environment -# requires that you use IPs as identifiers (for example, if peers can't resolve eachother -# by name), you can specify the exact identifier (IP address, etc) to use here. -# Not eligible for live reload. Overrides IdentifierInterfaceName, if both are set. -# RedisIdentifier = "192.168.1.1" - -# Timeout is optional. By default, when using RedisHost, Tracing-proxy will timeout -# after 5s when communicating with Redis. -# Timeout = "5s" - -# Strategy controls the way that traces are assigned to tracin nodes. -# The "legacy" strategy uses a simple algorithm that unfortunately causes -# 1/2 of the in-flight traces to be assigned to a different node whenever the -# number of nodes changes. -# The legacy strategy is deprecated and is intended to be removed in a future release. -# The "hash" strategy is strongly recommended, as only 1/N traces (where N is the -# number of nodes) are disrupted when the node count changes. -# Not eligible for live reload. -Strategy = "hash" - -######################### -## In-Memory Collector ## -######################### - -# InMemCollector brings together all the settings that are relevant to -# collecting spans together to make traces. -[InMemCollector] - -# The collection cache is used to collect all spans into a trace as well as -# remember the sampling decision for any spans that might come in after the -# trace has been marked "complete" (either by timing out or seeing the root -# span). The number of traces in the cache should be many multiples (100x to -# 1000x) of the total number of concurrently active traces (trace throughput * -# trace duration). -# Eligible for live reload. Growing the cache capacity with a live config reload -# is fine. Avoid shrinking it with a live reload (you can, but it may cause -# temporary odd sampling decisions). 
-CacheCapacity = 1000 - -# MaxAlloc is optional. If set, it must be an integer >= 0. 64-bit values are -# supported. -# If set to a non-zero value, once per tick (see SendTicker) the collector -# will compare total allocated bytes to this value. If allocation is too -# high, cache capacity will be adjusted according to the setting for -# CacheOverrunStrategy. -# Useful values for this setting are generally in the range of 75%-90% of -# available system memory. -MaxAlloc = 0 - -################### -## Logrus Logger ## -################### - -# LogrusLogger is a section of the config only used if you are using the -# LogrusLogger to send all logs to STDOUT using the logrus package. If you are -# using a different logger (eg Opsramp logger) you can leave all this -# commented out. -[LogrusLogger] - -# LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] -LogFormatter = "logfmt" - -# LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr", "file"] -LogOutput = "file" - -## LogrusLogger.File - specifies configs for logs when LogOutput is set to "file" -[LogrusLogger.File] - -# FileName specifies the location where the logs are supposed be stored -FileName = "/var/log/opsramp/tracing-proxy.log" - -# MaxSize is the maximum size in megabytes of the log file before it gets rotated. -MaxSize = 1 - -# MaxBackups is the maximum number of old log files to retain. -MaxBackups = 3 - -# Compress determines if the rotated log files should be compressed -# using gzip. -Compress = true - - -####################### -## Prometheus Metrics ## -####################### - -[OpsRampMetrics] -# MetricsListenAddr determines the interface and port on which Prometheus will -# listen for requests for /metrics. Must be different from the main Tracing-proxy -# listener. -# Not eligible for live reload. -MetricsListenAddr = "localhost:2112" - -# OpsRampMetricsAPI is the URL for the upstream OpsRamp API. 
-# Not Eligible for live reload. -OpsRampMetricsAPI = - -# OpsRampTenantID is the Client or Tenant ID where the metrics are supposed to be pushed. -# Not Eligible for live reload. -OpsRampTenantID = - -# OpsRampMetricsAPIKey is the API key to use to send metrics to the OpsRamp. -# This is separate from the APIKeys used to authenticate regular -# traffic. -# Not Eligible for live reload. -OpsRampMetricsAPIKey = - -# OpsRampMetricsAPISecret is the API Secret to use to send metrics to the OpsRamp. -# This is separate from the APISecret used to authenticate regular -# traffic. -# Not Eligible for live reload. -OpsRampMetricsAPISecret = - -# OpsRampMetricsReportingInterval is frequency specified in seconds at which -# the metrics are collected and sent to OpsRamp -# Not Eligible for live reload. -OpsRampMetricsReportingInterval = 10 - -# OpsRampMetricsRetryCount is the number of times we retry incase the send fails -# Not Eligible for live reload. -OpsRampMetricsRetryCount = 2 - -# ProxyProtocol accepts http and https -# Not Eligible for live reload. -ProxyProtocol = "" - -# ProxyServer takes the proxy server address -# Not Eligible for live reload. -ProxyServer = "" - -# ProxyPort takes the proxy server port -# Not Eligible for live reload. -ProxyPort = 3128 - -# ProxyUserName takes the proxy username -# Not Eligible for live reload. -ProxyUserName = "" - -# ProxyPassword takes the proxy password -# Not Eligible for live reload. -ProxyPassword = "" - -# OpsRampMetricsList is a list of regular expressions which match the metric -# names. Keep the list as small as possible since too many regular expressions can lead to bad performance. -# Internally all the regex in the list are concatinated using '|' to make the computation little faster. -# Not Eligible for live reload -OpsRampMetricsList = [".*"] - - -[GRPCServerParameters] - -# MaxConnectionIdle is a duration for the amount of time after which an -# idle connection would be closed by sending a GoAway. 
Idleness duration is -# defined since the most recent time the number of outstanding RPCs became -# zero or the connection establishment. -# 0s sets duration to infinity which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219 -# Not eligible for live reload. -# MaxConnectionIdle = "1m" - -# MaxConnectionAge is a duration for the maximum amount of time a -# connection may exist before it will be closed by sending a GoAway. A -# random jitter of +/-10% will be added to MaxConnectionAge to spread out -# connection storms. -# 0s sets duration to infinity which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 -# Not eligible for live reload. -# MaxConnectionAge = "0s" - -# MaxConnectionAgeGrace is an additive period after MaxConnectionAge after -# which the connection will be forcibly closed. -# 0s sets duration to infinity which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 -# Not eligible for live reload. -# MaxConnectionAgeGrace = "0s" - -# After a duration of this time if the server doesn't see any activity it -# pings the client to see if the transport is still alive. -# If set below 1s, a minimum value of 1s will be used instead. -# 0s sets duration to 2 hours which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 -# Not eligible for live reload. -# Time = "10s" - -# After having pinged for keepalive check, the server waits for a duration -# of Timeout and if no activity is seen even after that the connection is -# closed. 
-# 0s sets duration to 20 seconds which is the default: -# https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 -# Not eligible for live reload. -# Timeout = "2s" - - - -################################ -## Sample Cache Configuration ## -################################ - -# Sample Cache Configuration controls the sample cache used to retain information about trace -# status after the sampling decision has been made. - -[SampleCacheConfig] - -# Type controls the type of sample cache used. -# "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is -# 5x the size of the trace cache. This is Tracing-proxy's original sample cache strategy. -# "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember -# a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. -# It is also more configurable. The cuckoo filter is recommended for most installations. -# Default is "legacy". -# Not eligible for live reload (you cannot change the type of cache with reload). -# Type = "cuckoo" - -# KeptSize controls the number of traces preserved in the cuckoo kept traces cache. -# Tracing-proxy keeps a record of each trace that was kept and sent to Opsramp, along with some -# statistical information. This is most useful in cases where the trace was sent before sending -# the root span, so that the root span can be decorated with accurate metadata. -# Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). -# Does not apply to the "legacy" type of cache. -# Eligible for live reload. -# KeptSize = 10_000 - -# DroppedSize controls the size of the cuckoo dropped traces cache. -# This cache consumes 4-6 bytes per trace at a scale of millions of traces. -# Changing its size with live reload sets a future limit, but does not have an immediate effect. 
-# Default is 1_000_000 traces. -# Does not apply to the "legacy" type of cache. -# Eligible for live reload. -# DroppedSize = 1_000_000 - -# SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates -# the remaining capacity of its dropped traces cache and possibly cycles it. -# This cache is quite resilient so it doesn't need to happen very often, but the -# operation is also inexpensive. -# Default is 10 seconds. -# Does not apply to the "legacy" type of cache. -# Eligible for live reload. -# SizeCheckInterval = "10s" diff --git a/build/tracing-rpm/configure.go b/build/tracing-rpm/configure.go deleted file mode 100644 index 1daecd2039..0000000000 --- a/build/tracing-rpm/configure.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "io/ioutil" - "os" - "os/exec" -) - -func main() { - var configFile, updatedConfigFile []byte - var err error - configFile, err = ioutil.ReadFile("/opt/opsramp/tracing-proxy/conf/config_complete.toml") - - api := flag.String("A", "", "API To Send Data") - key := flag.String("K", "", "Opsramp Key") - secret := flag.String("S", "", "Opsramp Secret") - tenant := flag.String("T", "", "Opsramp TenantID") - flag.Parse() - - opsrampApiHost := "OpsrampAPI = \"" + *api + "\"" - opsrampMetricsApiHost := "OpsRampMetricsAPI = \"" + *api + "\"" - - updatedConfigFile = bytes.Replace(configFile, []byte("OpsrampAPI ="), []byte(opsrampApiHost), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPI ="), []byte(opsrampMetricsApiHost), 1) - - opsrampKey := "OpsrampKey = \"" + *key + "\"" - opsrampMetricsApiKey := "OpsRampMetricsAPIKey = \"" + *key + "\"" - - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampKey ="), []byte(opsrampKey), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPIKey ="), []byte(opsrampMetricsApiKey), 1) - - OpsrampSecret := "OpsrampSecret = \"" + *secret + "\"" - OpsRampMetricsAPISecret := 
"OpsRampMetricsAPISecret = \"" + *secret + "\"" - - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsrampSecret ="), []byte(OpsrampSecret), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampMetricsAPISecret ="), []byte(OpsRampMetricsAPISecret), 1) - - opsrampTenantID := "OpsRampTenantID = \"" + *tenant + "\"" - TenantId := "TenantId = \"" + *tenant + "\"" - - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("OpsRampTenantID ="), []byte(opsrampTenantID), 1) - updatedConfigFile = bytes.Replace(updatedConfigFile, []byte("TenantId ="), []byte(TenantId), 1) - - if err = ioutil.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.toml", updatedConfigFile, 0666); err != nil { - fmt.Println(err) - os.Exit(1) - } - - if _, err := exec.Command("systemctl", "start", "tracing-proxy").Output(); err != nil { - fmt.Println(err) - os.Exit(1) - } - fmt.Println("Tracing-Proxy Started Successfully") -} diff --git a/build/tracing-rpm/rules_complete.toml b/build/tracing-rpm/rules_complete.toml deleted file mode 100644 index 1ff80e7295..0000000000 --- a/build/tracing-rpm/rules_complete.toml +++ /dev/null @@ -1,229 +0,0 @@ -############################ -## Sampling Rules Config ## -############################ - -# DryRun - If enabled, marks traces that would be dropped given current sampling rules, -# and sends all traces regardless -DryRun = true - -# DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to tracing_proxy_kept -DryRunFieldName = "fromProxy" - -# DeterministicSampler is a section of the config for manipulating the -# Deterministic Sampler implementation. This is the simplest sampling algorithm -# - it is a static sample rate, choosing traces randomly to either keep or send -# (at the appropriate rate). It is not influenced by the contents of the trace. -Sampler = "DeterministicSampler" - -# SampleRate is the rate at which to sample. 
It indicates a ratio, where one -# sample trace is kept for every n traces seen. For example, a SampleRate of 30 -# will keep 1 out of every 30 traces. The choice on whether to keep any specific -# trace is random, so the rate is approximate. -# Eligible for live reload. -SampleRate = 1 - -# [dataset1] - -# # Note: If your dataset name contains a space, you will have to escape the dataset name -# # using single quotes, such as ['dataset 1'] - -# # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler -# # implementation. This sampler collects the values of a number of fields from a -# # trace and uses them to form a key. This key is handed to the standard dynamic -# # sampler algorithm which generates a sample rate based on the frequency with -# # which that key has appeared in the previous ClearFrequencySec seconds. See -# # https://github.com/opsrampio/dynsampler-go for more detail on the mechanics -# # of the dynamic sampler. This sampler uses the AvgSampleRate algorithm from -# # that package. -# Sampler = "DynamicSampler" - -# # SampleRate is the goal rate at which to sample. It indicates a ratio, where -# # one sample trace is kept for every n traces seen. For example, a SampleRate of -# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic -# # sampler, who assigns a sample rate for each trace based on the fields selected -# # from that trace. -# # Eligible for live reload. -# SampleRate = 1 - -# # FieldList is a list of all the field names to use to form the key that will be -# # handed to the dynamic sampler. The cardinality of the combination of values -# # from all of these keys should be reasonable in the face of the frequency of -# # those keys. If the combination of fields in these keys essentially makes them -# # unique, the dynamic sampler will do no sampling. If the keys have too few -# # values, you won't get samples of the most interesting traces. 
A good key -# # selection will have consistent values for high frequency boring traffic and -# # unique values for outliers and interesting traffic. Including an error field -# # (or something like HTTP status code) is an excellent choice. As an example, -# # assuming 30 or so endpoints, a combination of HTTP endpoint and status code -# # would be a good set of keys in order to let you see accurately use of all -# # endpoints and call out when there is failing traffic to any endpoint. Field -# # names may come from any span in the trace. -# # Eligible for live reload. -# FieldList = [""] - -# # UseTraceLength will add the number of spans in the trace in to the dynamic -# # sampler as part of the key. The number of spans is exact, so if there are -# # normally small variations in trace length you may want to leave this off. If -# # traces are consistent lengths and changes in trace length is a useful -# # indicator of traces you'd like to see in Opsramp, set this to true. -# # Eligible for live reload. -# UseTraceLength = true - -# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field -# # to the root span of the trace containing the key used by the sampler to decide -# # the sample rate. This can be helpful in understanding why the sampler is -# # making certain decisions about sample rate and help you understand how to -# # better choose the sample rate key (aka the FieldList setting above) to use. -# AddSampleRateKeyToTrace = true - -# # AddSampleRateKeyToTraceField is the name of the field the sampler will use -# # when adding the sample rate key to the trace. This setting is only used when -# # AddSampleRateKeyToTrace is true. -# AddSampleRateKeyToTraceField = "" - -# # ClearFrequencySec is the name of the field the sampler will use to determine -# # the period over which it will calculate the sample rate. This setting defaults -# # to 30. -# # Eligible for live reload. 
-# ClearFrequencySec = 60 - -# [dataset2] - -# # EMADynamicSampler is a section of the config for manipulating the Exponential -# # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, -# # it attempts to average a given sample rate, weighting rare traffic and frequent -# # traffic differently so as to end up with the correct average. -# # -# # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended -# # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs -# # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential -# # Moving Average of counts seen per key, and adjusts this average at regular intervals. -# # The weight applied to more recent intervals is defined by `weight`, a number between -# # (0, 1) - larger values weight the average more toward recent observations. In other words, -# # a larger weight will cause sample rates more quickly adapt to traffic patterns, -# # while a smaller weight will result in sample rates that are less sensitive to bursts or drops -# # in traffic and thus more consistent over time. -# # -# # Keys that are not found in the EMA will always have a sample -# # rate of 1. Keys that occur more frequently will be sampled on a logarithmic -# # curve. In other words, every key will be represented at least once in any -# # given window and more frequent keys will have their sample rate -# # increased proportionally to wind up with the goal sample rate. -# Sampler = "EMADynamicSampler" - -# # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where -# # one sample trace is kept for every n traces seen. For example, a SampleRate of -# # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic -# # sampler, who assigns a sample rate for each trace based on the fields selected -# # from that trace. -# # Eligible for live reload. 
-# GoalSampleRate = 1 - -# # FieldList is a list of all the field names to use to form the key that will be -# # handed to the dynamic sampler. The cardinality of the combination of values -# # from all of these keys should be reasonable in the face of the frequency of -# # those keys. If the combination of fields in these keys essentially makes them -# # unique, the dynamic sampler will do no sampling. If the keys have too few -# # values, you won't get samples of the most interesting traces. A good key -# # selection will have consistent values for high frequency boring traffic and -# # unique values for outliers and interesting traffic. Including an error field -# # (or something like HTTP status code) is an excellent choice. As an example, -# # assuming 30 or so endpoints, a combination of HTTP endpoint and status code -# # would be a good set of keys in order to let you see accurately use of all -# # endpoints and call out when there is failing traffic to any endpoint. Field -# # names may come from any span in the trace. -# # Eligible for live reload. -# FieldList = [""] - -# # UseTraceLength will add the number of spans in the trace in to the dynamic -# # sampler as part of the key. The number of spans is exact, so if there are -# # normally small variations in trace length you may want to leave this off. If -# # traces are consistent lengths and changes in trace length is a useful -# # indicator of traces you'd like to see in Opsramp, set this to true. -# # Eligible for live reload. -# UseTraceLength = true - -# # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field -# # to the root span of the trace containing the key used by the sampler to decide -# # the sample rate. This can be helpful in understanding why the sampler is -# # making certain decisions about sample rate and help you understand how to -# # better choose the sample rate key (aka the FieldList setting above) to use. 
-# AddSampleRateKeyToTrace = true - -# # AddSampleRateKeyToTraceField is the name of the field the sampler will use -# # when adding the sample rate key to the trace. This setting is only used when -# # AddSampleRateKeyToTrace is true. -# AddSampleRateKeyToTraceField = "" - -# # AdjustmentInterval defines how often (in seconds) we adjust the moving average from -# # recent observations. Default 15s -# # Eligible for live reload. -# AdjustmentInterval = 15 - -# # Weight is a value between (0, 1) indicating the weighting factor used to adjust -# # the EMA. With larger values, newer data will influence the average more, and older -# # values will be factored out more quickly. In mathematical literature concerning EMA, -# # this is referred to as the `alpha` constant. -# # Default is 0.5 -# # Eligible for live reload. -# Weight = 0.5 - -# # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. -# # Once MaxKeys is reached, new keys will not be included in the sample rate map, but -# # existing keys will continue to be be counted. You can use this to keep the sample rate -# # map size under control. -# # Eligible for live reload -# MaxKeys = 0 - -# # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key -# # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to -# # decide what constitutes "zero". Keys with averages below this threshold will be removed -# # from the EMA. Default is the same as Weight, as this prevents a key with the smallest -# # integer value (1) from being aged out immediately. This value should generally be <= Weight, -# # unless you have very specific reasons to set it higher. -# # Eligible for live reload -# AgeOutValue = 0.5 - -# # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define -# # the burst detection threshold. 
If total counts observed for a given interval exceed the threshold -# # EMA is updated immediately, rather than waiting on the AdjustmentInterval. -# # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, -# # burst detection will kick in. -# # Eligible for live reload -# BurstMultiple = 2.0 - -# # BurstDetectionDelay indicates the number of intervals to run after Start is called before -# # burst detection kicks in. -# # Defaults to 3 -# # Eligible for live reload -# BurstDetectionDelay = 3 - -# [dataset3] - -# Sampler = "DeterministicSampler" -# SampleRate = 10 - -# [dataset4] - -# Sampler = "RulesBasedSampler" - -# [[dataset4.rule]] -# # Rule name -# name = "" -# # Drop Condition (examples: true, false) -# drop = -# [[dataset4.rule.condition]] -# # Field Name (example: status_code) -# field = "" -# # Operator Value (example: =) -# operator = "" -# # Field Value (example: 500) -# value = "" - - - -# [dataset5] - -# Sampler = "TotalThroughputSampler" -# GoalThroughputPerSec = 100 -# FieldList = "[]" diff --git a/build/config_complete.yaml b/build/vm/config_complete.yaml similarity index 94% rename from build/config_complete.yaml rename to build/vm/config_complete.yaml index d4fa9201f3..6608353f5d 100644 --- a/build/config_complete.yaml +++ b/build/vm/config_complete.yaml @@ -74,10 +74,10 @@ UpstreamBufferSize: 1000 PeerBufferSize: 1000 # AddHostMetadataToTrace determines whether to add information about -# the host that Tracing-Proxy is running on to the spans that it processes. +# the host that tracing proxy is running on to the spans that it processes. # If enabled, information about the host will be added to each span with the -# prefix `meta.tracing_proxy.`. -# Currently, the only value added is 'meta.tracing_proxy.local_hostname'. +# prefix `meta.tracing proxy.`. +# Currently, the only value added is 'meta.tracing proxy.local_hostname'. 
AddHostMetadataToTrace: false # EnvironmentCacheTTL is the amount of time a cache entry will live that associates @@ -123,6 +123,24 @@ AddSpanCountToRoot: false # Default is "resize" for compatibility but "impact" is recommended for most installations. CacheOverrunStrategy: "impact" +######################### +## Retry Configuration ## +######################### +RetryConfiguration: + # InitialInterval the time to wait after the first failure before retrying. + InitialInterval: 500ms + # RandomizationFactor is a random factor used to calculate next backoff + # Randomized interval = RetryInterval * (1 ± RandomizationFactor) + RandomizationFactor: 0.5 + # Multiplier is the value multiplied by the backoff interval bounds + Multiplier: 1.5 + # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between + # consecutive retries will always be `MaxInterval`. + MaxInterval: 60s + # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request. + # Once this value is reached, the data is discarded. + MaxElapsedTime: 15m + ######################### ## Proxy Configuration ## ######################### @@ -274,7 +292,7 @@ PeerManagement: # IdentifierInterfaceName is optional. # Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. # When configured the pod's IP will be used in the peer list - IdentifierInterfaceName: eth0 + # IdentifierInterfaceName: eth0 # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use @@ -318,9 +336,6 @@ MetricsConfig: # the metrics are collected and sent to OpsRamp ReportingInterval: 10 - # OpsRampMetricsRetryCount is the number of times we retry incase the send fails - RetryCount: 2 - # MetricsList is a list of regular expressions which match the metric # names. 
Keep the list as small as possible since too many regular expressions can lead to bad performance. # Internally, all the items in the list are concatenated using '|' to make the computation faster. @@ -372,7 +387,7 @@ GRPCServerParameters: SampleCacheConfig: # Type controls the type of sample cache used. # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is -# 5x the size of the trace cache. This is Tracing-Proxy's original sample cache strategy. +# 5x the size of the trace cache. This is tracing proxy's original sample cache strategy. # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. # It is also more configurable. The cuckoo filter is recommended for most installations. @@ -380,7 +395,7 @@ SampleCacheConfig: # Type: "cuckoo" # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. -# Tracing-Proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some +# tracing proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some # statistical information. This is most useful in cases where the trace was sent before sending # the root span, so that the root span can be decorated with accurate metadata. # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). 
diff --git a/build/configure.go b/build/vm/configure.go similarity index 74% rename from build/configure.go rename to build/vm/configure.go index 54b259d357..311f84607a 100644 --- a/build/configure.go +++ b/build/vm/configure.go @@ -3,7 +3,6 @@ package main import ( "flag" "fmt" - "io/ioutil" "os" "os/exec" "strings" @@ -14,7 +13,7 @@ func main() { var fileContent string var err error - configFile, err = ioutil.ReadFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml") + configFile, err = os.ReadFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml") api := flag.String("A", "", "API To Send Data") key := flag.String("K", "", "Opsramp Key") @@ -30,12 +29,12 @@ func main() { fileContent = strings.ReplaceAll(fileContent, "", *secret) fileContent = strings.ReplaceAll(fileContent, "", *tenant) - if err = ioutil.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml", []byte(fileContent), 0666); err != nil { + if err = os.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml", []byte(fileContent), 0666); err != nil { fmt.Println(err) os.Exit(1) } - if _, err := exec.Command("systemctl", "start", "tracing-proxy").Output(); err != nil { + if _, err := exec.Command("systemctl", "enable", "--now", "tracing-proxy").Output(); err != nil { fmt.Println(err) os.Exit(1) } diff --git a/build/rules_complete.yaml b/build/vm/rules_complete.yaml similarity index 100% rename from build/rules_complete.yaml rename to build/vm/rules_complete.yaml diff --git a/build/tracing-deb/script.sh b/build/vm/tracing-deb/script.sh similarity index 100% rename from build/tracing-deb/script.sh rename to build/vm/tracing-deb/script.sh diff --git a/build/tracing-deb/tracing/DEBIAN/conffiles b/build/vm/tracing-deb/tracing/DEBIAN/conffiles similarity index 100% rename from build/tracing-deb/tracing/DEBIAN/conffiles rename to build/vm/tracing-deb/tracing/DEBIAN/conffiles diff --git a/build/tracing-deb/tracing/DEBIAN/control b/build/vm/tracing-deb/tracing/DEBIAN/control 
similarity index 100% rename from build/tracing-deb/tracing/DEBIAN/control rename to build/vm/tracing-deb/tracing/DEBIAN/control diff --git a/build/tracing-deb/tracing/DEBIAN/postinst b/build/vm/tracing-deb/tracing/DEBIAN/postinst similarity index 100% rename from build/tracing-deb/tracing/DEBIAN/postinst rename to build/vm/tracing-deb/tracing/DEBIAN/postinst diff --git a/build/tracing-deb/tracing/DEBIAN/prerm b/build/vm/tracing-deb/tracing/DEBIAN/prerm similarity index 91% rename from build/tracing-deb/tracing/DEBIAN/prerm rename to build/vm/tracing-deb/tracing/DEBIAN/prerm index 13e56fab3b..8a65e05e4f 100755 --- a/build/tracing-deb/tracing/DEBIAN/prerm +++ b/build/vm/tracing-deb/tracing/DEBIAN/prerm @@ -1,5 +1,6 @@ echo "Uninstalling Tracing Proxy" systemctl stop tracing-proxy +systemctl disable tracing-proxy if [ -f /etc/systemd/system/tracing-proxy.service ]; then rm -rf /etc/systemd/system/tracing-proxy.service > /dev/null 2>&1 fi diff --git a/build/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service b/build/vm/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service similarity index 100% rename from build/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service rename to build/vm/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service diff --git a/build/tracing-rpm/etc/systemd/system/tracing-proxy.service b/build/vm/tracing-rpm/etc/systemd/system/tracing-proxy.service similarity index 100% rename from build/tracing-rpm/etc/systemd/system/tracing-proxy.service rename to build/vm/tracing-rpm/etc/systemd/system/tracing-proxy.service diff --git a/build/tracing-rpm/script.sh b/build/vm/tracing-rpm/script.sh similarity index 100% rename from build/tracing-rpm/script.sh rename to build/vm/tracing-rpm/script.sh diff --git a/build/tracing-rpm/tracing-proxy.spec b/build/vm/tracing-rpm/tracing-proxy.spec similarity index 98% rename from build/tracing-rpm/tracing-proxy.spec rename to build/vm/tracing-rpm/tracing-proxy.spec index 
2805088ce4..16d37d003f 100644 --- a/build/tracing-rpm/tracing-proxy.spec +++ b/build/vm/tracing-rpm/tracing-proxy.spec @@ -48,6 +48,7 @@ systemctl start tracing-proxy %preun -p /bin/bash echo "Uninstalling Tracing Proxy" systemctl stop tracing-proxy +systemctl disable tracing-proxy #if [ -f /etc/systemd/system/tracing-proxy.service ]; then # rm -rf /etc/systemd/system/tracing-proxy.service > /dev/null 2>&1 #fi diff --git a/go.mod b/go.mod index 03b9272ce5..0bbf48ed0c 100644 --- a/go.mod +++ b/go.mod @@ -17,8 +17,8 @@ require ( github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 - github.com/opsramp/husky v0.0.0-20230330140711-461fc680ffa0 - github.com/opsramp/libtrace-go v0.0.0-20230417062015-cad5afe8ebcc + github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c + github.com/opsramp/libtrace-go v0.0.0-20230420114955-ed5a1acf9924 github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index a8ab5153f3..51f48293d7 100644 --- a/go.sum +++ b/go.sum @@ -579,10 +579,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/opsramp/husky v0.0.0-20230330140711-461fc680ffa0 h1:qj7a1B/GFWxFVWvpzTV2V0bbxGNFI8bGM+ElTxwJP20= -github.com/opsramp/husky v0.0.0-20230330140711-461fc680ffa0/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go 
v0.0.0-20230417062015-cad5afe8ebcc h1:k25sLiGMrwyJdWvSZ8hOuzIheWebby3DF3Ny+jE71xg= -github.com/opsramp/libtrace-go v0.0.0-20230417062015-cad5afe8ebcc/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c h1:FhNFobufJrjU1/E/5LCiZk3IZRbrqk/5gnTz5iZTNQg= +github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= +github.com/opsramp/libtrace-go v0.0.0-20230420114955-ed5a1acf9924 h1:sTGWGxhN2rqubYG3JU9SQYsOccsv1E6RqrBs4cDtcu4= +github.com/opsramp/libtrace-go v0.0.0-20230420114955-ed5a1acf9924/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= diff --git a/internal/peer/file.go b/internal/peer/file.go index 1f070296cb..44269f2501 100644 --- a/internal/peer/file.go +++ b/internal/peer/file.go @@ -1,10 +1,14 @@ package peer import ( + "context" + "fmt" + "github.com/opsramp/libtrace-go/proto/proxypb" "github.com/opsramp/tracing-proxy/config" - "net" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "net/url" "sort" - "strings" "sync" "time" ) @@ -86,14 +90,25 @@ func getPeerMembers(originalPeerlist []string) []string { return workingPeers } -func isOpen(peer string) bool { - urlBreaker := strings.Split(peer, ":") - peerUrl := string(urlBreaker[1][2:]) + ":" + urlBreaker[2] - conn, err := net.Dial("tcp", peerUrl) +func isOpen(peerURL string) bool { + u, err := url.Parse(peerURL) + if err != nil { + return false + } + + opts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + } + conn, err := 
grpc.Dial(fmt.Sprintf("%s:%s", u.Hostname(), u.Port()), opts...) + if err != nil { + return false + } + defer conn.Close() + client := proxypb.NewTraceProxyServiceClient(conn) - if err == nil { - _ = conn.Close() - return true + resp, err := client.Status(context.TODO(), &proxypb.StatusRequest{}) + if err != nil { + return false } - return false + return resp.GetPeerActive() } diff --git a/route/otlp_trace.go b/route/otlp_trace.go index b09cc155f7..5494db68fc 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -4,7 +4,8 @@ import ( "context" "encoding/json" "fmt" - proxypb "github.com/opsramp/libtrace-go/proto/proxypb" + "github.com/opsramp/libtrace-go/proto/proxypb" + "github.com/opsramp/libtrace-go/transmission" "google.golang.org/grpc/metadata" "net/http" "strings" @@ -196,6 +197,12 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr return &proxypb.ExportTraceProxyServiceResponse{Message: "Received Successfully by peer", Status: "Success"}, nil } +func (r *Router) Status(context.Context, *proxypb.StatusRequest) (*proxypb.StatusResponse, error) { + return &proxypb.StatusResponse{ + PeerActive: transmission.DefaultAvailability.Status(), + }, nil +} + func extractKeyValue(v *proxypb.AnyValue) string { if x, ok := v.GetValue().(*proxypb.AnyValue_StringValue); ok { return x.StringValue From 8acb3396488e03ad1466b58f7aaf80eddef52cfa Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Thu, 20 Apr 2023 17:44:12 +0530 Subject: [PATCH 311/351] updating deb and rpm build files --- build/vm/tracing-deb/script.sh | 4 ++-- build/vm/tracing-deb/tracing/DEBIAN/control | 2 +- build/vm/tracing-rpm/script.sh | 4 ++-- build/vm/tracing-rpm/tracing-proxy.spec | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) mode change 100644 => 100755 build/vm/tracing-deb/script.sh mode change 100644 => 100755 build/vm/tracing-rpm/script.sh diff --git a/build/vm/tracing-deb/script.sh 
b/build/vm/tracing-deb/script.sh old mode 100644 new mode 100755 index 6785673e43..274949e115 --- a/build/vm/tracing-deb/script.sh +++ b/build/vm/tracing-deb/script.sh @@ -15,8 +15,8 @@ mkdir -p tracing/opt/opsramp/tracing-proxy/bin mkdir -p tracing/opt/opsramp/tracing-proxy/conf cp ../config_complete.yaml tracing/opt/opsramp/tracing-proxy/conf/config_complete.yaml cp ../rules_complete.yaml tracing/opt/opsramp/tracing-proxy/conf/rules_complete.yaml -go build -o ../../cmd/tracing-proxy/main ../../cmd/tracing-proxy/main.go -cp ../../cmd/tracing-proxy/main tracing/opt/opsramp/tracing-proxy/bin/tracing-proxy +go build -o ../../../cmd/tracing-proxy/main ../../../cmd/tracing-proxy/main.go +cp ../../../cmd/tracing-proxy/main tracing/opt/opsramp/tracing-proxy/bin/tracing-proxy go build ../configure.go cp configure tracing/opt/opsramp/tracing-proxy/bin/configure diff --git a/build/vm/tracing-deb/tracing/DEBIAN/control b/build/vm/tracing-deb/tracing/DEBIAN/control index 59984a4d9e..5c33427362 100644 --- a/build/vm/tracing-deb/tracing/DEBIAN/control +++ b/build/vm/tracing-deb/tracing/DEBIAN/control @@ -1,5 +1,5 @@ Package: tracing-proxy -Version: 1.0.0 +Version: 1.1.0 Architecture: amd64 Essential: no Priority: optional diff --git a/build/vm/tracing-rpm/script.sh b/build/vm/tracing-rpm/script.sh old mode 100644 new mode 100755 index 72b45bf3ab..af018109c3 --- a/build/vm/tracing-rpm/script.sh +++ b/build/vm/tracing-rpm/script.sh @@ -13,9 +13,9 @@ mkdir -p opt/opsramp/tracing-proxy/conf mkdir -p opt/opsramp/tracing-proxy/bin cp ../config_complete.yaml opt/opsramp/tracing-proxy/conf/config_complete.yaml cp ../rules_complete.yaml opt/opsramp/tracing-proxy/conf/rules_complete.yaml -go build -o ../../cmd/tracing-proxy/main ../../cmd/tracing-proxy/main.go +go build -o ../../../cmd/tracing-proxy/main ../../../cmd/tracing-proxy/main.go go build ../configure.go -cp ../../cmd/tracing-proxy/main opt/opsramp/tracing-proxy/bin/tracing-proxy +cp ../../../cmd/tracing-proxy/main 
opt/opsramp/tracing-proxy/bin/tracing-proxy cp configure opt/opsramp/tracing-proxy/bin/configure diff --git a/build/vm/tracing-rpm/tracing-proxy.spec b/build/vm/tracing-rpm/tracing-proxy.spec index 16d37d003f..e0f9a7788d 100644 --- a/build/vm/tracing-rpm/tracing-proxy.spec +++ b/build/vm/tracing-rpm/tracing-proxy.spec @@ -1,8 +1,8 @@ # SPEC file for creating tracing-proxy RPM %define name tracing-proxy -%define release 1.0 -%define version 2.0.1 +%define release +%define version 1.1.0 Summary: Tracing Proxy License: OpsRamp From a4d436cf3e1b04b6b9c947c3f051e05594c72688 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Tue, 25 Apr 2023 09:49:09 +0530 Subject: [PATCH 312/351] updating configure.go to default to api for metrics and traces enpoint when they are not set --- build/vm/config_complete.yaml | 8 +++--- build/vm/configure.go | 49 +++++++++++++++++++++++++---------- 2 files changed, 39 insertions(+), 18 deletions(-) diff --git a/build/vm/config_complete.yaml b/build/vm/config_complete.yaml index 6608353f5d..3938a20d6b 100644 --- a/build/vm/config_complete.yaml +++ b/build/vm/config_complete.yaml @@ -29,7 +29,7 @@ GRPCPeerListenAddr: 0.0.0.0:8084 CompressPeerCommunication: true # OpsrampAPI is the URL for the upstream Opsramp API. -OpsrampAPI: "" +OpsrampAPI: "" # Dataset you want to use for sampling Dataset: "ds" @@ -164,13 +164,13 @@ AuthConfiguration: SkipAuth: false # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made - Endpoint: "" + Endpoint: "" # Key - authentication key provided in OpsRamp Portal Key: "" # Secret - authentication Secret provided in OpsRamp Portal Secret: "" # TenantId - tenant/client id to which the traces are to be posted - TenantId: "" + TenantId: "" ############################ ## Implementation Choices ## @@ -330,7 +330,7 @@ MetricsConfig: ListenAddr: '0.0.0.0:2112' # OpsRampAPI is the URL for the upstream OpsRamp API. 
- OpsRampAPI: "" + OpsRampAPI: "" # ReportingInterval is the frequency specified in seconds at which # the metrics are collected and sent to OpsRamp diff --git a/build/vm/configure.go b/build/vm/configure.go index 311f84607a..ad6a0a3137 100644 --- a/build/vm/configure.go +++ b/build/vm/configure.go @@ -3,31 +3,52 @@ package main import ( "flag" "fmt" + "log" "os" "os/exec" "strings" ) func main() { - var configFile []byte - var fileContent string - var err error - - configFile, err = os.ReadFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml") + configFile, err := os.ReadFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml") + if err != nil { + log.Fatal(err) + } - api := flag.String("A", "", "API To Send Data") - key := flag.String("K", "", "Opsramp Key") - secret := flag.String("S", "", "Opsramp Secret") - tenant := flag.String("T", "", "Opsramp TenantID") - metricsAPI := flag.String("M", "", "API To Send Metrics Data") + api := flag.String("A", "", "API for Authorization") + key := flag.String("K", "", "OpsRamp Key") + secret := flag.String("S", "", "OpsRamp Secret") + tenant := flag.String("T", "", "OpsRamp TenantID") + tracesAPI := flag.String("B", "", "API to Sent Traces (Defaults to Authorization API specified using -A flag if not set)") + metricsAPI := flag.String("M", "", "API To Send Metrics (Defaults to Authorization API specified using -A flag if not set)") flag.Parse() - fileContent = string(configFile) - fileContent = strings.ReplaceAll(fileContent, "", *api) - fileContent = strings.ReplaceAll(fileContent, "", *metricsAPI) + if *api == "" { + log.Fatal("api cant be empty, please specify using -A flag") + } + if *key == "" { + log.Fatal("key cant be empty, please specify using -K flag") + } + if *secret == "" { + log.Fatal("secret cant be empty, please specify using -S flag") + } + if *tenant == "" { + log.Fatal("tenant cant be empty, please specify using -T flag") + } + if *tracesAPI == "" { + *tracesAPI = *api + } + if *metricsAPI == "" 
{ + *metricsAPI = *api + } + + fileContent := string(configFile) + fileContent = strings.ReplaceAll(fileContent, "", *api) + fileContent = strings.ReplaceAll(fileContent, "", *tracesAPI) + fileContent = strings.ReplaceAll(fileContent, "", *metricsAPI) fileContent = strings.ReplaceAll(fileContent, "", *key) fileContent = strings.ReplaceAll(fileContent, "", *secret) - fileContent = strings.ReplaceAll(fileContent, "", *tenant) + fileContent = strings.ReplaceAll(fileContent, "", *tenant) if err = os.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml", []byte(fileContent), 0666); err != nil { fmt.Println(err) From f579bbaa8d6795a89851a5f9dceafbf89e914c16 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Tue, 25 Apr 2023 10:35:35 +0530 Subject: [PATCH 313/351] build directory refactor --- .gitignore | 8 +- .../etc/systemd/system/tracing-proxy.service | 0 .../tracing-proxy/conf}/config_complete.yaml | 0 .../tracing-proxy/conf}/rules_complete.yaml | 0 build/vm/tracing-deb/script.sh | 37 ++- .../etc/systemd/system/tracing-proxy.service | 13 - build/vm/tracing-rpm/script.sh | 42 +-- cmd/test_redimem/main.go | 289 ------------------ 8 files changed, 54 insertions(+), 335 deletions(-) rename build/vm/{tracing-deb/tracing => package_directories}/etc/systemd/system/tracing-proxy.service (100%) rename build/vm/{ => package_directories/opt/opsramp/tracing-proxy/conf}/config_complete.yaml (100%) rename build/vm/{ => package_directories/opt/opsramp/tracing-proxy/conf}/rules_complete.yaml (100%) delete mode 100644 build/vm/tracing-rpm/etc/systemd/system/tracing-proxy.service delete mode 100644 cmd/test_redimem/main.go diff --git a/.gitignore b/.gitignore index ae6895291e..e38a4ffbde 100644 --- a/.gitignore +++ b/.gitignore @@ -11,12 +11,10 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out -tracing-proxy -!/cmd/tracing-proxy -test_redimem -!/cmd/test_redimem - dockerize* # IDE configs .idea/ +.vscode/* +.history/ +*.vsix diff --git 
a/build/vm/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service b/build/vm/package_directories/etc/systemd/system/tracing-proxy.service similarity index 100% rename from build/vm/tracing-deb/tracing/etc/systemd/system/tracing-proxy.service rename to build/vm/package_directories/etc/systemd/system/tracing-proxy.service diff --git a/build/vm/config_complete.yaml b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml similarity index 100% rename from build/vm/config_complete.yaml rename to build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml diff --git a/build/vm/rules_complete.yaml b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/rules_complete.yaml similarity index 100% rename from build/vm/rules_complete.yaml rename to build/vm/package_directories/opt/opsramp/tracing-proxy/conf/rules_complete.yaml diff --git a/build/vm/tracing-deb/script.sh b/build/vm/tracing-deb/script.sh index 274949e115..36fcc125c6 100755 --- a/build/vm/tracing-deb/script.sh +++ b/build/vm/tracing-deb/script.sh @@ -7,22 +7,41 @@ if [ "$architecture" = "x86_64" ]; then architecture='amd64' fi - sed -i "/^Architecture/s/:.*$/: ${architecture}/g" tracing/DEBIAN/control # Updating the files mkdir -p tracing/opt/opsramp/tracing-proxy/bin mkdir -p tracing/opt/opsramp/tracing-proxy/conf -cp ../config_complete.yaml tracing/opt/opsramp/tracing-proxy/conf/config_complete.yaml -cp ../rules_complete.yaml tracing/opt/opsramp/tracing-proxy/conf/rules_complete.yaml -go build -o ../../../cmd/tracing-proxy/main ../../../cmd/tracing-proxy/main.go -cp ../../../cmd/tracing-proxy/main tracing/opt/opsramp/tracing-proxy/bin/tracing-proxy -go build ../configure.go +mkdir -p tracing/etc/systemd/system + +cp -r ../package_directories/* tracing/ + +# Building a static binaries +CGO_ENABLED=0 \ + GOOS=linux \ + GOARCH=amd64 \ + go build -ldflags "-X main.BuildID=${Version}" \ + -o tracing-proxy \ + ../../../cmd/tracing-proxy + +CGO_ENABLED=0 \ + 
GOOS=linux \ + GOARCH=amd64 \ + go build -ldflags "-X main.BuildID=${Version}" \ + -o configure \ + ../configure.go + +cp tracing-proxy tracing/opt/opsramp/tracing-proxy/bin/tracing-proxy cp configure tracing/opt/opsramp/tracing-proxy/bin/configure dpkg -b tracing - # Rename the package with version and architecture -packageName="tracing-proxy_"$architecture"-"$Version".deb" -mv tracing.deb $packageName +packageName="tracing-proxy_"${architecture}"-"${Version}".deb" +mkdir -p ./output +mv tracing.deb ./output/"${packageName}" + +# Cleanup +rm -rf ./tracing/opt +rm -rf ./tracing/etc +rm -rf configure tracing-proxy \ No newline at end of file diff --git a/build/vm/tracing-rpm/etc/systemd/system/tracing-proxy.service b/build/vm/tracing-rpm/etc/systemd/system/tracing-proxy.service deleted file mode 100644 index 9225670d0a..0000000000 --- a/build/vm/tracing-rpm/etc/systemd/system/tracing-proxy.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=tracing-proxy OpsRamp Trace-Aware Sampling Proxy -After=network.target - -[Service] -ExecStart=/opt/opsramp/tracing-proxy/bin/tracing-proxy -c /opt/opsramp/tracing-proxy/conf/config_complete.yaml -r /opt/opsramp/tracing-proxy/conf/rules_complete.yaml -KillMode=process -Restart=on-failure -LimitNOFILE=infinity - -[Install] -Alias=tracing-proxy tracing-proxy.service - diff --git a/build/vm/tracing-rpm/script.sh b/build/vm/tracing-rpm/script.sh index af018109c3..bdb9b63337 100755 --- a/build/vm/tracing-rpm/script.sh +++ b/build/vm/tracing-rpm/script.sh @@ -1,36 +1,40 @@ yum -y install rpmdevtools rpmdev-setuptree -# $2 is a release of the package -Release=$2 +Release=$(uname -m) sed -i "/^\%define release/s/^.*$/\%define release ${Release}/g" tracing-proxy.spec # $1 is a version of the package Version=$1 sed -i "/^\%define version/s/^.*$/\%define version ${Version}/g" tracing-proxy.spec -# Updating the files -mkdir -p opt/opsramp/tracing-proxy/conf -mkdir -p opt/opsramp/tracing-proxy/bin -cp ../config_complete.yaml 
opt/opsramp/tracing-proxy/conf/config_complete.yaml -cp ../rules_complete.yaml opt/opsramp/tracing-proxy/conf/rules_complete.yaml -go build -o ../../../cmd/tracing-proxy/main ../../../cmd/tracing-proxy/main.go -go build ../configure.go -cp ../../../cmd/tracing-proxy/main opt/opsramp/tracing-proxy/bin/tracing-proxy -cp configure opt/opsramp/tracing-proxy/bin/configure - - +# Building a static binaries +CGO_ENABLED=0 \ + GOOS=linux \ + GOARCH=amd64 \ + go build -ldflags "-X main.BuildID=${Version}" \ + -o tracing-proxy \ + ../../../cmd/tracing-proxy + +CGO_ENABLED=0 \ + GOOS=linux \ + GOARCH=amd64 \ + go build -ldflags "-X main.BuildID=${Version}" \ + -o configure \ + ../configure.go mkdir tracing-proxy-$1 -cp -r opt tracing-proxy-$1 -cp -r etc tracing-proxy-$1 -tar -czvf tracing-proxy-$1.tar.gz tracing-proxy-$1 +cp -r ../package_directories/* tracing-proxy-$1 +mv configure tracing-proxy-$1/opt/opsramp/tracing-proxy/bin/configure +mv tracing-proxy tracing-proxy-$1/opt/opsramp/tracing-proxy/bin/tracing-proxy +tar -czvf tracing-proxy-$1.tar.gz tracing-proxy-$1 -cp tracing-proxy-$1.tar.gz /root/rpmbuild/SOURCES/ +mv tracing-proxy-$1.tar.gz /root/rpmbuild/SOURCES/ cp tracing-proxy.spec /root/rpmbuild/SPECS/tracing-proxy.spec - rpmbuild -ba --clean /root/rpmbuild/SPECS/tracing-proxy.spec - echo "***** rpm package can be found in /root/rpmbuild/RPMS/x86_64/ ****" + +# CleanUp +rm -rf tracing-proxy-$1 diff --git a/cmd/test_redimem/main.go b/cmd/test_redimem/main.go deleted file mode 100644 index d889125036..0000000000 --- a/cmd/test_redimem/main.go +++ /dev/null @@ -1,289 +0,0 @@ -package main - -// this test is an exercise against an actual redis instance to see the redimem -// package work as expected. 
- -import ( - "context" - "math/rand" - "sync" - "time" - - "github.com/gomodule/redigo/redis" - "github.com/sirupsen/logrus" - - "github.com/opsramp/tracing-proxy/internal/redimem" -) - -func main() { - rand.Seed(time.Now().UnixNano()) - - // tick := time.NewTicker(time.Second) - // for t := range tick.C { - // logrus.Info("Current time: ", t) - // } - - logrus.SetLevel(logrus.WarnLevel) - - pool := &redis.Pool{ - MaxIdle: 3, - MaxActive: 30, - IdleTimeout: 5 * time.Minute, - Wait: true, - Dial: func() (redis.Conn, error) { - return redis.Dial( - "tcp", "localhost:6379", - redis.DialReadTimeout(1*time.Second), - redis.DialConnectTimeout(1*time.Second), - redis.DialDatabase(0), // TODO enable multiple databases for multiple samproxies - ) - }, - } - - rm := &redimem.RedisMembership{ - Prefix: "test_redimem", - Pool: pool, - } - - wg := sync.WaitGroup{} - for i := 0; i < 500; i++ { - wg.Add(1) - go func() { - singleTestRandomLength(10, 5, rm) - wg.Done() - }() - time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond) - } - wg.Wait() -} - -// singleTestRandomLength will register then re-register an entry some number of -// times up to limit. It will check the entire time to verify that the entry is -// still there then that it goes away when it's supposed to at the end. The -// intent is to call this function in multiple goroutines to watch a whole slew -// of entries start and stop. 
-func singleTestRandomLength(limit, registerDurLimitSec int, rm *redimem.RedisMembership) { - // numTimes will be the number of times to re-register this entry before - // letting it expire - numTimes := rand.Intn(limit) + 1 - // registerDur will be the duration in milliseconds to register this entry - registerDur := rand.Intn(registerDurLimitSec*1000) + 1000 - // reregisterFreq is how frequently we should re-register the entry - reregisterFreq := registerDur / 2 - // done will let this function know when the entry is done being reregistered - done := make(chan struct{}) - // name is a random string used to register this entry - name := GenID(12) - - ctx := context.Background() - - // register the entry once to make sure it's there before the first check runs - logrus.WithFields(logrus.Fields{ - "registerDur": registerDur, - "name": name, - "numTimes": numTimes, - }).Info("registering entry") - rm.Register(ctx, name, time.Duration(registerDur)*time.Millisecond) - - // register the entry and then re-register it numTimes - go func() { - ticker := time.NewTicker(time.Duration(reregisterFreq) * time.Millisecond) - var i int - for range ticker.C { - i = i + 1 - logrus.WithField("name", name).Debug("re-registering entry") - rm.Register(ctx, name, time.Duration(registerDur)*time.Millisecond) - if i >= numTimes { - break - } - } - done <- struct{}{} - }() - - // watch for the entry to appear, then check that it's still there until it's - // time for it to go away, then verify it went away. 
- func() { - var i int - SHOULDEXIST: - for { - i = i + 1 - // exit out of this for loop when we get a message from the done channel - select { - case <-done: - break SHOULDEXIST - default: - } - // check that name is registered - var found bool - list, err := rm.GetMembers(ctx) - if err != nil { - logrus.WithError(err).WithFields(logrus.Fields{ - "name": name, - "numEntries": len(list), - "iteration": i, - }).Warn("caught error from get members") - } - for _, entry := range list { - if entry == name { - found = true - // logrus.WithField("name", name).Info("shouldexist: found entry") - break - } - } - if !found { - logrus.WithFields(logrus.Fields{ - "name": name, - "numEntries": len(list), - "iteration": i, - }).Warn("shouldexist: Failed to find entry") - } - // pause between each check - time.Sleep(100 * time.Millisecond) - } - // ok, we hit the last registration. We should expect to find the name for - // another registerDur and then it should go away - timer := time.NewTimer(time.Duration(registerDur) * time.Millisecond) - startLastIter := time.Now() - - i = 0 - LASTITER: - for { - i = i + 1 - select { - case <-timer.C: - // ok, now we should expect it to go away - break LASTITER - default: - } - // check that we find the entry - var found bool - list, err := rm.GetMembers(ctx) - if err != nil { - logrus.WithError(err).WithFields(logrus.Fields{ - "name": name, - "numEntries": len(list), - "iteration": i, - }).Warn("in lastiter caught error from get members") - } - for _, entry := range list { - if entry == name { - found = true - // logrus.WithField("name", name).Info("lastiter: found entry") - break - } - } - if !found { - dur := time.Since(startLastIter) - logrus.WithFields(logrus.Fields{ - "name": name, - "numEntries": len(list), - "timeInLastIterMs": float64(dur / time.Millisecond), - "expectedDurMs": registerDur, - "deltaExpire": float64(registerDur) - float64(dur/time.Millisecond), - }).Info("lastiter: Entry vanished") - if 
float64(registerDur)-float64(dur/time.Millisecond) > 1600 { - logrus.WithFields(logrus.Fields{ - "iteration": i, - "name": name, - "numEntries": len(list), - "timeInLastIterMs": float64(dur / time.Millisecond), - "expectedDurMs": registerDur, - "deltaExpire": float64(registerDur) - float64(dur/time.Millisecond), - }).Warn("delta exceeded 1.6 seconds - out of bounds of expected expiration") - } - break - } - time.Sleep(50 * time.Millisecond) - } - - // ok, we're beyond the duration of the last registration interval; now or - // very soon we should see the entry disappear. - i = 0 - for { - // check that we find the entry - var found bool - list, err := rm.GetMembers(ctx) - if err != nil { - logrus.WithError(err).WithFields(logrus.Fields{ - "name": name, - "numEntries": len(list), - "iteration": i, - }).Warn("in endgame caught error from get members") - } - for _, entry := range list { - if entry == name { - found = true - break - } - } - if !found { - // we're done, the register is gone - logrus.WithField("count", i).WithField("name", name).Infof("entry now gone") - break - } - if i > 100 { - logrus.WithField("name", name).Warn("entry still exists after 100 checks") - } - time.Sleep(10 * time.Millisecond) - } - logrus.WithField("name", name).Infof("all done checking entry") - }() - -} - -// adds two entries with various sleeps and verifies they're there at the -// expected times -//func linearTest(rm *redimem.RedisMembership) { -// ctx := context.Background() -// logrus.Infoln("about to register one for 3sec") -// rm.Register(ctx, "one", 3*time.Second) -// -// logrus.Infoln("about to sleep for 2sec") -// time.Sleep(2 * time.Second) -// -// logrus.Infoln("checking for one") -// list, _ := rm.GetMembers(ctx) -// spew.Dump(list) -// -// logrus.Infoln("about to register two for 3sec") -// rm.Register(ctx, "two", 3*time.Second) -// -// logrus.Infoln("checking for one and two") -// list, _ = rm.GetMembers(ctx) -// spew.Dump(list) -// -// logrus.Infoln("about to sleep for 
1.5sec") -// time.Sleep(1500 * time.Millisecond) -// -// logrus.Infoln("checking list; one should be missing, two should be there") -// list, _ = rm.GetMembers(ctx) -// spew.Dump(list) -// -// logrus.Infoln("about to re-register two for 3sec") -// rm.Register(ctx, "two", 3*time.Second) -// -// logrus.Infoln("about to sleep for 2sec") -// time.Sleep(2 * time.Second) -// -// logrus.Infoln("checking list; one should be missing, two should be there") -// list, _ = rm.GetMembers(ctx) -// spew.Dump(list) -// -// logrus.Infoln("about to sleep for 1.5sec") -// time.Sleep(1500 * time.Millisecond) -// -// logrus.Infoln("checking list; both should be missing") -// list, _ = rm.GetMembers(ctx) -// spew.Dump(list) -//} - -const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - -// GenID returns a random string of length numChars -func GenID(numChars int) string { - id := make([]byte, numChars) - for i := 0; i < numChars; i++ { - id[i] = charset[rand.Intn(len(charset))] - } - return string(id) -} From 5523ef1bb06758068c85c50faa9d589f2470b4d7 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Tue, 25 Apr 2023 11:01:25 +0530 Subject: [PATCH 314/351] bugfix in rpm package building --- build/vm/tracing-rpm/script.sh | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/build/vm/tracing-rpm/script.sh b/build/vm/tracing-rpm/script.sh index bdb9b63337..90136ed275 100755 --- a/build/vm/tracing-rpm/script.sh +++ b/build/vm/tracing-rpm/script.sh @@ -22,14 +22,15 @@ CGO_ENABLED=0 \ -o configure \ ../configure.go -mkdir tracing-proxy-$1 -cp -r ../package_directories/* tracing-proxy-$1 -mv configure tracing-proxy-$1/opt/opsramp/tracing-proxy/bin/configure -mv tracing-proxy tracing-proxy-$1/opt/opsramp/tracing-proxy/bin/tracing-proxy +package_name="tracing-proxy-${1}" +mkdir -p ${package_name}/opt/opsramp/tracing-proxy/bin/ +cp -r ../package_directories/* ${package_name} +mv configure 
${package_name}/opt/opsramp/tracing-proxy/bin/configure +mv tracing-proxy ${package_name}/opt/opsramp/tracing-proxy/bin/tracing-proxy -tar -czvf tracing-proxy-$1.tar.gz tracing-proxy-$1 +tar -czvf ${package_name}.tar.gz ${package_name} -mv tracing-proxy-$1.tar.gz /root/rpmbuild/SOURCES/ +mv ${package_name}.tar.gz /root/rpmbuild/SOURCES/ cp tracing-proxy.spec /root/rpmbuild/SPECS/tracing-proxy.spec rpmbuild -ba --clean /root/rpmbuild/SPECS/tracing-proxy.spec @@ -37,4 +38,5 @@ rpmbuild -ba --clean /root/rpmbuild/SPECS/tracing-proxy.spec echo "***** rpm package can be found in /root/rpmbuild/RPMS/x86_64/ ****" # CleanUp -rm -rf tracing-proxy-$1 +rm -rf ${package_name} +rm -rf configure tracing-proxy From c8e36d4b8d6a40a903ee7a86014e2d31e049ef88 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Tue, 25 Apr 2023 14:41:12 +0530 Subject: [PATCH 315/351] fix debian package issues --- build/vm/configure.go | 8 +++--- .../etc/systemd/system/tracing-proxy.service | 3 --- build/vm/tracing-deb/script.sh | 27 ++++++++++--------- build/vm/tracing-deb/tracing/DEBIAN/postinst | 7 +++++ build/vm/tracing-deb/tracing/DEBIAN/prerm | 2 +- 5 files changed, 26 insertions(+), 21 deletions(-) diff --git a/build/vm/configure.go b/build/vm/configure.go index ad6a0a3137..2c9dd7167f 100644 --- a/build/vm/configure.go +++ b/build/vm/configure.go @@ -50,14 +50,12 @@ func main() { fileContent = strings.ReplaceAll(fileContent, "", *secret) fileContent = strings.ReplaceAll(fileContent, "", *tenant) - if err = os.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml", []byte(fileContent), 0666); err != nil { - fmt.Println(err) - os.Exit(1) + if err = os.WriteFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml", []byte(fileContent), 600); err != nil { + log.Fatal(err) } if _, err := exec.Command("systemctl", "enable", "--now", "tracing-proxy").Output(); err != nil { - fmt.Println(err) - os.Exit(1) + log.Fatal(err) } fmt.Println("Tracing-Proxy Started Successfully") diff --git 
a/build/vm/package_directories/etc/systemd/system/tracing-proxy.service b/build/vm/package_directories/etc/systemd/system/tracing-proxy.service index 9225670d0a..df0f8a51c9 100644 --- a/build/vm/package_directories/etc/systemd/system/tracing-proxy.service +++ b/build/vm/package_directories/etc/systemd/system/tracing-proxy.service @@ -8,6 +8,3 @@ KillMode=process Restart=on-failure LimitNOFILE=infinity -[Install] -Alias=tracing-proxy tracing-proxy.service - diff --git a/build/vm/tracing-deb/script.sh b/build/vm/tracing-deb/script.sh index 36fcc125c6..1b297def22 100755 --- a/build/vm/tracing-deb/script.sh +++ b/build/vm/tracing-deb/script.sh @@ -4,11 +4,14 @@ sed -i "/^Version/s/:.*$/: ${Version}/g" tracing/DEBIAN/control architecture=$(uname -m) if [ "$architecture" = "x86_64" ]; then - architecture='amd64' + architecture='amd64' fi sed -i "/^Architecture/s/:.*$/: ${architecture}/g" tracing/DEBIAN/control +# remove old data +rm -rf ./output + # Updating the files mkdir -p tracing/opt/opsramp/tracing-proxy/bin mkdir -p tracing/opt/opsramp/tracing-proxy/conf @@ -18,18 +21,18 @@ cp -r ../package_directories/* tracing/ # Building a static binaries CGO_ENABLED=0 \ - GOOS=linux \ - GOARCH=amd64 \ - go build -ldflags "-X main.BuildID=${Version}" \ - -o tracing-proxy \ - ../../../cmd/tracing-proxy + GOOS=linux \ + GOARCH=amd64 \ + go build -ldflags "-X main.BuildID=${Version}" \ + -o tracing-proxy \ + ../../../cmd/tracing-proxy CGO_ENABLED=0 \ - GOOS=linux \ - GOARCH=amd64 \ - go build -ldflags "-X main.BuildID=${Version}" \ - -o configure \ - ../configure.go + GOOS=linux \ + GOARCH=amd64 \ + go build -ldflags "-X main.BuildID=${Version}" \ + -o configure \ + ../configure.go cp tracing-proxy tracing/opt/opsramp/tracing-proxy/bin/tracing-proxy cp configure tracing/opt/opsramp/tracing-proxy/bin/configure @@ -44,4 +47,4 @@ mv tracing.deb ./output/"${packageName}" # Cleanup rm -rf ./tracing/opt rm -rf ./tracing/etc -rm -rf configure tracing-proxy \ No newline at end of file +rm 
-rf configure tracing-proxy diff --git a/build/vm/tracing-deb/tracing/DEBIAN/postinst b/build/vm/tracing-deb/tracing/DEBIAN/postinst index 59770d8662..8a3134ca69 100755 --- a/build/vm/tracing-deb/tracing/DEBIAN/postinst +++ b/build/vm/tracing-deb/tracing/DEBIAN/postinst @@ -1,2 +1,9 @@ mkdir -p /var/log/opsramp touch /var/log/opsramp/tracing-proxy.log +chmod 644 /etc/systemd/system/tracing-proxy.service +chmod 600 /opt/opsramp/tracing-proxy/conf +chmod 600 /opt/opsramp/tracing-proxy/conf/config_complete.yaml +chmod 600 /opt/opsramp/tracing-proxy/conf/rules_complete.yaml +chmod 744 /opt/opsramp/tracing-proxy/bin +chmod 744 /opt/opsramp/tracing-proxy/bin/configure +chmod 744 /opt/opsramp/tracing-proxy/bin/tracing-proxy \ No newline at end of file diff --git a/build/vm/tracing-deb/tracing/DEBIAN/prerm b/build/vm/tracing-deb/tracing/DEBIAN/prerm index 8a65e05e4f..7bca94d9ef 100755 --- a/build/vm/tracing-deb/tracing/DEBIAN/prerm +++ b/build/vm/tracing-deb/tracing/DEBIAN/prerm @@ -6,5 +6,5 @@ if [ -f /etc/systemd/system/tracing-proxy.service ]; then fi rm -rf /opt/opsramp/tracing-proxy systemctl daemon-reload -systemctl reset-failed tracing-proxy.service +systemctl reset-failed tracing-proxy.service > /dev/null 2>&1 echo "Uninstalled Tracing Proxy Successfully" From 7e7852e889348bab4a16ac42947578afa4d52ebe Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Tue, 25 Apr 2023 15:12:58 +0530 Subject: [PATCH 316/351] fix for leftover service files in case of rpm package --- build/vm/tracing-rpm/tracing-proxy.spec | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/build/vm/tracing-rpm/tracing-proxy.spec b/build/vm/tracing-rpm/tracing-proxy.spec index e0f9a7788d..fb0042b59f 100644 --- a/build/vm/tracing-rpm/tracing-proxy.spec +++ b/build/vm/tracing-rpm/tracing-proxy.spec @@ -24,10 +24,10 @@ Tracing Proxy install -p -d -m 0755 %{buildroot}/opt/opsramp/tracing-proxy/bin install -p -d -m 0755 %{buildroot}/opt/opsramp/tracing-proxy/conf install 
-p -d -m 0755 %{buildroot}/etc/systemd/system -install -m 0775 opt/opsramp/tracing-proxy/bin/tracing-proxy %{buildroot}/opt/opsramp/tracing-proxy/bin/ -install -m 0775 opt/opsramp/tracing-proxy/bin/configure %{buildroot}/opt/opsramp/tracing-proxy/bin -install -m 0644 opt/opsramp/tracing-proxy/conf/config_complete.yaml %{buildroot}/opt/opsramp/tracing-proxy/conf/ -install -m 0644 opt/opsramp/tracing-proxy/conf/rules_complete.yaml %{buildroot}/opt/opsramp/tracing-proxy/conf/ +install -m 0744 opt/opsramp/tracing-proxy/bin/tracing-proxy %{buildroot}/opt/opsramp/tracing-proxy/bin/ +install -m 0744 opt/opsramp/tracing-proxy/bin/configure %{buildroot}/opt/opsramp/tracing-proxy/bin +install -m 0600 opt/opsramp/tracing-proxy/conf/config_complete.yaml %{buildroot}/opt/opsramp/tracing-proxy/conf/ +install -m 0600 opt/opsramp/tracing-proxy/conf/rules_complete.yaml %{buildroot}/opt/opsramp/tracing-proxy/conf/ install -m 0644 etc/systemd/system/tracing-proxy.service %{buildroot}/etc/systemd/system %clean @@ -49,13 +49,12 @@ systemctl start tracing-proxy echo "Uninstalling Tracing Proxy" systemctl stop tracing-proxy systemctl disable tracing-proxy -#if [ -f /etc/systemd/system/tracing-proxy.service ]; then -# rm -rf /etc/systemd/system/tracing-proxy.service > /dev/null 2>&1 -#fi -#rm -rf /opt/opsramp/tracing-proxy -#systemctl daemon-reload -#systemctl reset-failed tracing-proxy.service %postun -p /bin/bash -rm -d /opt/opsramp/tracing-proxy +%__rm -rf /opt/opsramp/tracing-proxy +if [ -f /etc/systemd/system/tracing-proxy.service ]; then + %__rm -rf /etc/systemd/system/tracing-proxy.service > /dev/null 2>&1 +fi +systemctl daemon-reload +systemctl reset-failed tracing-proxy.service > /dev/null 2>&1 echo "Uninstalled Tracing Proxy Successfully" From fb368cf8bc3d9dcfefc002df546fb899d5787153 Mon Sep 17 00:00:00 2001 From: saikalyan-bhagavathula <103252261+saikalyan-bhagavathula@users.noreply.github.com> Date: Tue, 25 Apr 2023 15:34:31 +0530 Subject: [PATCH 317/351] trace 
proxy version metric (#6) --- app/app.go | 5 ++++- cmd/tracing-proxy/main.go | 14 +++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/app/app.go b/app/app.go index 231b5a260d..07e4342917 100644 --- a/app/app.go +++ b/app/app.go @@ -36,7 +36,10 @@ func (a *App) Start() error { // and external sources a.IncomingRouter.LnS("incoming") a.PeerRouter.LnS("peer") - + a.Metrics.RegisterWithDescriptionLabels("collector_info", "gauge", "Version Of Tracing-Proxy Running", []string{"version"}) + a.Metrics.GaugeWithLabels("collector_info", map[string]string{ + "version": a.Version, + }, 1) return nil } diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index 78960707bc..293f1a6c34 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -29,7 +29,7 @@ import ( // set by travis. var BuildID string -var version string +var CollectorVersion string type Options struct { ConfigFile string `short:"c" long:"config" description:"Path to config file" default:"/etc/tracing-proxy/config.toml"` @@ -48,13 +48,13 @@ func main() { } if BuildID == "" { - version = "dev" + CollectorVersion = "dev" } else { - version = BuildID + CollectorVersion = BuildID } if opts.Version { - fmt.Println("Version: " + version) + fmt.Println("Version: " + CollectorVersion) os.Exit(0) } @@ -71,7 +71,7 @@ func main() { } a := app.App{ - Version: version, + Version: CollectorVersion, } c, err := config.NewConfig(opts.ConfigFile, opts.RulesFile, func(err error) { @@ -135,7 +135,7 @@ func main() { } retryConfig := c.GetRetryConfig() - userAgentAddition := "tracing-proxy/" + version + userAgentAddition := "tracing-proxy/" + CollectorVersion upstreamClient, err := libtrace.NewClient(libtrace.ClientConfig{ Transmission: &transmission.TraceProxy{ MaxBatchSize: c.GetMaxBatchSize(), @@ -223,7 +223,7 @@ func main() { &inject.Object{Value: metricsConfig, Name: "metrics"}, &inject.Object{Value: upstreamMetricsConfig, Name: "upstreamMetrics"}, &inject.Object{Value: 
peerMetricsConfig, Name: "peerMetrics"}, - &inject.Object{Value: version, Name: "version"}, + &inject.Object{Value: CollectorVersion, Name: "version"}, &inject.Object{Value: samplerFactory}, &inject.Object{Value: &a}, ) From 612e8468b4c74fdc3693519c791007714b32729c Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Wed, 26 Apr 2023 22:00:10 +0530 Subject: [PATCH 318/351] update libtrace-go and bugfix for peer transmission --- cmd/tracing-proxy/main.go | 15 ++++++++------- go.mod | 2 +- go.sum | 4 ++-- route/otlp_trace.go | 39 +++++---------------------------------- 4 files changed, 16 insertions(+), 44 deletions(-) diff --git a/cmd/tracing-proxy/main.go b/cmd/tracing-proxy/main.go index 293f1a6c34..526bbe703a 100644 --- a/cmd/tracing-proxy/main.go +++ b/cmd/tracing-proxy/main.go @@ -3,13 +3,6 @@ package main import ( "context" "fmt" - "net" - "net/http" - "os" - "os/signal" - "syscall" - "time" - "github.com/facebookgo/inject" "github.com/facebookgo/startstop" flag "github.com/jessevdk/go-flags" @@ -25,6 +18,12 @@ import ( "github.com/opsramp/tracing-proxy/service/debug" "github.com/opsramp/tracing-proxy/sharder" "github.com/opsramp/tracing-proxy/transmit" + "net" + "net/http" + "os" + "os/signal" + "syscall" + "time" ) // set by travis. 
@@ -147,6 +146,7 @@ func main() { BlockOnSend: true, EnableMsgpackEncoding: false, Metrics: upstreamMetricsConfig, + IsPeer: false, UseTls: c.GetGlobalUseTLS(), UseTlsInsecure: c.GetGlobalUseTLSInsecureSkip(), AuthTokenEndpoint: authConfig.Endpoint, @@ -180,6 +180,7 @@ func main() { DisableCompression: !c.GetCompressPeerCommunication(), EnableMsgpackEncoding: false, Metrics: peerMetricsConfig, + IsPeer: true, AuthTokenEndpoint: authConfig.Endpoint, AuthTokenKey: authConfig.Key, AuthTokenSecret: authConfig.Secret, diff --git a/go.mod b/go.mod index 0bbf48ed0c..42ad246051 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c - github.com/opsramp/libtrace-go v0.0.0-20230420114955-ed5a1acf9924 + github.com/opsramp/libtrace-go v0.0.0-20230426162753-20ac6c8a4a9d github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 51f48293d7..6c067cbbfb 100644 --- a/go.sum +++ b/go.sum @@ -581,8 +581,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c h1:FhNFobufJrjU1/E/5LCiZk3IZRbrqk/5gnTz5iZTNQg= github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230420114955-ed5a1acf9924 h1:sTGWGxhN2rqubYG3JU9SQYsOccsv1E6RqrBs4cDtcu4= -github.com/opsramp/libtrace-go v0.0.0-20230420114955-ed5a1acf9924/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= 
+github.com/opsramp/libtrace-go v0.0.0-20230426162753-20ac6c8a4a9d h1:68PAFtB16GKP1e73n5k30CDrmDFoCkqLbalQIo8P5kw= +github.com/opsramp/libtrace-go v0.0.0-20230426162753-20ac6c8a4a9d/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 5494db68fc..989f1d46b4 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -6,9 +6,7 @@ import ( "fmt" "github.com/opsramp/libtrace-go/proto/proxypb" "github.com/opsramp/libtrace-go/transmission" - "google.golang.org/grpc/metadata" "net/http" - "strings" "time" huskyotlp "github.com/opsramp/husky/otlp" @@ -103,38 +101,13 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr r.Logger.Debug().Logf("Received Trace data from peer") r.Metrics.Increment(r.incomingOrPeer + "_router_batch") - var token, tenantId, datasetName string apiHost, err := r.Config.GetOpsrampAPI() if err != nil { r.Logger.Error().Logf("Unable to retrieve APIHost from config while processing OTLP batch") return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get apihost", Status: "Failed"}, nil } - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get request metadata", Status: "Failed"}, nil - } else { - authorization := md.Get("Authorization") - if len(authorization) == 0 { - return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get Authorization", Status: "Failed"}, nil - } else { - token = authorization[0] - recvdTenantId := md.Get("tenantId") - if len(recvdTenantId) == 0 { - tenantId = 
strings.TrimSpace(in.TenantId) - if tenantId == "" { - return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get TenantId", Status: "Failed"}, nil - } - } else { - tenantId = recvdTenantId[0] - } - } - - if dataSets := md.Get("dataset"); len(dataSets) > 0 { - datasetName = dataSets[0] - } else { - return &proxypb.ExportTraceProxyServiceResponse{Message: "Failed to get dataset", Status: "Failed"}, nil - } - } + dataset, _ := r.Config.GetDataset() + tenantId, _ := r.Config.GetTenantId() var requestID types.RequestIDContextKey @@ -181,12 +154,10 @@ func (r *Router) ExportTraceProxy(ctx context.Context, in *proxypb.ExportTracePr data["endTime"] = item.Data.EndTime event := &types.Event{ - Context: ctx, - APIHost: apiHost, - APIToken: token, - //APIKey: "token", //Hardcoded for time-being. This need to be cleaned + Context: ctx, + APIHost: apiHost, APITenantId: tenantId, - Dataset: datasetName, + Dataset: dataset, Timestamp: timestamp, Data: data, } From 9aa03db51e54ce7ff1665647a9c480b0c0b2bfed Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Wed, 26 Apr 2023 22:13:40 +0530 Subject: [PATCH 319/351] bumping version to 1.1.0 in dockerfile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 5f518d2cb3..ccd4b6f951 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM golang:alpine as builder RUN apk update && apk add --no-cache git bash ca-certificates && update-ca-certificates -ARG BUILD_ID="1.0.0" +ARG BUILD_ID="1.1.0" WORKDIR /app From e19347f517245ed278907cb81e5384ecd56d4d40 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Thu, 27 Apr 2023 11:15:46 +0530 Subject: [PATCH 320/351] fix service file debian and rpm packages --- build/vm/configure.go | 22 +++++++++++++++---- .../etc/systemd/system/tracing-proxy.service | 2 ++ 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/build/vm/configure.go b/build/vm/configure.go index 2c9dd7167f..630e1b7a5b 100644 --- 
a/build/vm/configure.go +++ b/build/vm/configure.go @@ -2,13 +2,15 @@ package main import ( "flag" - "fmt" "log" "os" "os/exec" "strings" + "time" ) +const ServiceName = "tracing-proxy.service" + func main() { configFile, err := os.ReadFile("/opt/opsramp/tracing-proxy/conf/config_complete.yaml") if err != nil { @@ -54,9 +56,21 @@ func main() { log.Fatal(err) } - if _, err := exec.Command("systemctl", "enable", "--now", "tracing-proxy").Output(); err != nil { - log.Fatal(err) + // Enable and start with fallback + if err := exec.Command("systemctl", "enable", "--now", ServiceName).Run(); err != nil { + _ = exec.Command("systemctl", "start", ServiceName).Run() + _ = exec.Command("systemctl", "enable", ServiceName).Run() } - fmt.Println("Tracing-Proxy Started Successfully") + time.Sleep(5 * time.Second) + + // Check if the services are enabled and started properly and attempt again + if output, err := exec.Command("systemctl", "is-enabled", ServiceName).Output(); err != nil || string(output) != "enabled" { + _ = exec.Command("systemctl", "enable", ServiceName).Run() + } + if output, err := exec.Command("systemctl", "is-active", ServiceName).Output(); err != nil || string(output) != "active" { + _ = exec.Command("systemctl", "start", ServiceName).Run() + } else { + log.Println("Tracing-Proxy Started Successfully") + } } diff --git a/build/vm/package_directories/etc/systemd/system/tracing-proxy.service b/build/vm/package_directories/etc/systemd/system/tracing-proxy.service index df0f8a51c9..09cffbca6a 100644 --- a/build/vm/package_directories/etc/systemd/system/tracing-proxy.service +++ b/build/vm/package_directories/etc/systemd/system/tracing-proxy.service @@ -8,3 +8,5 @@ KillMode=process Restart=on-failure LimitNOFILE=infinity +[Install] +WantedBy=multi-user.target \ No newline at end of file From 5f2942ccdb30ed946ed90a0781489cbd114d31d5 Mon Sep 17 00:00:00 2001 From: saikalyan-bhagavathula <103252261+saikalyan-bhagavathula@users.noreply.github.com> Date: 
Fri, 28 Apr 2023 14:52:49 +0530 Subject: [PATCH 321/351] Kalyan develop (#7) --- .../opt/opsramp/tracing-proxy/conf/rules_complete.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/rules_complete.yaml b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/rules_complete.yaml index aedb5ba976..b6065605fc 100644 --- a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/rules_complete.yaml +++ b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/rules_complete.yaml @@ -136,7 +136,7 @@ SampleRate: 1 # # sampler as part of the key. The number of spans is exact, so if there are # # normally small variations in trace length you may want to leave this off. If # # traces are consistent lengths and changes in trace length is a useful -# # indicator of traces you'd like to see in Honeycomb, set this to true. +# # indicator of traces you'd like to see in Opsramp, set this to true. # UseTraceLength: true # # # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field From 3b667857202b7fad3094d4574b2e69de4e218dbc Mon Sep 17 00:00:00 2001 From: saikalyan-bhagavathula <103252261+saikalyan-bhagavathula@users.noreply.github.com> Date: Thu, 4 May 2023 11:52:44 +0530 Subject: [PATCH 322/351] adding http config to k8s-deployment.yaml (#8) * trace proxy version metric * replacing honeycomb references * adding http config to k8s-deployment.yaml --- build/kubernetes/yaml/k8s-deployment.yaml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/build/kubernetes/yaml/k8s-deployment.yaml b/build/kubernetes/yaml/k8s-deployment.yaml index 960013ff69..7a7a3ebff6 100644 --- a/build/kubernetes/yaml/k8s-deployment.yaml +++ b/build/kubernetes/yaml/k8s-deployment.yaml @@ -25,7 +25,12 @@ spec: image: us-docker.pkg.dev/opsramp-registry/agent-images/trace-proxy imagePullPolicy: Always ports: - - containerPort: 9090 + - name: http + 
containerPort: 8082 + protocol: TCP + - name: grpc + containerPort: 9090 + protocol: TCP resources: requests: memory: "2048Mi" @@ -63,3 +68,8 @@ spec: - protocol: TCP port: 9090 targetPort: 9090 + name: grpc + - protocol: TCP + port: 8082 + targetPort: 8082 + name: http \ No newline at end of file From 5b4e01dcae1ed504f62ee72e39aef5b1bfa2d062 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Mon, 15 May 2023 09:45:15 +0530 Subject: [PATCH 323/351] add additional fields in traces --- .../helm/opsramp-tracing-proxy/values.yaml | 16 +++++++------- build/kubernetes/yaml/k8s-config-cm.yaml | 13 ++++++----- .../tracing-proxy/conf/config_complete.yaml | 12 +++++----- collect/collect.go | 22 +++++++++++++++++-- config/config.go | 2 ++ config/file_config.go | 9 ++++++++ config_complete.yaml | 10 ++++----- go.mod | 2 +- go.sum | 4 ++-- transmit/transmit.go | 7 +++++- 10 files changed, 67 insertions(+), 30 deletions(-) diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml index 6f323f274b..338316245c 100644 --- a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml +++ b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml @@ -107,12 +107,15 @@ config: PeerBufferSize: 1000 # AddHostMetadataToTrace determines whether to add information about - # the host that tracing-proxy is running on to the spans that it processes. + # the host that tracing proxy is running on to the spans that it processes. # If enabled, information about the host will be added to each span with the - # prefix `meta.tracing-proxy.`. - # Currently, the only value added is 'meta.tracing-proxy.local_hostname'. + # key 'meta.local_hostname'. 
AddHostMetadataToTrace: false + # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics + # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} + AddAdditionalMetadata: { } + # EnvironmentCacheTTL is the amount of time a cache entry will live that associates # an API key with an environment name. # Cache misses lookup the environment name using OpsRampAPI config value. @@ -193,9 +196,6 @@ config: ## Authentication Configuration ## ################################## AuthConfiguration: - # SkipAuth - skips authentication while sending requests (only to be used for debugging) - SkipAuth: false - # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made Endpoint: "" # Key - authentication key provided in OpsRamp Portal @@ -389,7 +389,7 @@ config: SampleCacheConfig: # Type controls the type of sample cache used. # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is - # 5x the size of the trace cache. This is tracing-proxy's original sample cache strategy. + # 5x the size of the trace cache. This is tracing proxy's original sample cache strategy. # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. # It is also more configurable. The cuckoo filter is recommended for most installations. @@ -397,7 +397,7 @@ config: # Type: "cuckoo" # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. - # tracing-proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some + # tracing proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some # statistical information. 
This is most useful in cases where the trace was sent before sending # the root span, so that the root span can be decorated with accurate metadata. # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). diff --git a/build/kubernetes/yaml/k8s-config-cm.yaml b/build/kubernetes/yaml/k8s-config-cm.yaml index 59c92462af..549104c3b5 100644 --- a/build/kubernetes/yaml/k8s-config-cm.yaml +++ b/build/kubernetes/yaml/k8s-config-cm.yaml @@ -88,12 +88,15 @@ data: PeerBufferSize: 1000 # AddHostMetadataToTrace determines whether to add information about - # the host that tracing-proxy is running on to the spans that it processes. + # the host that tracing proxy is running on to the spans that it processes. # If enabled, information about the host will be added to each span with the - # prefix `meta.tracing-proxy.`. - # Currently, the only value added is 'meta.tracing-proxy.local_hostname'. + # key 'meta.local_hostname'. AddHostMetadataToTrace: false + # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics + # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} + AddAdditionalMetadata: { } + # EnvironmentCacheTTL is the amount of time a cache entry will live that associates # an API key with an environment name. # Cache misses lookup the environment name using OpsRampAPI config value. @@ -386,7 +389,7 @@ data: SampleCacheConfig: # Type controls the type of sample cache used. # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is - # 5x the size of the trace cache. This is tracing-proxy's original sample cache strategy. + # 5x the size of the trace cache. This is tracing proxy's original sample cache strategy. # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. 
# It is also more configurable. The cuckoo filter is recommended for most installations. @@ -394,7 +397,7 @@ data: # Type: "cuckoo" # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. - # tracing-proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some + # tracing proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some # statistical information. This is most useful in cases where the trace was sent before sending # the root span, so that the root span can be decorated with accurate metadata. # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). diff --git a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml index 3938a20d6b..9ec52bc8c6 100644 --- a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml +++ b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml @@ -76,10 +76,13 @@ PeerBufferSize: 1000 # AddHostMetadataToTrace determines whether to add information about # the host that tracing proxy is running on to the spans that it processes. # If enabled, information about the host will be added to each span with the -# prefix `meta.tracing proxy.`. -# Currently, the only value added is 'meta.tracing proxy.local_hostname'. +# key 'meta.local_hostname'. AddHostMetadataToTrace: false +# AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics +# the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} +AddAdditionalMetadata: { } + # EnvironmentCacheTTL is the amount of time a cache entry will live that associates # an API key with an environment name. # Cache misses lookup the environment name using OpsRampAPI config value. 
@@ -160,9 +163,6 @@ ProxyConfiguration: ## Authentication Configuration ## ################################## AuthConfiguration: - # SkipAuth - skips authentication while sending requests (only to be used for debugging) - SkipAuth: false - # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made Endpoint: "" # Key - authentication key provided in OpsRamp Portal @@ -283,7 +283,7 @@ PeerManagement: # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. # Not eligible for live reload. - UseTLS: true + UseTLS: false # UseTLSInsecure disables certificate checks # Not eligible for live reload. diff --git a/collect/collect.go b/collect/collect.go index 367b0e6771..b351b12186 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -18,6 +18,12 @@ import ( "time" ) +const ( + resourceAttributesKey = "resourceAttributes" + spanAttributesKey = "spanAttributes" + eventAttributesKey = "eventAttributes" +) + var ErrWouldBlock = errors.New("not adding span, channel buffer is full") type Collector interface { @@ -670,7 +676,7 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { i.Logger.Debug().WithFields(logFields).Logf("Sending trace") for _, sp := range trace.GetSpans() { if i.Config.GetAddRuleReasonToTrace() { - sp.Data["meta.refinery.reason"] = reason + sp.Data["meta.reason"] = reason } // update the root span (if we have one, which we might not if the trace timed out) @@ -683,9 +689,21 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { field := i.Config.GetDryRunFieldName() sp.Data[field] = shouldSend } + + resAttr, ok := sp.Data[resourceAttributesKey].(map[string]interface{}) + if !ok { + resAttr = map[string]interface{}{} + } + for key, value := range i.Config.GetAddAdditionalMetadata() { + if _, ok := resAttr[key]; !ok { + resAttr[key] = value + } + } + if i.hostname != "" { - sp.Data["meta.refinery.local_hostname"] = 
i.hostname + resAttr["meta.local_hostname"] = i.hostname } + sp.Data[resourceAttributesKey] = resAttr mergeTraceAndSpanSampleRates(sp, trace.SampleRate) i.Transmission.EnqueueSpan(sp) } diff --git a/config/config.go b/config/config.go index e5789ac4a4..293b99087d 100644 --- a/config/config.go +++ b/config/config.go @@ -145,6 +145,8 @@ type Config interface { GetAddHostMetadataToTrace() bool + GetAddAdditionalMetadata() map[string]string + GetSendMetricsToOpsRamp() bool // GetUseTLS returns true when TLS must be enabled to dial diff --git a/config/file_config.go b/config/file_config.go index 2b22c62001..726b60fd07 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -59,6 +59,7 @@ type configContents struct { PeerManagement PeerManagementConfig `validate:"required"` InMemCollector InMemoryCollectorCacheCapacity `validate:"required"` AddHostMetadataToTrace bool + AddAdditionalMetadata map[string]string AddRuleReasonToTrace bool EnvironmentCacheTTL time.Duration DatasetPrefix string @@ -187,6 +188,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("PeerBufferSize", libtrace.DefaultPendingWorkCapacity) c.SetDefault("MaxAlloc", uint64(0)) c.SetDefault("AddHostMetadataToTrace", false) + c.SetDefault("AddAdditionalMetadata", map[string]string{}) c.SetDefault("AddRuleReasonToTrace", false) c.SetDefault("EnvironmentCacheTTL", time.Hour) c.SetDefault("GRPCServerParameters.MaxConnectionIdle", 1*time.Minute) @@ -866,6 +868,13 @@ func (f *fileConfig) GetAddHostMetadataToTrace() bool { return f.conf.AddHostMetadataToTrace } +func (f *fileConfig) GetAddAdditionalMetadata() map[string]string { + f.mux.RLock() + defer f.mux.RUnlock() + + return f.conf.AddAdditionalMetadata +} + func (f *fileConfig) GetSendMetricsToOpsRamp() bool { f.mux.RLock() defer f.mux.RUnlock() diff --git a/config_complete.yaml b/config_complete.yaml index 9372eb3623..c7b3046bfb 100644 --- a/config_complete.yaml +++ b/config_complete.yaml @@ -76,10 
+76,13 @@ PeerBufferSize: 1000 # AddHostMetadataToTrace determines whether to add information about # the host that tracing proxy is running on to the spans that it processes. # If enabled, information about the host will be added to each span with the -# prefix `meta.tracing proxy.`. -# Currently, the only value added is 'meta.tracing proxy.local_hostname'. +# key 'meta.local_hostname'. AddHostMetadataToTrace: false +# AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics +# the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} +AddAdditionalMetadata: { } + # EnvironmentCacheTTL is the amount of time a cache entry will live that associates # an API key with an environment name. # Cache misses lookup the environment name using OpsRampAPI config value. @@ -160,9 +163,6 @@ ProxyConfiguration: ## Authentication Configuration ## ################################## AuthConfiguration: - # SkipAuth - skips authentication while sending requests (only to be used for debugging) - SkipAuth: false - # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made Endpoint: "" # Key - authentication key provided in OpsRamp Portal diff --git a/go.mod b/go.mod index 42ad246051..7db60a7b1a 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c - github.com/opsramp/libtrace-go v0.0.0-20230426162753-20ac6c8a4a9d + github.com/opsramp/libtrace-go v0.0.0-20230515040030-89d6ab9f4816 github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 6c067cbbfb..c1d24cd71d 100644 --- a/go.sum +++ b/go.sum @@ -581,8 +581,8 @@ 
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c h1:FhNFobufJrjU1/E/5LCiZk3IZRbrqk/5gnTz5iZTNQg= github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230426162753-20ac6c8a4a9d h1:68PAFtB16GKP1e73n5k30CDrmDFoCkqLbalQIo8P5kw= -github.com/opsramp/libtrace-go v0.0.0-20230426162753-20ac6c8a4a9d/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/libtrace-go v0.0.0-20230515040030-89d6ab9f4816 h1:mutPEtUsbD2VPDh4Q3pMUfXaat6o7KLU0/72wvPhpvM= +github.com/opsramp/libtrace-go v0.0.0-20230515040030-89d6ab9f4816/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= diff --git a/transmit/transmit.go b/transmit/transmit.go index b6c54f6f1e..e8fd7476ac 100644 --- a/transmit/transmit.go +++ b/transmit/transmit.go @@ -59,7 +59,12 @@ func (d *DefaultTransmission) Start() error { if d.Config.GetAddHostMetadataToTrace() { if hostname, err := os.Hostname(); err == nil && hostname != "" { // add hostname to spans - d.LibhClient.AddField("meta.tracing-proxy.local_hostname", hostname) + d.LibhClient.AddResourceField("meta.local_hostname", hostname) + } + } + for key, value := range d.Config.GetAddAdditionalMetadata() { + if !d.LibhClient.CheckResourceField(key) { + d.LibhClient.AddResourceField(key, value) } } From b745ca21bfa99361de0e9116da338612bf414868 Mon Sep 17 
00:00:00 2001 From: saikalyan-bhagavathula <103252261+saikalyan-bhagavathula@users.noreply.github.com> Date: Mon, 15 May 2023 10:54:00 +0530 Subject: [PATCH 324/351] metric naming changes (#9) * trace proxy version metric * replacing honeycomb references * adding http config to k8s-deployment.yaml * metric naming changes --- README.md | 6 +++--- collect/cache/cache.go | 16 +++++++-------- collect/cache/cache_test.go | 4 ++-- collect/collect.go | 40 +++++++++++++++++++++++++++++++++---- 4 files changed, 49 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index d8d47fa253..0ebc57d0d5 100644 --- a/README.md +++ b/README.md @@ -44,7 +44,7 @@ There are a few vital configuration options; read through this list and make sur - Peer list: this is a list of all the other servers participating in this tracing-proxy cluster. Traces are evenly distributed across all available servers, and any one trace must be concentrated on one server, regardless of which server handled the incoming spans. The peer list lets the cluster move spans around to the server that is handling the trace. (Not used in the Redis-based config.) -- Buffer size: The `InMemCollector`'s `CacheCapacity` setting determines how many in-flight traces you can have. This should be large enough to avoid overflow. Some multiple (2x, 3x) the total number of in-flight traces you expect is a good place to start. If it's too low you will see the `collect_cache_buffer_overrun` metric increment. If you see that, you should increase the size of the buffer. +- Buffer size: The `InMemCollector`'s `CacheCapacity` setting determines how many in-flight traces you can have. This should be large enough to avoid overflow. Some multiple (2x, 3x) the total number of in-flight traces you expect is a good place to start. If it's too low you will see the `collector_cache_buffer_overrun` metric increment. If you see that, you should increase the size of the buffer. 
There are a few components of tracing-proxy with multiple implementations; the config file lets you choose which you'd like. As an example, there are two logging implementations - one that uses `logrus` and sends logs to STDOUT and a `honeycomb` implementation that sends the log messages to a Honeycomb dataset instead. Components with multiple implementations have one top level config item that lets you choose which implementation to use and then a section further down with additional config options for that choice (for example, the Honeycomb logger requires an API key). @@ -118,7 +118,7 @@ When dry run mode is enabled, the metric `trace_send_kept` will increment for ea ## Scaling Up -tracing-proxy uses bounded queues and circular buffers to manage allocating traces, so even under high volume memory use shouldn't expand dramatically. However, given that traces are stored in a circular buffer, when the throughput of traces exceeds the size of the buffer, things will start to go wrong. If you have statistics configured, a counter named `collect_cache_buffer_overrun` will be incremented each time this happens. The symptoms of this will be that traces will stop getting accumulated together, and instead spans that should be part of the same trace will be treated as two separate traces. All traces will continue to be sent (and sampled) but the sampling decisions will be inconsistent so you'll wind up with partial traces making it through the sampler and it will be very confusing. The size of the circular buffer is a configuration option named `CacheCapacity`. To choose a good value, you should consider the throughput of traces (e.g. traces / second started) and multiply that by the maximum duration of a trace (say, 3 seconds), then multiply that by some large buffer (maybe 10x). This will give you good headroom. +tracing-proxy uses bounded queues and circular buffers to manage allocating traces, so even under high volume memory use shouldn't expand dramatically. 
However, given that traces are stored in a circular buffer, when the throughput of traces exceeds the size of the buffer, things will start to go wrong. If you have statistics configured, a counter named `collector_cache_buffer_overrun` will be incremented each time this happens. The symptoms of this will be that traces will stop getting accumulated together, and instead spans that should be part of the same trace will be treated as two separate traces. All traces will continue to be sent (and sampled) but the sampling decisions will be inconsistent so you'll wind up with partial traces making it through the sampler and it will be very confusing. The size of the circular buffer is a configuration option named `CacheCapacity`. To choose a good value, you should consider the throughput of traces (e.g. traces / second started) and multiply that by the maximum duration of a trace (say, 3 seconds), then multiply that by some large buffer (maybe 10x). This will give you good headroom. Determining the number of machines necessary in the cluster is not an exact science, and is best influenced by watching for buffer overruns. But for a rough heuristic, count on a single machine using about 2G of memory to handle 5000 incoming events and tracking 500 sub-second traces per second (for each full trace lasting less than a second and an average size of 10 spans per trace). @@ -128,7 +128,7 @@ tracing-proxy emits a number of metrics to give some indication about the health - Sample rates: how many traces are kept / dropped, and what does the sample rate distribution look like? - [incoming|peer]_router_\*: how many events (no trace info) vs. spans (have trace info) have been accepted, and how many sent on to peers? -- collect_cache_buffer_overrun: this should remain zero; a positive value indicates the need to grow the size of the collector's circular buffer (via configuration `CacheCapacity`). 
+- collector_cache_buffer_overrun: this should remain zero; a positive value indicates the need to grow the size of the collector's circular buffer (via configuration `CacheCapacity`). - process_uptime_seconds: records the uptime of each process; look for unexpected restarts as a key towards memory constraints. ## Troubleshooting diff --git a/collect/cache/cache.go b/collect/cache/cache.go index 99599b9b49..35ba69115b 100644 --- a/collect/cache/cache.go +++ b/collect/cache/cache.go @@ -50,9 +50,9 @@ func NewInMemCache( // buffer_overrun increments when the trace overwritten in the circular // buffer has not yet been sent - metrics.Register("collect_cache_buffer_overrun", "counter") - metrics.Register("collect_cache_capacity", "gauge") - metrics.Register("collect_cache_entries", "histogram") + metrics.Register("collector_cache_buffer_overrun", "counter") + metrics.Register("collector_cache_capacity", "gauge") + metrics.Register("collector_cache_entries", "histogram") if capacity == 0 { capacity = DefaultInMemCacheCapacity @@ -102,7 +102,7 @@ func (d *DefaultInMemCache) Set(trace *types.Trace) *types.Trace { if !oldTrace.Sent { // if it hasn't already been sent, // record that we're overrunning the buffer - d.Metrics.Increment("collect_cache_buffer_overrun") + d.Metrics.Increment("collector_cache_buffer_overrun") // and return the trace so it can be sent. retTrace = oldTrace } @@ -131,8 +131,8 @@ func (d *DefaultInMemCache) GetAll() []*types.Trace { // TakeExpiredTraces should be called to decide which traces are past their expiration time; // It removes and returns them. 
func (d *DefaultInMemCache) TakeExpiredTraces(now time.Time) []*types.Trace { - d.Metrics.Gauge("collect_cache_capacity", float64(len(d.insertionOrder))) - d.Metrics.Histogram("collect_cache_entries", float64(len(d.cache))) + d.Metrics.Gauge("collector_cache_capacity", float64(len(d.insertionOrder))) + d.Metrics.Histogram("collector_cache_entries", float64(len(d.cache))) var res []*types.Trace for i, t := range d.insertionOrder { @@ -148,8 +148,8 @@ func (d *DefaultInMemCache) TakeExpiredTraces(now time.Time) []*types.Trace { // RemoveTraces accepts a set of trace IDs and removes any matching ones from // the insertion list. This is used in the case of a cache overrun. func (d *DefaultInMemCache) RemoveTraces(toDelete map[string]struct{}) { - d.Metrics.Gauge("collect_cache_capacity", float64(len(d.insertionOrder))) - d.Metrics.Histogram("collect_cache_entries", float64(len(d.cache))) + d.Metrics.Gauge("collector_cache_capacity", float64(len(d.insertionOrder))) + d.Metrics.Histogram("collector_cache_entries", float64(len(d.cache))) for i, t := range d.insertionOrder { if t != nil { diff --git a/collect/cache/cache_test.go b/collect/cache/cache_test.go index 69a915d4c0..bff24e8662 100644 --- a/collect/cache/cache_test.go +++ b/collect/cache/cache_test.go @@ -40,9 +40,9 @@ func TestBufferOverrun(t *testing.T) { c.Set(traces[0]) c.Set(traces[1]) - assert.Equal(t, 0, s.CounterIncrements["collect_cache_buffer_overrun"], "buffer should not yet have overrun") + assert.Equal(t, 0, s.CounterIncrements["collector_cache_buffer_overrun"], "buffer should not yet have overrun") c.Set(traces[2]) - assert.Equal(t, 1, s.CounterIncrements["collect_cache_buffer_overrun"], "buffer should have overrun") + assert.Equal(t, 1, s.CounterIncrements["collector_cache_buffer_overrun"], "buffer should have overrun") } func TestTakeExpiredTraces(t *testing.T) { diff --git a/collect/collect.go b/collect/collect.go index b351b12186..79ecb8fe74 100644 --- a/collect/collect.go +++ 
b/collect/collect.go @@ -99,7 +99,7 @@ func (i *InMemCollector) Start() error { i.Config.RegisterReloadCallback(i.sendReloadSignal) i.Metrics.Register("trace_duration_ms", "histogram") - i.Metrics.Register("trace_span_count", "histogram") + i.Metrics.Register("trace_spans_count_total", "histogram") i.Metrics.Register("collector_tosend_queue", "histogram") i.Metrics.Register("collector_incoming_queue", "histogram") i.Metrics.Register("collector_peer_queue", "histogram") @@ -117,7 +117,7 @@ func (i *InMemCollector) Start() error { i.Metrics.Register(TraceSendEjectedMemsize, "counter") i.Metrics.RegisterWithDescriptionLabels( - "trace_operation_latency_ms", + "trace_operations_latency_ms", "gauge", "Trace latency wrt each trace operation", []string{"service_name", "operation"}, @@ -140,6 +140,30 @@ func (i *InMemCollector) Start() error { "Total Number of events in spans wrt each trace operation", []string{"service_name", "operation"}, ) + i.Metrics.RegisterWithDescriptionLabels( + "trace_root_span", + "counter", + "Number of root spans in an operation", + []string{"service_name", "operation"}, + ) + i.Metrics.RegisterWithDescriptionLabels( + "trace_spans_count", + "counter", + "Number of spans in an operation", + []string{"service_name", "operation"}, + ) + i.Metrics.RegisterWithDescriptionLabels( + "trace_root_operation_latency_ms", + "gauge", + "Trace latency wrt each root trace operation", + []string{"service_name", "operation"}, + ) + i.Metrics.RegisterWithDescriptionLabels( + "trace_root_operations_failed", + "counter", + "Number of Error events in root spans wrt each trace operation", + []string{"service_name", "operation"}, + ) sampleCacheConfig := i.Config.GetSampleCacheConfig() switch sampleCacheConfig.Type { @@ -591,7 +615,7 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { traceDur := time.Since(trace.ArrivalTime) i.Metrics.Histogram("trace_duration_ms", float64(traceDur.Milliseconds())) - i.Metrics.Histogram("trace_span_count", 
float64(trace.DescendantCount())) + i.Metrics.Histogram("trace_spans_count_total", float64(trace.DescendantCount())) if trace.RootSpan != nil { i.Metrics.Increment("trace_send_has_root") } else { @@ -613,13 +637,21 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { durationMsString, ok := span.Data["durationMs"] if ok && durationMsString != nil { - i.Metrics.GaugeWithLabels("trace_operation_latency_ms", labels, metrics.ConvertNumeric(durationMsString)) + i.Metrics.GaugeWithLabels("trace_operations_latency_ms", labels, metrics.ConvertNumeric(durationMsString)) } + if isRootSpan(span) { + i.Metrics.GaugeWithLabels("trace_root_operation_latency_ms", labels, metrics.ConvertNumeric(durationMsString)) + i.Metrics.IncrementWithLabels("trace_root_span", labels) + } + i.Metrics.IncrementWithLabels("trace_spans_count", labels) errorStatus, ok := span.Data["error"] if ok && errorStatus != nil && errorStatus.(bool) { i.Metrics.IncrementWithLabels("trace_operations_failed", labels) i.Metrics.IncrementWithLabels("trace_operations_total", labels) + if isRootSpan(span) { + i.Metrics.IncrementWithLabels("trace_root_operations_failed", labels) + } } else { i.Metrics.IncrementWithLabels("trace_operations_succeeded", labels) i.Metrics.IncrementWithLabels("trace_operations_total", labels) From 19ab6014427a7043b4ca0c10fe642ba36ae2564e Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Mon, 15 May 2023 11:56:57 +0530 Subject: [PATCH 325/351] add additional trace fields in metrics with service and operation --- .../helm/opsramp-tracing-proxy/values.yaml | 2 +- build/kubernetes/yaml/k8s-config-cm.yaml | 2 +- .../tracing-proxy/conf/config_complete.yaml | 2 +- collect/collect.go | 17 +++++++++-------- config_complete.yaml | 2 +- 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml index 338316245c..1c9b66bcbc 100644 --- 
a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml +++ b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml @@ -114,7 +114,7 @@ config: # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} - AddAdditionalMetadata: { } + AddAdditionalMetadata: { "app": "default" } # EnvironmentCacheTTL is the amount of time a cache entry will live that associates # an API key with an environment name. diff --git a/build/kubernetes/yaml/k8s-config-cm.yaml b/build/kubernetes/yaml/k8s-config-cm.yaml index 549104c3b5..3f81a16ec7 100644 --- a/build/kubernetes/yaml/k8s-config-cm.yaml +++ b/build/kubernetes/yaml/k8s-config-cm.yaml @@ -95,7 +95,7 @@ data: # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} - AddAdditionalMetadata: { } + AddAdditionalMetadata: { "app": "default" } # EnvironmentCacheTTL is the amount of time a cache entry will live that associates # an API key with an environment name. diff --git a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml index 9ec52bc8c6..755d71b760 100644 --- a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml +++ b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml @@ -81,7 +81,7 @@ AddHostMetadataToTrace: false # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} -AddAdditionalMetadata: { } +AddAdditionalMetadata: { "app": "default" } # EnvironmentCacheTTL is the amount of time a cache entry will live that associates # an API key with an environment name. 
diff --git a/collect/collect.go b/collect/collect.go index 79ecb8fe74..b8f91e7331 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -120,49 +120,49 @@ func (i *InMemCollector) Start() error { "trace_operations_latency_ms", "gauge", "Trace latency wrt each trace operation", - []string{"service_name", "operation"}, + []string{"service_name", "operation", "app"}, ) i.Metrics.RegisterWithDescriptionLabels( "trace_operations_failed", "counter", "Number of Error events in spans wrt each trace operation", - []string{"service_name", "operation"}, + []string{"service_name", "operation", "app"}, ) i.Metrics.RegisterWithDescriptionLabels( "trace_operations_succeeded", "counter", "Number of Succeeded events in spans wrt each trace operation", - []string{"service_name", "operation"}, + []string{"service_name", "operation", "app"}, ) i.Metrics.RegisterWithDescriptionLabels( "trace_operations_total", "counter", "Total Number of events in spans wrt each trace operation", - []string{"service_name", "operation"}, + []string{"service_name", "operation", "app"}, ) i.Metrics.RegisterWithDescriptionLabels( "trace_root_span", "counter", "Number of root spans in an operation", - []string{"service_name", "operation"}, + []string{"service_name", "operation", "app"}, ) i.Metrics.RegisterWithDescriptionLabels( "trace_spans_count", "counter", "Number of spans in an operation", - []string{"service_name", "operation"}, + []string{"service_name", "operation", "app"}, ) i.Metrics.RegisterWithDescriptionLabels( "trace_root_operation_latency_ms", "gauge", "Trace latency wrt each root trace operation", - []string{"service_name", "operation"}, + []string{"service_name", "operation", "app"}, ) i.Metrics.RegisterWithDescriptionLabels( "trace_root_operations_failed", "counter", "Number of Error events in root spans wrt each trace operation", - []string{"service_name", "operation"}, + []string{"service_name", "operation", "app"}, ) sampleCacheConfig := i.Config.GetSampleCacheConfig() @@ -631,6 
+631,7 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { labelToKeyMap := map[string]string{ "service_name": "service.name", "operation": "spanName", + "app": "app", } labels := metrics.ExtractLabelsFromSpan(span, labelToKeyMap) diff --git a/config_complete.yaml b/config_complete.yaml index c7b3046bfb..a75ac5b576 100644 --- a/config_complete.yaml +++ b/config_complete.yaml @@ -81,7 +81,7 @@ AddHostMetadataToTrace: false # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} -AddAdditionalMetadata: { } +AddAdditionalMetadata: { "app": "default" } # EnvironmentCacheTTL is the amount of time a cache entry will live that associates # an API key with an environment name. From 49438832d826005fa5e46ab2378e36773e70290e Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Mon, 15 May 2023 12:41:23 +0530 Subject: [PATCH 326/351] limit the number of additionalMetadata keys to 5 --- .../helm/opsramp-tracing-proxy/values.yaml | 2 + build/kubernetes/yaml/k8s-config-cm.yaml | 2 + .../tracing-proxy/conf/config_complete.yaml | 68 ++++++++++--------- config/file_config.go | 22 +++++- config_complete.yaml | 2 + 5 files changed, 61 insertions(+), 35 deletions(-) diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml index 1c9b66bcbc..c3bb7ec5fb 100644 --- a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml +++ b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml @@ -114,6 +114,8 @@ config: # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} + # max number of additional keys supported is 5, if the limit exceeds then we considered the first 5 + # based on sorted order of keys AddAdditionalMetadata: { "app": "default" } # 
EnvironmentCacheTTL is the amount of time a cache entry will live that associates diff --git a/build/kubernetes/yaml/k8s-config-cm.yaml b/build/kubernetes/yaml/k8s-config-cm.yaml index 3f81a16ec7..cad9a5ab16 100644 --- a/build/kubernetes/yaml/k8s-config-cm.yaml +++ b/build/kubernetes/yaml/k8s-config-cm.yaml @@ -95,6 +95,8 @@ data: # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} + # max number of additional keys supported is 5, if the limit exceeds then we considered the first 5 + # based on sorted order of keys AddAdditionalMetadata: { "app": "default" } # EnvironmentCacheTTL is the amount of time a cache entry will live that associates diff --git a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml index 755d71b760..db292a1e6d 100644 --- a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml +++ b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml @@ -81,6 +81,8 @@ AddHostMetadataToTrace: false # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} +# max number of additional keys supported is 5, if the limit exceeds then we considered the first 5 +# based on sorted order of keys AddAdditionalMetadata: { "app": "default" } # EnvironmentCacheTTL is the amount of time a cache entry will live that associates @@ -247,39 +249,39 @@ PeerManagement: ###### Redis (Suitable for all types of deployments) ###### ########################################################### # The type should always be redis when deployed to Kubernetes environments -# Type: "redis" -# -# # RedisHost is used to connect to redis for peer cluster membership 
management. -# # Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes -# # precedence and this value is ignored. -# # Not eligible for live reload. -# # RedisHost will default to the name used for the release or name overrides depending on what is used, -# # but can be overriden to a specific value. -# RedisHost: 0.0.0.0:22122 -# -# # RedisUsername is the username used to connect to redis for peer cluster membership management. -# # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes -# # precedence and this value is ignored. -# # Not eligible for live reload. -# RedisUsername: "" -# -# # RedisPassword is the password used to connect to redis for peer cluster membership management. -# # If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes -# # precedence and this value is ignored. -# # Not eligible for live reload. -# RedisPassword: "" -# -# # RedisPrefix is a string used as a prefix for the keys in redis while storing -# # the peer membership. It might be useful to set this in any situation where -# # multiple trace-proxy clusters or multiple applications want to share a single -# # Redis instance. It may not be blank. -# RedisPrefix: "tracing-proxy" -# -# # RedisDatabase is an integer from 0-15 indicating the database number to use -# # for the Redis instance storing the peer membership. It might be useful to set -# # this in any situation where multiple trace-proxy clusters or multiple -# # applications want to share a single Redis instance. -# RedisDatabase: 0 + # Type: "redis" + # + # # RedisHost is used to connect to redis for peer cluster membership management. + # # Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes + # # precedence and this value is ignored. + # # Not eligible for live reload. + # # RedisHost will default to the name used for the release or name overrides depending on what is used, + # # but can be overriden to a specific value. 
+ # RedisHost: 0.0.0.0:22122 + # + # # RedisUsername is the username used to connect to redis for peer cluster membership management. + # # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes + # # precedence and this value is ignored. + # # Not eligible for live reload. + # RedisUsername: "" + # + # # RedisPassword is the password used to connect to redis for peer cluster membership management. + # # If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes + # # precedence and this value is ignored. + # # Not eligible for live reload. + # RedisPassword: "" + # + # # RedisPrefix is a string used as a prefix for the keys in redis while storing + # # the peer membership. It might be useful to set this in any situation where + # # multiple trace-proxy clusters or multiple applications want to share a single + # # Redis instance. It may not be blank. + # RedisPrefix: "tracing-proxy" + # + # # RedisDatabase is an integer from 0-15 indicating the database number to use + # # for the Redis instance storing the peer membership. It might be useful to set + # # this in any situation where multiple trace-proxy clusters or multiple + # # applications want to share a single Redis instance. + # RedisDatabase: 0 # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. # Not eligible for live reload. 
diff --git a/config/file_config.go b/config/file_config.go index 726b60fd07..caabb3a18d 100644 --- a/config/file_config.go +++ b/config/file_config.go @@ -10,6 +10,7 @@ import ( "net" "net/url" "os" + "sort" "strings" "sync" "time" @@ -188,7 +189,7 @@ func NewConfig(config, rules string, errorCallback func(error)) (Config, error) c.SetDefault("PeerBufferSize", libtrace.DefaultPendingWorkCapacity) c.SetDefault("MaxAlloc", uint64(0)) c.SetDefault("AddHostMetadataToTrace", false) - c.SetDefault("AddAdditionalMetadata", map[string]string{}) + c.SetDefault("AddAdditionalMetadata", map[string]string{"app": "default"}) c.SetDefault("AddRuleReasonToTrace", false) c.SetDefault("EnvironmentCacheTTL", time.Hour) c.SetDefault("GRPCServerParameters.MaxConnectionIdle", 1*time.Minute) @@ -872,7 +873,24 @@ func (f *fileConfig) GetAddAdditionalMetadata() map[string]string { f.mux.RLock() defer f.mux.RUnlock() - return f.conf.AddAdditionalMetadata + if len(f.conf.AddAdditionalMetadata) <= 5 { + return f.conf.AddAdditionalMetadata + } + + // sorting the keys and sending the first 5 + var keys []string + for k := range f.conf.AddAdditionalMetadata { + keys = append(keys, k) + } + sort.Strings(keys) + m := map[string]string{} + for index := 0; index < 5; index++ { + if val, ok := f.conf.AddAdditionalMetadata[keys[index]]; ok { + m[keys[index]] = val + } + } + + return m } func (f *fileConfig) GetSendMetricsToOpsRamp() bool { diff --git a/config_complete.yaml b/config_complete.yaml index a75ac5b576..d51b57665c 100644 --- a/config_complete.yaml +++ b/config_complete.yaml @@ -81,6 +81,8 @@ AddHostMetadataToTrace: false # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} +# max number of additional keys supported is 5, if the limit exceeds then we considered the first 5 +# based on sorted order of keys AddAdditionalMetadata: { "app": "default" } # 
EnvironmentCacheTTL is the amount of time a cache entry will live that associates From ca9a6e9d42a89af5569ec88954e1fb41eb45ef5f Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Mon, 15 May 2023 12:50:53 +0530 Subject: [PATCH 327/351] additional comments in config --- .../helm/opsramp-tracing-proxy/values.yaml | 1 + build/kubernetes/yaml/k8s-config-cm.yaml | 321 +++++++++--------- .../tracing-proxy/conf/config_complete.yaml | 1 + config_complete.yaml | 1 + 4 files changed, 164 insertions(+), 160 deletions(-) diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml index c3bb7ec5fb..1f537c638d 100644 --- a/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml +++ b/build/kubernetes/helm/opsramp-tracing-proxy/values.yaml @@ -116,6 +116,7 @@ config: # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} # max number of additional keys supported is 5, if the limit exceeds then we considered the first 5 # based on sorted order of keys + # "app" label is mandatory AddAdditionalMetadata: { "app": "default" } # EnvironmentCacheTTL is the amount of time a cache entry will live that associates diff --git a/build/kubernetes/yaml/k8s-config-cm.yaml b/build/kubernetes/yaml/k8s-config-cm.yaml index cad9a5ab16..1c9dbc879d 100644 --- a/build/kubernetes/yaml/k8s-config-cm.yaml +++ b/build/kubernetes/yaml/k8s-config-cm.yaml @@ -97,6 +97,7 @@ data: # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} # max number of additional keys supported is 5, if the limit exceeds then we considered the first 5 # based on sorted order of keys + # "app" label is mandatory AddAdditionalMetadata: { "app": "default" } # EnvironmentCacheTTL is the amount of time a cache entry will live that associates @@ -124,7 +125,7 @@ data: # If a field is not present in the span, it will not be present in the error log. # Default is ["trace.span_id"]. 
AdditionalErrorFields: - - trace.span_id + - trace.span_id # AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate # the number of child spans on the trace at the time the sampling decision was made. @@ -146,47 +147,47 @@ data: ## Retry Configuration ## ######################### RetryConfiguration: - # InitialInterval the time to wait after the first failure before retrying. - InitialInterval: 500ms - # RandomizationFactor is a random factor used to calculate next backoff - # Randomized interval = RetryInterval * (1 ± RandomizationFactor) - RandomizationFactor: 0.5 - # Multiplier is the value multiplied by the backoff interval bounds - Multiplier: 1.5 - # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between - # consecutive retries will always be `MaxInterval`. - MaxInterval: 60s - # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request. - # Once this value is reached, the data is discarded. - MaxElapsedTime: 15m + # InitialInterval the time to wait after the first failure before retrying. + InitialInterval: 500ms + # RandomizationFactor is a random factor used to calculate next backoff + # Randomized interval = RetryInterval * (1 ± RandomizationFactor) + RandomizationFactor: 0.5 + # Multiplier is the value multiplied by the backoff interval bounds + Multiplier: 1.5 + # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between + # consecutive retries will always be `MaxInterval`. + MaxInterval: 60s + # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request. + # Once this value is reached, the data is discarded. 
+ MaxElapsedTime: 15m ######################### ## Proxy Configuration ## ######################### ProxyConfiguration: - # Protocol accepts http and https - Protocol: "http" - # Host takes the proxy server address - Host: "" - # Port takes the proxy server port - Port: 3128 - # UserName takes the proxy username - Username: "" - # Password takes the proxy password - Password: "" + # Protocol accepts http and https + Protocol: "http" + # Host takes the proxy server address + Host: "" + # Port takes the proxy server port + Port: 3128 + # UserName takes the proxy username + Username: "" + # Password takes the proxy password + Password: "" ################################## ## Authentication Configuration ## ################################## AuthConfiguration: - # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made - Endpoint: "" - # Key - authentication key provided in OpsRamp Portal - Key: "" - # Secret - authentication Secret provided in OpsRamp Portal - Secret: "" - # TenantId - tenant/client id to which the traces are to be posted - TenantId: "" + # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made + Endpoint: "" + # Key - authentication key provided in OpsRamp Portal + Key: "" + # Secret - authentication Secret provided in OpsRamp Portal + Secret: "" + # TenantId - tenant/client id to which the traces are to be posted + TenantId: "" ############################ ## Implementation Choices ## @@ -203,26 +204,26 @@ data: # InMemCollector brings together all the settings that are relevant to # collecting spans together to make traces. InMemCollector: - - # The collection cache is used to collect all spans into a trace as well as - # remember the sampling decision for any spans that might come in after the - # trace has been marked "complete" (either by timing out or seeing the root - # span). 
The number of traces in the cache should be many multiples (100x to - # 1000x) of the total number of concurrently active traces (trace throughput * - # trace duration). - CacheCapacity: 1000 - - # MaxAlloc is optional. If set, it must be an integer >= 0. - # If set to a non-zero value, once per tick (see SendTicker) the collector - # will compare total allocated bytes to this value. If allocation is too - # high, cache capacity will be reduced and an error will be logged. - # Useful values for this setting are generally in the range of 75%-90% of - # available system memory. Using 80% is the recommended. - # This value should be set in according to the resources.limits.memory - # By default that setting is 4GB, and this is set to 85% of that limit - # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,837 - # MaxAlloc: 3435973836 - MaxAlloc: 0 + + # The collection cache is used to collect all spans into a trace as well as + # remember the sampling decision for any spans that might come in after the + # trace has been marked "complete" (either by timing out or seeing the root + # span). The number of traces in the cache should be many multiples (100x to + # 1000x) of the total number of concurrently active traces (trace throughput * + # trace duration). + CacheCapacity: 1000 + + # MaxAlloc is optional. If set, it must be an integer >= 0. + # If set to a non-zero value, once per tick (see SendTicker) the collector + # will compare total allocated bytes to this value. If allocation is too + # high, cache capacity will be reduced and an error will be logged. + # Useful values for this setting are generally in the range of 75%-90% of + # available system memory. Using 80% is the recommended. 
+ # This value should be set in according to the resources.limits.memory + # By default that setting is 4GB, and this is set to 85% of that limit + # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,837 + # MaxAlloc: 3435973836 + MaxAlloc: 0 ##################### ## Peer Management ## @@ -230,120 +231,120 @@ data: # Configure how OpsRamp-Tracing-Proxy peers are discovered and managed PeerManagement: - # Strategy controls the way that traces are assigned to Trace Proxy nodes. - # The "legacy" strategy uses a simple algorithm that unfortunately causes - # 1/2 of the in-flight traces to be assigned to a different node whenever the - # number of nodes changes. - # The legacy strategy is deprecated and is intended to be removed in a future release. - # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the - # number of nodes) are disrupted when the node count changes. - # Not eligible for live reload. - Strategy: "hash" - - ########################################################### - ###### File (Suitable only for VM based deployments) ###### - ########################################################### - Type: "file" - - # Peers is the list of all servers participating in this proxy cluster. Events - # will be sharded evenly across all peers based on the Trace ID. Values here - # should be the base URL used to access the peer, and should include scheme, - # hostname (or ip address) and port. All servers in the cluster should be in - # this list, including this host. 
- Peers: [ - "http://127.0.0.1:8084", #only grpc peer listener used - # "http://127.0.0.1:8083", - # "http://10.1.2.3.4:8080", - # "http://tracing-proxy-1231:8080", - # "http://peer-3.fqdn" // assumes port 80 - ] - ########################################################### - - ########################################################### - ###### Redis (Suitable for all types of deployments) ###### - ########################################################### - # # The type should always be redis when deployed to Kubernetes environments - # Type: "redis" - # - # # RedisHost is used to connect to redis for peer cluster membership management. - # # Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes - # # precedence and this value is ignored. - # # Not eligible for live reload. - # # RedisHost will default to the name used for the release or name overrides depending on what is used, - # # but can be overriden to a specific value. - # RedisHost: localhost:6379 - # - # # RedisUsername is the username used to connect to redis for peer cluster membership management. - # # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes - # # precedence and this value is ignored. - # # Not eligible for live reload. - # RedisUsername: "" - # - # # RedisPassword is the password used to connect to redis for peer cluster membership management. - # # If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes - # # precedence and this value is ignored. - # # Not eligible for live reload. - # RedisPassword: "" - # - # # RedisPrefix is a string used as a prefix for the keys in redis while storing - # # the peer membership. It might be useful to set this in any situation where - # # multiple trace-proxy clusters or multiple applications want to share a single - # # Redis instance. It may not be blank. 
- # RedisPrefix: "tracing-proxy" - # - # # RedisDatabase is an integer from 0-15 indicating the database number to use - # # for the Redis instance storing the peer membership. It might be useful to set - # # this in any situation where multiple trace-proxy clusters or multiple - # # applications want to share a single Redis instance. - # RedisDatabase: 0 - # - # # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. - # # Not eligible for live reload. - # UseTLS: false - # - # # UseTLSInsecure disables certificate checks - # # Not eligible for live reload. - # UseTLSInsecure: false - # - # # IdentifierInterfaceName is optional. - # # Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. - # # When configured the pod's IP will be used in the peer list - # # IdentifierInterfaceName: eth0 - # - # # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first - # # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use - # # the first IPV6 unicast address found. - # UseIPV6Identifier: false - ########################################################### + # Strategy controls the way that traces are assigned to Trace Proxy nodes. + # The "legacy" strategy uses a simple algorithm that unfortunately causes + # 1/2 of the in-flight traces to be assigned to a different node whenever the + # number of nodes changes. + # The legacy strategy is deprecated and is intended to be removed in a future release. + # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the + # number of nodes) are disrupted when the node count changes. + # Not eligible for live reload. 
+ Strategy: "hash" + + ########################################################### + ###### File (Suitable only for VM based deployments) ###### + ########################################################### + Type: "file" + + # Peers is the list of all servers participating in this proxy cluster. Events + # will be sharded evenly across all peers based on the Trace ID. Values here + # should be the base URL used to access the peer, and should include scheme, + # hostname (or ip address) and port. All servers in the cluster should be in + # this list, including this host. + Peers: [ + "http://127.0.0.1:8084", #only grpc peer listener used + # "http://127.0.0.1:8083", + # "http://10.1.2.3.4:8080", + # "http://tracing-proxy-1231:8080", + # "http://peer-3.fqdn" // assumes port 80 + ] + ########################################################### + + ########################################################### + ###### Redis (Suitable for all types of deployments) ###### + ########################################################### + # # The type should always be redis when deployed to Kubernetes environments + # Type: "redis" + # + # # RedisHost is used to connect to redis for peer cluster membership management. + # # Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes + # # precedence and this value is ignored. + # # Not eligible for live reload. + # # RedisHost will default to the name used for the release or name overrides depending on what is used, + # # but can be overriden to a specific value. + # RedisHost: localhost:6379 + # + # # RedisUsername is the username used to connect to redis for peer cluster membership management. + # # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes + # # precedence and this value is ignored. + # # Not eligible for live reload. + # RedisUsername: "" + # + # # RedisPassword is the password used to connect to redis for peer cluster membership management. 
+ # # If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes + # # precedence and this value is ignored. + # # Not eligible for live reload. + # RedisPassword: "" + # + # # RedisPrefix is a string used as a prefix for the keys in redis while storing + # # the peer membership. It might be useful to set this in any situation where + # # multiple trace-proxy clusters or multiple applications want to share a single + # # Redis instance. It may not be blank. + # RedisPrefix: "tracing-proxy" + # + # # RedisDatabase is an integer from 0-15 indicating the database number to use + # # for the Redis instance storing the peer membership. It might be useful to set + # # this in any situation where multiple trace-proxy clusters or multiple + # # applications want to share a single Redis instance. + # RedisDatabase: 0 + # + # # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. + # # Not eligible for live reload. + # UseTLS: false + # + # # UseTLSInsecure disables certificate checks + # # Not eligible for live reload. + # UseTLSInsecure: false + # + # # IdentifierInterfaceName is optional. + # # Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. + # # When configured the pod's IP will be used in the peer list + # # IdentifierInterfaceName: eth0 + # + # # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first + # # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use + # # the first IPV6 unicast address found. + # UseIPV6Identifier: false + ########################################################### # LogrusLogger is a section of the config only used if you are using the # LogrusLogger to send all logs to STDOUT using the logrus package. LogrusLogger: - # LogFormatter specifies the log format. 
Accepted values are one of ["logfmt", "json"] - LogFormatter: 'json' - # LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr", "file"] - LogOutput: 'stdout' + # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] + LogFormatter: 'json' + # LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr", "file"] + LogOutput: 'stdout' MetricsConfig: - # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp - Enable: true - - # ListenAddr determines the interface and port on which Prometheus will - # listen for requests for /metrics. Must be different from the main Trace Proxy - # listener. - ListenAddr: '0.0.0.0:2112' - - # OpsRampAPI is the URL for the upstream OpsRamp API. - OpsRampAPI: "" - - # ReportingInterval is the frequency specified in seconds at which - # the metrics are collected and sent to OpsRamp - ReportingInterval: 10 - - # MetricsList is a list of regular expressions which match the metric - # names. Keep the list as small as possible since too many regular expressions can lead to bad performance. - # Internally, all the items in the list are concatenated using '|' to make the computation faster. - MetricsList: [ ".*" ] + # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp + Enable: true + + # ListenAddr determines the interface and port on which Prometheus will + # listen for requests for /metrics. Must be different from the main Trace Proxy + # listener. + ListenAddr: '0.0.0.0:2112' + + # OpsRampAPI is the URL for the upstream OpsRamp API. + OpsRampAPI: "" + + # ReportingInterval is the frequency specified in seconds at which + # the metrics are collected and sent to OpsRamp + ReportingInterval: 10 + + # MetricsList is a list of regular expressions which match the metric + # names. 
Keep the list as small as possible since too many regular expressions can lead to bad performance. + # Internally, all the items in the list are concatenated using '|' to make the computation faster. + MetricsList: [ ".*" ] GRPCServerParameters: # MaxConnectionIdle is a duration for the amount of time after which an diff --git a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml index db292a1e6d..abfc5594dd 100644 --- a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml +++ b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml @@ -83,6 +83,7 @@ AddHostMetadataToTrace: false # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} # max number of additional keys supported is 5, if the limit exceeds then we considered the first 5 # based on sorted order of keys +# "app" label is mandatory AddAdditionalMetadata: { "app": "default" } # EnvironmentCacheTTL is the amount of time a cache entry will live that associates diff --git a/config_complete.yaml b/config_complete.yaml index d51b57665c..3eaaea5340 100644 --- a/config_complete.yaml +++ b/config_complete.yaml @@ -83,6 +83,7 @@ AddHostMetadataToTrace: false # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} # max number of additional keys supported is 5, if the limit exceeds then we considered the first 5 # based on sorted order of keys +# "app" label is mandatory AddAdditionalMetadata: { "app": "default" } # EnvironmentCacheTTL is the amount of time a cache entry will live that associates From 8f9598b369b03a5d04a36b246101b3a938fe0475 Mon Sep 17 00:00:00 2001 From: saikalyan-bhagavathula <103252261+saikalyan-bhagavathula@users.noreply.github.com> Date: Tue, 23 May 2023 09:37:16 +0530 Subject: [PATCH 328/351] Adding dataset as environment (#10) * Adding 
dataset as environment --- collect/collect.go | 24 ++++++++++++++++++++++++ route/otlp_trace.go | 7 ++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/collect/collect.go b/collect/collect.go index b8f91e7331..3842cf5b13 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -164,6 +164,30 @@ func (i *InMemCollector) Start() error { "Number of Error events in root spans wrt each trace operation", []string{"service_name", "operation", "app"}, ) + i.Metrics.RegisterWithDescriptionLabels( + "trace_root_span", + "counter", + "Number of root spans in an operation", + []string{"service_name", "operation"}, + ) + i.Metrics.RegisterWithDescriptionLabels( + "trace_spans_count", + "counter", + "Number of spans in an operation", + []string{"service_name", "operation"}, + ) + i.Metrics.RegisterWithDescriptionLabels( + "trace_root_operation_latency_ms", + "gauge", + "Trace latency wrt each root trace operation", + []string{"service_name", "operation"}, + ) + i.Metrics.RegisterWithDescriptionLabels( + "trace_root_operations_failed", + "counter", + "Number of Error events in root spans wrt each trace operation", + []string{"service_name", "operation"}, + ) sampleCacheConfig := i.Config.GetSampleCacheConfig() switch sampleCacheConfig.Type { diff --git a/route/otlp_trace.go b/route/otlp_trace.go index 989f1d46b4..a1dfe2dc9a 100644 --- a/route/otlp_trace.go +++ b/route/otlp_trace.go @@ -73,6 +73,11 @@ func processTraceRequest( router.Logger.Error().Logf("Unable to retrieve APIHost from config while processing OTLP batch") return err } + datasetName, err = router.Config.GetDataset() + if err != nil { + router.Logger.Error().Logf("Unable to retrieve DataSet from config while processing OTLP batch") + return err + } for _, batch := range batches { for _, ev := range batch.Events { @@ -82,7 +87,7 @@ func processTraceRequest( APIToken: token, APITenantId: tenantID, Dataset: datasetName, - Environment: "", + Environment: datasetName, SampleRate: uint(ev.SampleRate), 
Timestamp: ev.Timestamp, Data: ev.Attributes, From a7d7ea7209b96a61b871520c9d99a306ab9f39a8 Mon Sep 17 00:00:00 2001 From: LokeshOpsramp <59053467+LokeshOpsramp@users.noreply.github.com> Date: Tue, 23 May 2023 23:43:36 +0530 Subject: [PATCH 329/351] Build (#11) --- .github/CODEOWNERS | 1 - .github/ISSUE_TEMPLATE/bug_report.md | 28 -------- .github/ISSUE_TEMPLATE/feature_request.md | 25 -------- .github/ISSUE_TEMPLATE/question-discussion.md | 14 ---- .../security-vulnerability-report.md | 22 ------- .github/PULL_REQUEST_TEMPLATE.md | 20 ------ .github/dependabot.yml | 18 ------ .github/release.yml | 23 ------- .github/workflows/add-to-project-v2.yml | 15 ----- .github/workflows/apply-labels.yml | 10 --- .github/workflows/release.yml | 57 ++++++++++++----- .github/workflows/stale.yml | 26 -------- .github/workflows/validate-pr-title.yml | 64 ------------------- build/vm/tracing-deb/script.sh | 50 +++++++++------ build/vm/tracing-rpm/script.sh | 35 ++++++---- 15 files changed, 95 insertions(+), 313 deletions(-) delete mode 100644 .github/CODEOWNERS delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.md delete mode 100644 .github/ISSUE_TEMPLATE/feature_request.md delete mode 100644 .github/ISSUE_TEMPLATE/question-discussion.md delete mode 100644 .github/ISSUE_TEMPLATE/security-vulnerability-report.md delete mode 100644 .github/PULL_REQUEST_TEMPLATE.md delete mode 100644 .github/dependabot.yml delete mode 100644 .github/release.yml delete mode 100644 .github/workflows/add-to-project-v2.yml delete mode 100644 .github/workflows/apply-labels.yml delete mode 100644 .github/workflows/stale.yml delete mode 100644 .github/workflows/validate-pr-title.yml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index bc0633463f..0000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -* @honeycombio/telemetry-team diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 
5e73f80fc7..0000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -name: Bug report -about: Let us know if something is not working as expected -title: '' -labels: 'type: bug' -assignees: '' - ---- - - - -**Versions** - -- Go: -- tracing-proxy: - - -**Steps to reproduce** - -1. - -**Additional context** diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index f8ecf25263..0000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: '' -labels: 'type: enhancement' -assignees: '' - ---- - - - -**Is your feature request related to a problem? Please describe.** - - -**Describe the solution you'd like** - - -**Describe alternatives you've considered** - - -**Additional context** diff --git a/.github/ISSUE_TEMPLATE/question-discussion.md b/.github/ISSUE_TEMPLATE/question-discussion.md deleted file mode 100644 index 3aa8920cb0..0000000000 --- a/.github/ISSUE_TEMPLATE/question-discussion.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -name: Question/Discussion -about: General question about how things work or a discussion -title: '' -labels: 'type: discussion' -assignees: '' - ---- - - diff --git a/.github/ISSUE_TEMPLATE/security-vulnerability-report.md b/.github/ISSUE_TEMPLATE/security-vulnerability-report.md deleted file mode 100644 index 9efc7f698c..0000000000 --- a/.github/ISSUE_TEMPLATE/security-vulnerability-report.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -name: Security vulnerability report -about: Let us know if you discover a security vulnerability -title: '' -labels: 'type: security' -assignees: '' - ---- - - -**Versions** - -- Go: -- tracing-proxy: - -**Description** - -(Please include any relevant CVE advisory links) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 429aebf7fd..0000000000 --- 
a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,20 +0,0 @@ - - -## Which problem is this PR solving? - -- - -## Short description of the changes - -- - diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 4752e727bf..0000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,18 +0,0 @@ -# To get started with Dependabot version updates, you'll need to specify which -# package ecosystems to update and where the package manifests are located. -# Please see the documentation for all configuration options: -# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates - -version: 2 -updates: - - package-ecosystem: "gomod" # See documentation for possible values - directory: "/" # Location of package manifests - schedule: - interval: "monthly" - labels: - - "type: dependencies" - reviewers: - - "honeycombio/telemetry-team" - commit-message: - prefix: "maint" - include: "scope" diff --git a/.github/release.yml b/.github/release.yml deleted file mode 100644 index 3d9ee33826..0000000000 --- a/.github/release.yml +++ /dev/null @@ -1,23 +0,0 @@ -# .github/release.yml - -changelog: - exclude: - labels: - - no-changelog - categories: - - title: 💥 Breaking Changes 💥 - labels: - - "version: bump major" - - breaking-change - - title: 💡 Enhancements - labels: - - "type: enhancement" - - title: 🐛 Fixes - labels: - - "type: bug" - - title: 🛠 Maintenance - labels: - - "type: maintenance" - - title: 🤷 Other Changes - labels: - - "*" \ No newline at end of file diff --git a/.github/workflows/add-to-project-v2.yml b/.github/workflows/add-to-project-v2.yml deleted file mode 100644 index 5d569202b2..0000000000 --- a/.github/workflows/add-to-project-v2.yml +++ /dev/null @@ -1,15 +0,0 @@ -name: Add to project -on: - issues: - types: [opened] - pull_request_target: - types: [opened] -jobs: - add-to-project: - runs-on: ubuntu-latest - name: Add issues and PRs to project - steps: - - uses: 
actions/add-to-project@main - with: - project-url: https://github.com/orgs/honeycombio/projects/11 - github-token: ${{ secrets.GHPROJECTS_TOKEN }} diff --git a/.github/workflows/apply-labels.yml b/.github/workflows/apply-labels.yml deleted file mode 100644 index d3293214fc..0000000000 --- a/.github/workflows/apply-labels.yml +++ /dev/null @@ -1,10 +0,0 @@ -name: Apply project labels -on: [issues, pull_request_target, label] -jobs: - apply-labels: - runs-on: ubuntu-latest - name: Apply common project labels - steps: - - uses: honeycombio/oss-management-actions/labels@v1 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9174d7b661..667eb963af 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,23 +1,48 @@ -name: Create helm chart issue on release on: release: - types: [published] - workflow_dispatch: + types: [ created ] +name: Handle Release jobs: - create_issue: + generate-deb: + name: Create debian package runs-on: ubuntu-latest steps: - - name: Create an issue - uses: actions-ecosystem/action-create-issue@v1 + - name: Checkout the repository + uses: actions/checkout@v3 + - uses: addnab/docker-run-action@v3 with: - github_token: ${{ secrets.GHPROJECTS_TOKEN }} - repo: honeycombio/helm-charts - title: Bump Refinery to Latest Version - body: | - ## Bump Refinery - - Update Refinery to latest version + image: golang:1.20.4-buster + options: -v ${{ github.workspace }}:${{ github.workspace }} --env IS_GITHUB_ACTION=true --env VERSION_TAG=${{ github.event.release.tag_name }} + run: | + cd ${{ github.workspace }} + /bin/bash build/vm/tracing-deb/script.sh + - name: Upload the artifacts + uses: skx/github-action-publish-binaries@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + args: "build/vm/tracing-deb/output/*" - labels: | - type: dependencies - status: oncall + generate-rpm: + name: Create RPM package + runs-on: 
ubuntu-latest + steps: + - name: Checkout the repository + uses: actions/checkout@v3 + - uses: addnab/docker-run-action@v3 + with: + image: rockylinux:8 + options: -v ${{ github.workspace }}:${{ github.workspace }} --env IS_GITHUB_ACTION=true --env VERSION_TAG=${{ github.event.release.tag_name }} + run: | + curl -L -O https://go.dev/dl/go1.20.4.linux-amd64.tar.gz + rm -rf /usr/local/go && tar -C /usr/local -xzf go1.20.4.linux-amd64.tar.gz + cd ${{ github.workspace }} + PATH=$PATH:/usr/local/go/bin /bin/bash build/vm/tracing-rpm/script.sh + mkdir -p ${{ github.workspace }}/output + cp -r /root/rpmbuild/RPMS/x86_64/* ${{ github.workspace }}/output + - name: Upload the artifacts + uses: skx/github-action-publish-binaries@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + args: "${{ github.workspace }}/output/*" \ No newline at end of file diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml deleted file mode 100644 index 5a4d4373c9..0000000000 --- a/.github/workflows/stale.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: 'Close stale issues and PRs' -on: - schedule: - - cron: '30 1 * * *' - -jobs: - stale: - name: 'Close stale issues and PRs' - runs-on: ubuntu-latest - permissions: - issues: write - pull-requests: write - - steps: - - uses: actions/stale@v4 - with: - start-date: '2021-09-01T00:00:00Z' - stale-issue-message: 'Marking this issue as stale because it has been open 14 days with no activity. Please add a comment if this is still an ongoing issue; otherwise this issue will be automatically closed in 7 days.' - stale-pr-message: 'Marking this PR as stale because it has been open 30 days with no activity. Please add a comment if this PR is still relevant; otherwise this PR will be automatically closed in 7 days.' - close-issue-message: 'Closing this issue due to inactivity. Please see our [Honeycomb OSS Lifecyle and Practices](https://github.com/jirs5/home/blob/main/honeycomb-oss-lifecycle-and-practices.md).' 
- close-pr-message: 'Closing this PR due to inactivity. Please see our [Honeycomb OSS Lifecyle and Practices](https://github.com/jirs5/home/blob/main/honeycomb-oss-lifecycle-and-practices.md).' - days-before-issue-stale: 14 - days-before-pr-stale: 30 - days-before-issue-close: 7 - days-before-pr-close: 7 - any-of-labels: 'status: info needed,status: revision needed' diff --git a/.github/workflows/validate-pr-title.yml b/.github/workflows/validate-pr-title.yml deleted file mode 100644 index 5186cd01be..0000000000 --- a/.github/workflows/validate-pr-title.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: "Validate PR Title" - -on: - pull_request: - types: - - opened - - edited - - synchronize - -jobs: - main: - name: Validate PR title - runs-on: ubuntu-latest - steps: - - uses: amannn/action-semantic-pull-request@v5 - id: lint_pr_title - name: "🤖 Check PR title follows conventional commit spec" - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - # Have to specify all types because `maint` and `rel` aren't defaults - types: | - maint - rel - fix - feat - chore - ci - docs - style - refactor - perf - test - ignoreLabels: | - "type: dependencies" - # When the previous steps fails, the workflow would stop. By adding this - # condition you can continue the execution with the populated error message. - - if: always() && (steps.lint_pr_title.outputs.error_message != null) - name: "📝 Add PR comment about using conventional commit spec" - uses: marocchino/sticky-pull-request-comment@v2 - with: - header: pr-title-lint-error - message: | - Thank you for contributing to the project! 🎉 - - We require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and it looks like your proposed title needs to be adjusted. - - Make sure to prepend with `feat:`, `fix:`, or another option in the list below. - - Once you update the title, this workflow will re-run automatically and validate the updated title. 
- - Details: - - ``` - ${{ steps.lint_pr_title.outputs.error_message }} - ``` - - # Delete a previous comment when the issue has been resolved - - if: ${{ steps.lint_pr_title.outputs.error_message == null }} - name: "❌ Delete PR comment after title has been updated" - uses: marocchino/sticky-pull-request-comment@v2 - with: - header: pr-title-lint-error - delete: true diff --git a/build/vm/tracing-deb/script.sh b/build/vm/tracing-deb/script.sh index 1b297def22..feb67d304c 100755 --- a/build/vm/tracing-deb/script.sh +++ b/build/vm/tracing-deb/script.sh @@ -1,50 +1,62 @@ +#!/bin/bash + # $1 is a version of the package Version=$1 -sed -i "/^Version/s/:.*$/: ${Version}/g" tracing/DEBIAN/control +if [[ -z "$Version" ]]; then + Version=$VERSION_TAG +fi + +BUILD_DIR="." + +if [ "$IS_GITHUB_ACTION" = "true" ]; then + BUILD_DIR="build/vm/tracing-deb" +fi + +sed -i "/^Version/s/:.*$/: ${Version}/g" $BUILD_DIR/tracing/DEBIAN/control architecture=$(uname -m) if [ "$architecture" = "x86_64" ]; then architecture='amd64' fi -sed -i "/^Architecture/s/:.*$/: ${architecture}/g" tracing/DEBIAN/control +sed -i "/^Architecture/s/:.*$/: ${architecture}/g" $BUILD_DIR/tracing/DEBIAN/control # remove old data -rm -rf ./output +rm -rf $BUILD_DIR/output # Updating the files -mkdir -p tracing/opt/opsramp/tracing-proxy/bin -mkdir -p tracing/opt/opsramp/tracing-proxy/conf -mkdir -p tracing/etc/systemd/system +mkdir -p $BUILD_DIR/tracing/opt/opsramp/tracing-proxy/bin +mkdir -p $BUILD_DIR/tracing/opt/opsramp/tracing-proxy/conf +mkdir -p $BUILD_DIR/tracing/etc/systemd/system -cp -r ../package_directories/* tracing/ +cp -r $BUILD_DIR/../package_directories/* $BUILD_DIR/tracing/ # Building a static binaries CGO_ENABLED=0 \ GOOS=linux \ GOARCH=amd64 \ go build -ldflags "-X main.BuildID=${Version}" \ - -o tracing-proxy \ - ../../../cmd/tracing-proxy + -o $BUILD_DIR/tracing-proxy \ + $BUILD_DIR/../../../cmd/tracing-proxy/main.go CGO_ENABLED=0 \ GOOS=linux \ GOARCH=amd64 \ go build -ldflags "-X 
main.BuildID=${Version}" \ - -o configure \ - ../configure.go + -o $BUILD_DIR/configure \ + $BUILD_DIR/../configure.go -cp tracing-proxy tracing/opt/opsramp/tracing-proxy/bin/tracing-proxy -cp configure tracing/opt/opsramp/tracing-proxy/bin/configure +cp $BUILD_DIR/tracing-proxy $BUILD_DIR/tracing/opt/opsramp/tracing-proxy/bin/tracing-proxy +cp $BUILD_DIR/configure $BUILD_DIR/tracing/opt/opsramp/tracing-proxy/bin/configure -dpkg -b tracing +dpkg -b $BUILD_DIR/tracing # Rename the package with version and architecture packageName="tracing-proxy_"${architecture}"-"${Version}".deb" -mkdir -p ./output -mv tracing.deb ./output/"${packageName}" +mkdir -p $BUILD_DIR/output +mv $BUILD_DIR/tracing.deb $BUILD_DIR/output/"${packageName}" # Cleanup -rm -rf ./tracing/opt -rm -rf ./tracing/etc -rm -rf configure tracing-proxy +rm -rf $BUILD_DIR/tracing/opt +rm -rf $BUILD_DIR/tracing/etc +rm -rf $BUILD_DIR/configure $BUILD_DIR/tracing-proxy diff --git a/build/vm/tracing-rpm/script.sh b/build/vm/tracing-rpm/script.sh index 90136ed275..36d46b1d19 100755 --- a/build/vm/tracing-rpm/script.sh +++ b/build/vm/tracing-rpm/script.sh @@ -1,37 +1,48 @@ +#!/bin/bash + yum -y install rpmdevtools rpmdev-setuptree +BUILD_DIR="." 
+ +if [ "$IS_GITHUB_ACTION" = "true" ]; then + BUILD_DIR="build/vm/tracing-rpm" +fi + Release=$(uname -m) -sed -i "/^\%define release/s/^.*$/\%define release ${Release}/g" tracing-proxy.spec +sed -i "/^\%define release/s/^.*$/\%define release ${Release}/g" $BUILD_DIR/tracing-proxy.spec # $1 is a version of the package Version=$1 -sed -i "/^\%define version/s/^.*$/\%define version ${Version}/g" tracing-proxy.spec +if [[ -z "$Version" ]]; then + Version=$VERSION_TAG +fi +sed -i "/^\%define version/s/^.*$/\%define version ${Version}/g" $BUILD_DIR/tracing-proxy.spec # Building a static binaries CGO_ENABLED=0 \ GOOS=linux \ GOARCH=amd64 \ go build -ldflags "-X main.BuildID=${Version}" \ - -o tracing-proxy \ - ../../../cmd/tracing-proxy + -o $BUILD_DIR/tracing-proxy \ + $BUILD_DIR/../../../cmd/tracing-proxy/main.go CGO_ENABLED=0 \ GOOS=linux \ GOARCH=amd64 \ go build -ldflags "-X main.BuildID=${Version}" \ - -o configure \ - ../configure.go + -o $BUILD_DIR/configure \ + $BUILD_DIR/../configure.go -package_name="tracing-proxy-${1}" +package_name="tracing-proxy-${Version}" mkdir -p ${package_name}/opt/opsramp/tracing-proxy/bin/ -cp -r ../package_directories/* ${package_name} -mv configure ${package_name}/opt/opsramp/tracing-proxy/bin/configure -mv tracing-proxy ${package_name}/opt/opsramp/tracing-proxy/bin/tracing-proxy +cp -r $BUILD_DIR/../package_directories/* ${package_name} +mv $BUILD_DIR/configure ${package_name}/opt/opsramp/tracing-proxy/bin/configure +mv $BUILD_DIR/tracing-proxy ${package_name}/opt/opsramp/tracing-proxy/bin/tracing-proxy tar -czvf ${package_name}.tar.gz ${package_name} mv ${package_name}.tar.gz /root/rpmbuild/SOURCES/ -cp tracing-proxy.spec /root/rpmbuild/SPECS/tracing-proxy.spec +cp $BUILD_DIR/tracing-proxy.spec /root/rpmbuild/SPECS/tracing-proxy.spec rpmbuild -ba --clean /root/rpmbuild/SPECS/tracing-proxy.spec @@ -39,4 +50,4 @@ echo "***** rpm package can be found in /root/rpmbuild/RPMS/x86_64/ Date: Tue, 23 May 2023 23:46:48 +0530 Subject: [PATCH 
330/351] Create codeql.yml --- .github/workflows/codeql.yml | 76 ++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 .github/workflows/codeql.yml diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000000..1b338b0d9d --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,76 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ "main" ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ "main" ] + schedule: + - cron: '28 11 * * 2' + +jobs: + analyze: + name: Analyze + runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Use only 'java' to analyze code written in Java, Kotlin or both + # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. 
+ # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + + # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v2 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 
+ + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 + with: + category: "/language:${{matrix.language}}" From 26f0130da67f2eca611ee1151f73e426f4fca41f Mon Sep 17 00:00:00 2001 From: LokeshOpsramp <59053467+LokeshOpsramp@users.noreply.github.com> Date: Wed, 24 May 2023 10:45:14 +0530 Subject: [PATCH 331/351] Github Action for Building and Publishing OCI Container Image * github action for building and publishing OCI Container Image --- .github/workflows/release.yml | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 667eb963af..6454ef1fd4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -45,4 +45,30 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: - args: "${{ github.workspace }}/output/*" \ No newline at end of file + args: "${{ github.workspace }}/output/*" + + oci-container-image: + runs-on: ubuntu-latest + steps: + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: LokeshOpsramp + password: ${{ secrets.SUPERSECRETPASSWORD }} + - name: Docker meta + id: meta + uses: docker/metadata-action@v4 + with: + images: ghcr.io/LokeshOpsramp/trace-proxy + tags: | + type=semver,pattern=${{ github.event.release.tag_name }} + - name: Build and push + uses: docker/build-push-action@v4 + with: + push: true + tags: ${{ steps.meta.outputs.tags }} \ No newline at end of file From f350908809b5a1072006735e6292b3bc0f1e02fc Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 24 May 2023 11:10:27 +0530 Subject: [PATCH 332/351] github action for notifying gchat --- .github/workflows/notify.yml 
| 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/notify.yml diff --git a/.github/workflows/notify.yml b/.github/workflows/notify.yml new file mode 100644 index 0000000000..6c7c43dce3 --- /dev/null +++ b/.github/workflows/notify.yml @@ -0,0 +1,21 @@ +name: Notify +on: + push: + branches: [ "main" ] + pull_request: + branches: [ "main" ] + release: + types: [ published ] + +jobs: + notify: + name: Notify via Google Chat + runs-on: ubuntu-latest + steps: + - name: Google Chat Notification + uses: nakamuraos/google-chat-notifications@v2.0.1 + with: + title: Build + webhookUrl: ${{ secrets.GOOGLE_CHAT_WEBHOOK }} + status: ${{ job.status }} + if: always() \ No newline at end of file From d4ba93c629edda228f1cf6f6e046b7bd26d2e982 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 24 May 2023 11:20:38 +0530 Subject: [PATCH 333/351] github action for notifying gchat --- .github/workflows/notify.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/notify.yml b/.github/workflows/notify.yml index 6c7c43dce3..920520f108 100644 --- a/.github/workflows/notify.yml +++ b/.github/workflows/notify.yml @@ -15,7 +15,8 @@ jobs: - name: Google Chat Notification uses: nakamuraos/google-chat-notifications@v2.0.1 with: - title: Build + title: ${{ github.event_name }} + subtitle: ${{ github.event.head_commit.message }} webhookUrl: ${{ secrets.GOOGLE_CHAT_WEBHOOK }} status: ${{ job.status }} if: always() \ No newline at end of file From c1b3b14accbd9f036de57b9c1dbd9075dbc8b0e2 Mon Sep 17 00:00:00 2001 From: saikalyan-bhagavathula <103252261+saikalyan-bhagavathula@users.noreply.github.com> Date: Wed, 31 May 2023 15:40:20 +0530 Subject: [PATCH 334/351] addedd trace_operations_error metric (#13) addedd trace_operations_error metric --- collect/collect.go | 24 +++---------------- metrics/opsramp.go | 57 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 21 
deletions(-) diff --git a/collect/collect.go b/collect/collect.go index 3842cf5b13..471d16e35a 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -165,28 +165,10 @@ func (i *InMemCollector) Start() error { []string{"service_name", "operation", "app"}, ) i.Metrics.RegisterWithDescriptionLabels( - "trace_root_span", - "counter", - "Number of root spans in an operation", - []string{"service_name", "operation"}, - ) - i.Metrics.RegisterWithDescriptionLabels( - "trace_spans_count", - "counter", - "Number of spans in an operation", - []string{"service_name", "operation"}, - ) - i.Metrics.RegisterWithDescriptionLabels( - "trace_root_operation_latency_ms", + "trace_operations_error", "gauge", - "Trace latency wrt each root trace operation", - []string{"service_name", "operation"}, - ) - i.Metrics.RegisterWithDescriptionLabels( - "trace_root_operations_failed", - "counter", - "Number of Error events in root spans wrt each trace operation", - []string{"service_name", "operation"}, + "Trace errors wrt each trace operation / trace_span_count", + []string{"service_name", "operation", "app"}, ) sampleCacheConfig := i.Config.GetSampleCacheConfig() diff --git a/metrics/opsramp.go b/metrics/opsramp.go index 99133a4b58..a5ae85ecb8 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -17,6 +17,7 @@ import ( "net/url" "os" "regexp" + "sort" "strings" "sync" "time" @@ -353,12 +354,68 @@ func (p *OpsRampMetrics) Populate() { } } +func ConvertLabelsToMap(labels []prompb.Label) map[string]string { + labelMap := make(map[string]string) + for _, label := range labels { + labelMap[label.Name] = label.Value + } + return labelMap +} + +func (p *OpsRampMetrics) calculateTraceOperationError(metricFamilySlice []*io_prometheus_client.MetricFamily) { + var labelMap map[string]string + uniqueLabelsMap := make(map[string][]prompb.Label) + uniqueFailedMap := make(map[string]float64) + uniqueSpansMap := make(map[string]float64) + for _, metricFamily := range metricFamilySlice { + if 
!p.re.MatchString(metricFamily.GetName()) { + continue + } + if metricFamily.GetName() == "trace_operations_failed" || metricFamily.GetName() == "trace_spans_count" { + for _, metric := range metricFamily.GetMetric() { + var labels []prompb.Label + for _, label := range metric.GetLabel() { + labels = append(labels, prompb.Label{ + Name: label.GetName(), + Value: label.GetValue(), + }) + } + key := "trace_operations_failed&trace_spans_count&" + labelSlice := metric.GetLabel() + sort.Slice(labelSlice, func(i, j int) bool { + return labelSlice[i].GetName()+labelSlice[i].GetValue() > labelSlice[j].GetName()+labelSlice[i].GetValue() + }) + for _, label := range labelSlice { + key += label.GetName() + label.GetValue() + } + if metricFamily.GetName() == "trace_operations_failed" { + uniqueFailedMap[key] = *metric.Counter.Value + } else { + uniqueSpansMap[key] = *metric.Counter.Value + } + uniqueLabelsMap[key] = labels + } + } + } + for key, _ := range uniqueLabelsMap { + labelMap = ConvertLabelsToMap(uniqueLabelsMap[key]) + p.GaugeWithLabels("trace_operations_error", labelMap, uniqueFailedMap[key]/uniqueSpansMap[key]) + } +} + func (p *OpsRampMetrics) Push() (int, error) { metricFamilySlice, err := p.promRegistry.Gather() if err != nil { return -1, err } + p.calculateTraceOperationError(metricFamilySlice) + + metricFamilySlice, err = p.promRegistry.Gather() + if err != nil { + return -1, err + } + presentTime := time.Now().UnixMilli() var timeSeries []prompb.TimeSeries From 5e5fd70e5abd3af1b7a13986c74b5ab1b2789c5f Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Thu, 1 Jun 2023 17:58:43 +0530 Subject: [PATCH 335/351] send msg to stop channel when max time is reached in exponential backoff in libtrace-go --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7db60a7b1a..e4ac334f3c 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/json-iterator/go v1.1.12 
github.com/klauspost/compress v1.15.12 github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c - github.com/opsramp/libtrace-go v0.0.0-20230515040030-89d6ab9f4816 + github.com/opsramp/libtrace-go v0.0.0-20230601122019-04ce9e86c2aa github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index c1d24cd71d..a80577460a 100644 --- a/go.sum +++ b/go.sum @@ -581,8 +581,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c h1:FhNFobufJrjU1/E/5LCiZk3IZRbrqk/5gnTz5iZTNQg= github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230515040030-89d6ab9f4816 h1:mutPEtUsbD2VPDh4Q3pMUfXaat6o7KLU0/72wvPhpvM= -github.com/opsramp/libtrace-go v0.0.0-20230515040030-89d6ab9f4816/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/libtrace-go v0.0.0-20230601122019-04ce9e86c2aa h1:J5v6xBo4YwrXjuqtzKMhfZi6pkqdJZVMPkZlOAfMF/k= +github.com/opsramp/libtrace-go v0.0.0-20230601122019-04ce9e86c2aa/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= From 1bcdf64eea325a469bc193060544380b28ce56d4 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 7 Jun 2023 14:59:15 +0530 Subject: [PATCH 336/351] 
setting default value when service_name is empty --- go.mod | 4 ++-- go.sum | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index e4ac334f3c..ecf5534bc2 100644 --- a/go.mod +++ b/go.mod @@ -17,8 +17,8 @@ require ( github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 - github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c - github.com/opsramp/libtrace-go v0.0.0-20230601122019-04ce9e86c2aa + github.com/opsramp/husky v0.0.0-20230607092333-7d335e45a6d7 + github.com/opsramp/libtrace-go v0.0.0-20230607092128-33a542d473fc github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index a80577460a..aaec198abd 100644 --- a/go.sum +++ b/go.sum @@ -367,7 +367,6 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -579,10 +578,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c h1:FhNFobufJrjU1/E/5LCiZk3IZRbrqk/5gnTz5iZTNQg= -github.com/opsramp/husky v0.0.0-20230420114859-538fa960313c/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230601122019-04ce9e86c2aa h1:J5v6xBo4YwrXjuqtzKMhfZi6pkqdJZVMPkZlOAfMF/k= -github.com/opsramp/libtrace-go v0.0.0-20230601122019-04ce9e86c2aa/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/husky v0.0.0-20230607092333-7d335e45a6d7 h1:5DYQxOiNfPVn0Uu127402O+4Gz+TYq7p1XxC5PrKepw= +github.com/opsramp/husky v0.0.0-20230607092333-7d335e45a6d7/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= +github.com/opsramp/libtrace-go v0.0.0-20230607092128-33a542d473fc h1:9PWrSlAQaBTufgCZX6YqX5lRYmxux5SYs3JtliaPLCc= +github.com/opsramp/libtrace-go v0.0.0-20230607092128-33a542d473fc/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= From f0fcd95308e7144e8e184993482403a7aba10ab2 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Fri, 9 Jun 2023 18:21:15 +0530 Subject: [PATCH 337/351] fix app label and service name for metrics --- collect/collect.go | 19 +++++++++++++++---- go.mod | 4 ++-- go.sum | 8 ++++---- metrics/metrics.go | 41 +++++++++++++++++++++-------------------- 4 files 
changed, 42 insertions(+), 30 deletions(-) diff --git a/collect/collect.go b/collect/collect.go index 471d16e35a..c5d1619690 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -634,10 +634,21 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { continue } - labelToKeyMap := map[string]string{ - "service_name": "service.name", - "operation": "spanName", - "app": "app", + resAttr, ok := span.Data[resourceAttributesKey].(map[string]interface{}) + if !ok { + resAttr = map[string]interface{}{} + } + for key, value := range i.Config.GetAddAdditionalMetadata() { + if _, ok := resAttr[key]; !ok { + resAttr[key] = value + } + } + span.Data[resourceAttributesKey] = resAttr + + labelToKeyMap := map[string][]string{ + "service_name": {"service_name", "service.name"}, + "operation": {"spanName"}, + "app": {"app"}, } labels := metrics.ExtractLabelsFromSpan(span, labelToKeyMap) diff --git a/go.mod b/go.mod index ecf5534bc2..5bd41398a5 100644 --- a/go.mod +++ b/go.mod @@ -17,8 +17,8 @@ require ( github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.15.12 - github.com/opsramp/husky v0.0.0-20230607092333-7d335e45a6d7 - github.com/opsramp/libtrace-go v0.0.0-20230607092128-33a542d473fc + github.com/opsramp/husky v0.0.0-20230609124403-2da97ba6e273 + github.com/opsramp/libtrace-go v0.0.0-20230609124256-55d8d4e593ea github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index aaec198abd..c090e8127b 100644 --- a/go.sum +++ b/go.sum @@ -578,10 +578,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/opsramp/husky v0.0.0-20230607092333-7d335e45a6d7 h1:5DYQxOiNfPVn0Uu127402O+4Gz+TYq7p1XxC5PrKepw= -github.com/opsramp/husky v0.0.0-20230607092333-7d335e45a6d7/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230607092128-33a542d473fc h1:9PWrSlAQaBTufgCZX6YqX5lRYmxux5SYs3JtliaPLCc= -github.com/opsramp/libtrace-go v0.0.0-20230607092128-33a542d473fc/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/husky v0.0.0-20230609124403-2da97ba6e273 h1:T5FmJZ5McufpKgX4fqleen0PJtyDwpWFNJcvr5ff1xQ= +github.com/opsramp/husky v0.0.0-20230609124403-2da97ba6e273/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= +github.com/opsramp/libtrace-go v0.0.0-20230609124256-55d8d4e593ea h1:UtVFx3EkflrNlw467lBHu6qcaS6+NKg9PZZRvzjO9eo= +github.com/opsramp/libtrace-go v0.0.0-20230609124256-55d8d4e593ea/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= diff --git a/metrics/metrics.go b/metrics/metrics.go index 477a0ad71f..8440629522 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -52,35 +52,36 @@ func ConvertNumeric(val interface{}) float64 { } } -func ExtractLabelsFromSpan(span *types.Span, labelToKeyMap map[string]string) map[string]string { +func ExtractLabelsFromSpan(span *types.Span, labelToKeyMap map[string][]string) map[string]string { labels := map[string]string{} 
attributeMapKeys := []string{"spanAttributes", "resourceAttributes", "eventAttributes"} - for labelName, searchKey := range labelToKeyMap { - - // check of the higher level first - searchValue, exists := span.Data[searchKey] - if exists && searchValue != nil { - labels[labelName] = searchValue.(string) - continue - } + for labelName, searchKeys := range labelToKeyMap { + for _, searchKey := range searchKeys { + // check of the higher level first + searchValue, exists := span.Data[searchKey] + if exists && searchValue != nil { + labels[labelName] = searchValue.(string) + continue + } - // check in the span, resource and event attributes when key is not found - for _, attributeKey := range attributeMapKeys { - if attribute, ok := span.Data[attributeKey]; ok && attribute != nil { - searchValue, exists = attribute.(map[string]interface{})[searchKey] - if exists && searchValue != nil { - labels[labelName] = searchValue.(string) - break + // check in the span, resource and event attributes when key is not found + for _, attributeKey := range attributeMapKeys { + if attribute, ok := span.Data[attributeKey]; ok && attribute != nil { + searchValue, exists = attribute.(map[string]interface{})[searchKey] + if exists && searchValue != nil { + labels[labelName] = searchValue.(string) + break + } } } - } - // if the key does not exist then set it to empty - if !exists { - labels[labelName] = "" + // if the key does not exist then set it to empty + if !exists { + labels[labelName] = "" + } } } From 488b70e2cb1cafc47df2f031d0b7949a88f8b720 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Tue, 13 Jun 2023 13:11:49 +0530 Subject: [PATCH 338/351] updating version --- Dockerfile | 2 +- build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml | 4 ++-- .../opt/opsramp/tracing-proxy/conf/config_complete.yaml | 2 +- config_complete.yaml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index ccd4b6f951..f59642070b 100644 --- a/Dockerfile +++ 
b/Dockerfile @@ -2,7 +2,7 @@ FROM golang:alpine as builder RUN apk update && apk add --no-cache git bash ca-certificates && update-ca-certificates -ARG BUILD_ID="1.1.0" +ARG BUILD_ID="15.0.0-1" WORKDIR /app diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml index b2fe70359b..da7c7224c3 100644 --- a/build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml +++ b/build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml @@ -7,10 +7,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.0.0 +version: 15.0.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.0.0" \ No newline at end of file +appVersion: "15.0.0-1" \ No newline at end of file diff --git a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml index abfc5594dd..2087f8ba92 100644 --- a/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml +++ b/build/vm/package_directories/opt/opsramp/tracing-proxy/conf/config_complete.yaml @@ -325,7 +325,7 @@ LogrusLogger: MetricsConfig: # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp - Enable: false + Enable: true # ListenAddr determines the interface and port on which Prometheus will # listen for requests for /metrics. 
Must be different from the main Trace Proxy diff --git a/config_complete.yaml b/config_complete.yaml index 3eaaea5340..1a17e715c9 100644 --- a/config_complete.yaml +++ b/config_complete.yaml @@ -10,7 +10,7 @@ ListenAddr: 0.0.0.0:8082 # GRPCListenAddr is the IP and port on which to listen for incoming events over # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put something like nginx in # front to do the TLS Termination. -GRPCListenAddr: 0.0.0.0:4317 +GRPCListenAddr: 0.0.0.0:9090 # PeerListenAddr is the IP and port on which to listen for traffic being # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL From 66d8031ae6835c881e8753c3a2fbc9082d79df2a Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Tue, 13 Jun 2023 14:49:04 +0530 Subject: [PATCH 339/351] updating version --- Dockerfile | 2 +- build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index f59642070b..82dab3cfe7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM golang:alpine as builder RUN apk update && apk add --no-cache git bash ca-certificates && update-ca-certificates -ARG BUILD_ID="15.0.0-1" +ARG BUILD_ID="15.0.0" WORKDIR /app diff --git a/build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml b/build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml index da7c7224c3..a4706d35f3 100644 --- a/build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml +++ b/build/kubernetes/helm/opsramp-tracing-proxy/Chart.yaml @@ -13,4 +13,4 @@ version: 15.0.0 # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "15.0.0-1" \ No newline at end of file +appVersion: "15.0.0" \ No newline at end of file From 5dd318991f3d32d0f3239b52bd8258c9ce4d7d26 Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Wed, 19 Jul 2023 09:39:15 +0530 Subject: [PATCH 340/351] fixing race condition in opsramp metrics --- metrics/opsramp.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/metrics/opsramp.go b/metrics/opsramp.go index a5ae85ecb8..354ecb7387 100644 --- a/metrics/opsramp.go +++ b/metrics/opsramp.go @@ -34,9 +34,10 @@ const ( ) var ( - muxer *mux.Router - server *http.Server - hostname string + muxer *mux.Router + server *http.Server + serverMut sync.Mutex + hostname string ) func init() { @@ -104,13 +105,17 @@ func (p *OpsRampMetrics) Start() error { p.Logger.Error().Logf("metrics server shutdown: %v", err) } } + serverMut.Lock() server = &http.Server{ Addr: metricsConfig.ListenAddr, Handler: muxer, ReadHeaderTimeout: 10 * time.Second, } go func() { - server.ListenAndServe() + defer serverMut.Unlock() + if err := server.ListenAndServe(); err != http.ErrServerClosed { + p.Logger.Error().Logf("%v", err) + } }() if p.Config.GetSendMetricsToOpsRamp() { From e5c815002df776c35b2a3144004440ccee58cdfc Mon Sep 17 00:00:00 2001 From: Lokesh-Balla Date: Wed, 19 Jul 2023 21:14:44 +0530 Subject: [PATCH 341/351] [ITOM-76287] - fixing race conditions in auth, additional attributes etc --- collect/collect.go | 24 +--------------------- go.mod | 24 ++++++++++++---------- go.sum | 50 ++++++++++++++++++++++++++-------------------- route/route.go | 29 +++++++++++++++++++++++++++ 4 files changed, 71 insertions(+), 56 deletions(-) diff --git a/collect/collect.go b/collect/collect.go index c5d1619690..d4d53b03f0 100644 --- a/collect/collect.go +++ b/collect/collect.go @@ -634,17 +634,6 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { continue } - resAttr, ok := span.Data[resourceAttributesKey].(map[string]interface{}) - if !ok { - resAttr = 
map[string]interface{}{} - } - for key, value := range i.Config.GetAddAdditionalMetadata() { - if _, ok := resAttr[key]; !ok { - resAttr[key] = value - } - } - span.Data[resourceAttributesKey] = resAttr - labelToKeyMap := map[string][]string{ "service_name": {"service_name", "service.name"}, "operation": {"spanName"}, @@ -740,20 +729,9 @@ func (i *InMemCollector) send(trace *types.Trace, reason string) { sp.Data[field] = shouldSend } - resAttr, ok := sp.Data[resourceAttributesKey].(map[string]interface{}) - if !ok { - resAttr = map[string]interface{}{} - } - for key, value := range i.Config.GetAddAdditionalMetadata() { - if _, ok := resAttr[key]; !ok { - resAttr[key] = value - } - } - if i.hostname != "" { - resAttr["meta.local_hostname"] = i.hostname + sp.Data["meta.local_hostname"] = i.hostname } - sp.Data[resourceAttributesKey] = resAttr mergeTraceAndSpanSampleRates(sp, trace.SampleRate) i.Transmission.EnqueueSpan(sp) } diff --git a/go.mod b/go.mod index 5bd41398a5..98a7412b30 100644 --- a/go.mod +++ b/go.mod @@ -16,9 +16,9 @@ require ( github.com/honeycombio/dynsampler-go v0.2.1 github.com/jessevdk/go-flags v1.5.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.15.12 - github.com/opsramp/husky v0.0.0-20230609124403-2da97ba6e273 - github.com/opsramp/libtrace-go v0.0.0-20230609124256-55d8d4e593ea + github.com/klauspost/compress v1.16.7 + github.com/opsramp/husky v0.0.0-20230719151104-01eeb1b7e530 + github.com/opsramp/libtrace-go v0.0.0-20230719150918-e2ba67c0f350 github.com/panmari/cuckoofilter v1.0.3 github.com/pelletier/go-toml/v2 v2.0.5 github.com/pkg/errors v0.9.1 @@ -32,7 +32,7 @@ require ( github.com/stretchr/testify v1.8.1 github.com/tidwall/gjson v1.14.3 github.com/vmihailenco/msgpack/v5 v5.3.5 - google.golang.org/grpc v1.51.0 
+ google.golang.org/grpc v1.56.2 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v2 v2.4.0 ) @@ -48,8 +48,8 @@ require ( github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 // indirect github.com/go-playground/locales v0.13.0 // indirect github.com/go-playground/universal-translator v0.17.0 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.14.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/leodido/go-urn v1.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect @@ -68,11 +68,13 @@ require ( github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - golang.org/x/net v0.5.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect - google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect - google.golang.org/protobuf v1.28.1 // indirect + golang.org/x/net v0.12.0 // indirect + golang.org/x/sys v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect + google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/alexcesaro/statsd.v2 v2.0.0 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index c090e8127b..0fcb752db8 100644 --- a/go.sum +++ b/go.sum @@ -367,7 +367,6 @@ dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -441,8 +440,8 @@ github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+Pugk github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -471,8 +470,9 @@ github.com/golang/protobuf v1.4.2/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -536,8 +536,9 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.14.0 h1:t7uX3JBHdVwAi3G7sSSdbsk8NfgA+LnUS88V/2EKaA0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.14.0/go.mod h1:4OGVnY4qf2+gw+ssiHbW+pq4mo2yko94YxxMmXZ7jCA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -557,11 +558,11 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM= -github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -578,10 +579,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/opsramp/husky v0.0.0-20230609124403-2da97ba6e273 h1:T5FmJZ5McufpKgX4fqleen0PJtyDwpWFNJcvr5ff1xQ= -github.com/opsramp/husky v0.0.0-20230609124403-2da97ba6e273/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= -github.com/opsramp/libtrace-go v0.0.0-20230609124256-55d8d4e593ea h1:UtVFx3EkflrNlw467lBHu6qcaS6+NKg9PZZRvzjO9eo= -github.com/opsramp/libtrace-go v0.0.0-20230609124256-55d8d4e593ea/go.mod h1:XvyEnnTyL+klFisHWbLeBAihYq7b1QBF90iLCcns0cQ= +github.com/opsramp/husky v0.0.0-20230719151104-01eeb1b7e530 h1:1lgA35HukLuhLrONDpHvmnrlGdmozMD5oju1Pk2RLgo= +github.com/opsramp/husky v0.0.0-20230719151104-01eeb1b7e530/go.mod h1:GzTlIB+x7FULGr/mPWJNWMBd+7mrGHzhB9sMdaIyRUw= +github.com/opsramp/libtrace-go v0.0.0-20230719150918-e2ba67c0f350 h1:h2W9jda/cvoiir4kyJRGlaMb6+aeAASInkVE7+ZCP7M= +github.com/opsramp/libtrace-go v0.0.0-20230719150918-e2ba67c0f350/go.mod h1:yn9rTiwFOqvh/3VqS5jaIo1vmYf/Mast5jptJz3GJvU= github.com/panmari/cuckoofilter v1.0.3 h1:MgTxXG2aP0YPWFyY1sKt1caWidUFREk9BaOnakDKZOU= github.com/panmari/cuckoofilter v1.0.3/go.mod h1:O7+ZOHxwlADJ1So2/ZsKBExDwILNPZsyt77zN0ZTBLg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= @@ -754,8 +755,8 @@ golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= 
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -864,8 +865,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -880,8 +881,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text 
v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1111,8 +1112,12 @@ google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnp google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70= -google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw= +google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753/go.mod h1:iqkVr8IRpZ53gx1dEnWlCUIEwDWqWARWrbzpasaTNYM= +google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 h1:lCbbUxUDD+DiXx9Q6F/ttL0aAu7N2pz8XnmMm8ZW4NE= +google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod 
h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1148,8 +1153,8 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1165,8 +1170,9 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= 
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/route/route.go b/route/route.go index d6d75e524a..13e7a44236 100644 --- a/route/route.go +++ b/route/route.go @@ -51,8 +51,15 @@ const ( traceIDLongLength = 16 GRPCMessageSizeMax int = 5000000 // 5MB defaultSampleRate = 1 + + resourceAttributesKey = "resourceAttributes" + spanAttributesKey = "spanAttributes" + eventAttributesKey = "eventAttributes" + unknownService = "unknown_service" ) +var possibleServiceNames = []string{"service_name", "service.name"} + type Router struct { Config config.Config `inject:""` Logger logger.Logger `inject:""` @@ -519,6 +526,28 @@ func (r *Router) processEvent(ev *types.Event, reqID interface{}) error { WithString("dataset", ev.Dataset). 
WithString("environment", ev.Environment) + // adding additional attributes to resource attributes + resAttr, ok := ev.Data[resourceAttributesKey].(map[string]interface{}) + if !ok { + resAttr = map[string]interface{}{} + } + for key, value := range r.Config.GetAddAdditionalMetadata() { + if _, ok := resAttr[key]; !ok { + resAttr[key] = value + } + } + isUnknownService := true + for _, key := range possibleServiceNames { + if _, ok := resAttr[key]; ok { + isUnknownService = false + break + } + } + if isUnknownService { + resAttr[possibleServiceNames[0]] = unknownService + } + ev.Data[resourceAttributesKey] = resAttr + // extract trace ID, route to self or peer, pass on to collector // TODO make trace ID field configurable var traceID string From 74fa88cdc74ce58806b1faf40ff18e889cf876b6 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Fri, 11 Aug 2023 06:10:16 +0530 Subject: [PATCH 342/351] Updating ReadMe.md --- README.md | 213 +++++++++--------------------------------------------- 1 file changed, 36 insertions(+), 177 deletions(-) diff --git a/README.md b/README.md index 0ebc57d0d5..8cc1db892d 100644 --- a/README.md +++ b/README.md @@ -1,187 +1,46 @@ -# tracing-proxy - the Honeycomb Sampling Proxy - -![tracing-proxy](https://user-images.githubusercontent.com/6510988/94976958-8cadba80-04cb-11eb-9883-6e8ea554a081.png) - -[![OSS Lifecycle](https://img.shields.io/osslifecycle/honeycombio/tracing-proxy?color=success)](https://github.com/jirs5/home/blob/main/honeycomb-oss-lifecycle-and-practices.md) -[![Build Status](https://circleci.com/gh/honeycombio/tracing-proxy.svg?style=shield)](https://circleci.com/gh/honeycombio/tracing-proxy) +# Tracing-Proxy - Sampling Proxy For OpenTelemetry Traces + +[![Go Report Card](https://goreportcard.com/badge/github.com/opsramp/tracing-proxy)](https://goreportcard.com/report/github.com/opsramp/tracing-proxy) +

+GitHub go.mod Go version +GitHub release (latest by date) +GoDoc +GitHub +GitHub Workflow Status (with branch) +GitHub repo size +

## Purpose -tracing-proxy is a trace-aware sampling proxy. It collects spans emitted by your application, gathers them into traces, and examines them as a whole. This enables tracing-proxy to make an intelligent sampling decision (whether to keep or discard) based on the entire trace. Buffering the spans allows you to use fields that might be present in different spans within the trace to influence the sampling decision. For example, the root span might have HTTP status code, whereas another span might have information on whether the request was served from a cache. Using tracing-proxy, you can choose to keep only traces that had a 500 status code and were also served from a cache. - -## Setting up tracing-proxy - -tracing-proxy is designed to sit within your infrastructure where all sources of Honeycomb events (aka spans if you're doing tracing) can reach it. -A standard deployment will have a cluster of two or more tracing-proxy processes accessible via a separate load balancer. -tracing-proxy processes must be able to communicate with each other to concentrate traces on single servers. - -Within your application (or other Honeycomb event sources) you would configure the `API Host` to be http(s)://load-balancer/. Everything else remains the same (api key, dataset name, etc. - all that lives with the originating client). +Tracing-Proxy is a trace-aware sampling proxy. It collects spans emitted by your application, gathers them into traces, +and examines them as a whole. This enables the proxy to make an intelligent sampling decision (whether to keep or +discard) based on the entire trace. Buffering the spans allows you to use fields that might be present in different +spans within the trace to influence the sampling decision. For example, the root span might have HTTP status code, +whereas another span might have information on whether the request was served from a cache. 
Using this proxy, you can +choose to keep only traces that had a 500 status code and were also served from a cache. ### Minimum configuration -The tracing-proxy cluster should have at least 2 servers with 2GB RAM and access to 2 cores each. - -Additional RAM and CPU can be used by increasing configuration values to have a larger `CacheCapacity`. The cluster should be monitored for panics caused by running out of memory and scaled up (with either more servers or more RAM per server) when they occur. - -### Builds - -tracing-proxy is built by [CircleCI](https://circleci.com/gh/honeycombio/tracing-proxy). Released versions of tracing-proxy are available via Github under the Releases tab. - -## Configuration - -Configuration is done in one of two ways, either entirely by the config file or a combination of the config file and a Redis service for managing the list of peers in the cluster. -When using Redis, it only manages peers; all other configuration remains managed by the config file. - -There are a few vital configuration options; read through this list and make sure all the variables are set. - -### File-based Config - -- API Keys: tracing-proxy itself needs to be configured with a list of your API keys. This lets it respond with a 401/Unauthorized if an unexpected API key is used. You can configure tracing-proxy to accept all API keys by setting it to `*` but then you will lose the authentication feedback to your application. tracing-proxy will accept all events even if those events will eventually be rejected by the Honeycomb API due to an API key issue. - -- Goal Sample Rate and the list of fields you'd like to use to generate the keys off which sample rate is chosen. This is where the power of the proxy comes in - being able to dynamically choose sample rates based on the contents of the traces as they go by. 
There is an overall default and dataset-specific sections for this configuration, so that different datasets can have different sets of fields and goal sample rates. - -- Trace timeout - it should be set higher (maybe double?) the longest expected trace. If all of your traces complete in under 10 seconds, 30 is a good value here. If you have traces that can last minutes, it should be raised accordingly. Note that the trace doesn't _have_ to complete before this timer expires - but the sampling decision will be made at that time. So any spans that contain fields that you want to use to compute the sample rate should arrive before this timer expires. Additional spans that arrive after the timer has expired will be sent or dropped according to the sampling decision made when the timer expired. - -- Peer list: this is a list of all the other servers participating in this tracing-proxy cluster. Traces are evenly distributed across all available servers, and any one trace must be concentrated on one server, regardless of which server handled the incoming spans. The peer list lets the cluster move spans around to the server that is handling the trace. (Not used in the Redis-based config.) - -- Buffer size: The `InMemCollector`'s `CacheCapacity` setting determines how many in-flight traces you can have. This should be large enough to avoid overflow. Some multiple (2x, 3x) the total number of in-flight traces you expect is a good place to start. If it's too low you will see the `collector_cache_buffer_overrun` metric increment. If you see that, you should increase the size of the buffer. - -There are a few components of tracing-proxy with multiple implementations; the config file lets you choose which you'd like. As an example, there are two logging implementations - one that uses `logrus` and sends logs to STDOUT and a `honeycomb` implementation that sends the log messages to a Honeycomb dataset instead. 
Components with multiple implementations have one top level config item that lets you choose which implementation to use and then a section further down with additional config options for that choice (for example, the Honeycomb logger requires an API key). - -When configuration changes, tracing-proxy will automatically reload the configuration[^1]. - -[^1]: When running tracing-proxy within docker, be sure to mount the directory containing configuration & rules files so that [reloading will work](https://github.com/spf13/viper/issues/920) as expected. - -### Redis-based Peer Management - -With peer management in Redis, all config options _except_ peer management are still handled by the config file. -Only coordinating the list of peers in the tracing-proxy cluster is managed with Redis. - -To enable the redis-based config: - -- set PeerManagement.Type in the config file to "redis" - -When launched in redis-config mode, tracing-proxy needs a redis host to use for managing the list of peers in the tracing-proxy cluster. This hostname and port can be specified in one of two ways: - -- set the `REFINERY_REDIS_HOST` environment variable (and optionally the `REFINERY_REDIS_USERNAME` and `REFINERY_REDIS_PASSWORD` environment variables) -- set the `RedisHost` field in the config file (and optionally the `RedisUsername` and `RedisPassword` fields in the config file) - -The Redis host should be a hostname and a port, for example `redis.mydomain.com:6379`. The example config file has `localhost:6379` which obviously will not work with more than one host. When TLS is required to connect to the Redis instance, set the `UseTLS` config to `true`. - -By default, a tracing-proxy process will register itself in Redis using its local hostname as its identifier for peer communications. 
-
-In environments where domain name resolution is slow or unreliable, override the reliance on name lookups by specifying the name of the peering network interface with the `IdentifierInterfaceName` configuration option.
-See the [tracing-proxy documentation](https://docs.honeycomb.io/manage-data-volume/tracing-proxy/) for more details on tuning a cluster.
-
-
-### Mixing Classic and Environment & Services Rule Definitions
-
-With the change to support environments and services in Honeycomb, some users will want to support both sending telemetry to a classic dataset and a new environment called the same thing (eg `production`).
-
-This can be accomplished by leveraging the new `DatasetPrefix` configuration property and then using that prefix in the rules definitions for the classic datasets.
-
-When Refinery receives telemetry using an API key associated with a classic dataset, it will then use the prefix in the form `{prefix}.{dataset}` when trying to resolve the rules definition.
-
-For example
-config.toml
-```toml
-DatasetPrefix = "classic"
-```
-
-rules.toml
-```toml
-# default rules
-Sampler = "DeterministicSampler"
-SampleRate = 1
-
-  [production] # environment called "production"
-  Sampler = "DeterministicSampler"
-  SampleRate = 5
-
-  [classic.production] # dataset called "production"
-  Sampler = "DeterministicSampler"
-  SampleRate = 10
-```
+The Tracing-Proxy cluster should have at least 2 servers with 2GB RAM and access to 2 cores each.

-## How sampling decisions are made
-
-In the configuration file, you can choose from a few sampling methods and specify options for each. The `DynamicSampler` is the most interesting and most commonly used. It uses the `AvgSampleRate` algorithm from the [`dynsampler-go`](https://github.com/honeycombio/dynsampler-go) package. Briefly described, you configure tracing-proxy to examine the trace for a set of fields (for example, `request.status_code` and `request.method`).
It collects all the values found in those fields anywhere in the trace (eg "200" and "GET") together into a key it hands to the dynsampler. The dynsampler code will look at the frequency that key appears during the previous 30 seconds (or other value set by the `ClearFrequencySec` setting) and use that to hand back a desired sample rate. More frequent keys are sampled more heavily, so that an even distribution of traffic across the keyspace is represented in Honeycomb. - -By selecting fields well, you can drop significant amounts of traffic while still retaining good visibility into the areas of traffic that interest you. For example, if you want to make sure you have a complete list of all URL handlers invoked, you would add the URL (or a normalized form) as one of the fields to include. Be careful in your selection though, because if the combination of fields creates a unique key each time, you won't sample out any traffic. Because of this it is not effective to use fields that have unique values (like a UUID) as one of the sampling fields. Each field included should ideally have values that appear many times within any given 30 second window in order to effectively turn in to a sample rate. - -For more detail on how this algorithm works, please refer to the `dynsampler` package itself. - -## Dry Run Mode - -When getting started with tracing-proxy or when updating sampling rules, it may be helpful to verify that the rules are working as expected before you start dropping traffic. By enabling dry run mode, all spans in each trace will be marked with the sampling decision in a field called `tracing-proxy_kept`. All traces will be sent to Honeycomb regardless of the sampling decision. You can then run queries in Honeycomb on this field to check your results and verify that the rules are working as intended. Enable dry run mode by adding `DryRun = true` in your configuration, as noted in `rules_complete.toml`. 
- -When dry run mode is enabled, the metric `trace_send_kept` will increment for each trace, and the metric for `trace_send_dropped` will remain 0, reflecting that we are sending all traces to Honeycomb. +Additional RAM and CPU can be used by increasing configuration values to have a larger `CacheCapacity`. The cluster +should be monitored for panics caused by running out of memory and scaled up (with either more servers or more RAM per +server) when they occur. ## Scaling Up -tracing-proxy uses bounded queues and circular buffers to manage allocating traces, so even under high volume memory use shouldn't expand dramatically. However, given that traces are stored in a circular buffer, when the throughput of traces exceeds the size of the buffer, things will start to go wrong. If you have statistics configured, a counter named `collector_cache_buffer_overrun` will be incremented each time this happens. The symptoms of this will be that traces will stop getting accumulated together, and instead spans that should be part of the same trace will be treated as two separate traces. All traces will continue to be sent (and sampled) but the sampling decisions will be inconsistent so you'll wind up with partial traces making it through the sampler and it will be very confusing. The size of the circular buffer is a configuration option named `CacheCapacity`. To choose a good value, you should consider the throughput of traces (e.g. traces / second started) and multiply that by the maximum duration of a trace (say, 3 seconds), then multiply that by some large buffer (maybe 10x). This will give you good headroom. - -Determining the number of machines necessary in the cluster is not an exact science, and is best influenced by watching for buffer overruns. 
But for a rough heuristic, count on a single machine using about 2G of memory to handle 5000 incoming events and tracking 500 sub-second traces per second (for each full trace lasting less than a second and an average size of 10 spans per trace). - -## Understanding Regular Operation - -tracing-proxy emits a number of metrics to give some indication about the health of the process. These metrics can be exposed to Prometheus or sent up to Honeycomb. The interesting ones to watch are: - -- Sample rates: how many traces are kept / dropped, and what does the sample rate distribution look like? -- [incoming|peer]_router_\*: how many events (no trace info) vs. spans (have trace info) have been accepted, and how many sent on to peers? -- collector_cache_buffer_overrun: this should remain zero; a positive value indicates the need to grow the size of the collector's circular buffer (via configuration `CacheCapacity`). -- process_uptime_seconds: records the uptime of each process; look for unexpected restarts as a key towards memory constraints. - -## Troubleshooting - -### Logging - -The default logging level of `warn` is almost entirely silent. The `debug` level emits too much data to be used in production, but contains excellent information in a pre-production environment. Setting the logging level to `debug` during initial configuration will help understand what's working and what's not, but when traffic volumes increase it should be set to `warn`. - -### Configuration - -Because the normal configuration file formats (TOML and YAML) can sometimes be confusing to read and write, it may be valuable to check the loaded configuration by using one of the `/query` endpoints from the command line on a server that can access a refinery host. - -The `/query` endpoints are protected and can be enabled by specifying `QueryAuthToken` in the configuration file or specifying `REFINERY_QUERY_AUTH_TOKEN` in the environment. 
All requests to any `/query` endpoint must include the header `X-Honeycomb-Refinery-Query` set to the value of the specified token. - -`curl --include --get $REFINERY_HOST/query/allrules/$FORMAT --header "x-honeycomb-refinery-query: my-local-token"` will retrieve the entire rules configuration. - -`curl --include --get $REFINERY_HOST/query/rules/$FORMAT/$DATASET --header "x-honeycomb-refinery-query: my-local-token"` will retrieve the rule set that refinery will use for the specified dataset. It comes back as a map of the sampler type to its rule set. - -`curl --include --get $REFINERY_HOST/query/configmetadata --header "x-honeycomb-refinery-query: my-local-token"` will retrieve information about the configurations currently in use, including the timestamp when the configuration was last loaded. - -For file-based configurations (the only type currently supported), the `hash` value is identical to the value generated by the `md5sum` command for the given config file. - -For all of these commands: -- `$REFINERY_HOST` should be the url of your refinery. -- `$FORMAT` can be one of `json`, `yaml`, or `toml`. -- `$DATASET` is the name of the dataset you want to check. - -### Sampling - -Refinery can send telemetry that includes information that can help debug the sampling decisions that are made. To enable it, in the config file, set `AddRuleReasonToTrace` to `true`. This will cause traces that are sent to Honeycomb to include a field `meta.refinery.reason`, which will contain text indicating which rule was evaluated that caused the trace to be included. - -## Restarts - -tracing-proxy does not yet buffer traces or sampling decisions to disk. When you restart the process all in-flight traces will be flushed (sent upstream to Honeycomb), but you will lose the record of past trace decisions. When started back up, it will start with a clean slate. 
- -## Architecture of tracing-proxy itself (for contributors) - -Within each directory, the interface the dependency exports is in the file with the same name as the directory and then (for the most part) each of the other files are alternative implementations of that interface. For example, in `logger`, `/logger/logger.go` contains the interface definition and `logger/honeycomb.go` contains the implementation of the `logger` interface that will send logs to Honeycomb. - -`main.go` sets up the app and makes choices about which versions of dependency implementations to use (eg which logger, which sampler, etc.) It starts up everything and then launches `App` - -`app/app.go` is the main control point. When its `Start` function ends, the program shuts down. It launches two `Router`s which listen for incoming events. - -`route/route.go` listens on the network for incoming traffic. There are two routers running and they handle different types of incoming traffic: events coming from the outside world (the `incoming` router) and events coming from another member of the tracing-proxy cluster (`peer` traffic). Once it gets an event, it decides where it should go next: is this incoming request an event (or batch of events), and if so, does it have a trace ID? Everything that is not an event or an event that does not have a trace ID is immediately handed to `transmission` to be forwarded on to Honeycomb. If it is an event with a trace ID, the router extracts the trace ID and then uses the `sharder` to decide which member of the tracing-proxy cluster should handle this trace. If it's a peer, the event will be forwarded to that peer. If it's us, the event will be transformed into an internal representation and handed to the `collector` to bundle spans into traces. - -`collect/collect.go` the collector is responsible for bundling spans together into traces and deciding when to send them to Honeycomb or if they should be dropped. 
The first time a trace ID is seen, the collector starts a timer. If the root span (aka a span with a trace ID and no parent ID) arrives before the timer expires, then the trace is considered complete. The trace is sent and the timer is canceled. If the timer expires before the root span arrives, the trace will be sent whether or not it is complete. Just before sending, the collector asks the `sampler` for a sample rate and whether or not to keep the trace. The collector obeys this sampling decision and records it (the record is applied to any spans that may come in as part of the trace after the decision has been made). After making the sampling decision, if the trace is to be kept, it is passed along to the `transmission` for actual sending. - -`transmit/transmit.go` is a wrapper around the HTTP interactions with the Honeycomb API. It handles batching events together and sending them upstream. - -`logger` and `metrics` are for managing the logs and metrics that tracing-proxy itself produces. - -`sampler` contains algorithms to compute sample rates based on the traces provided. - -`sharder` determines which peer in a clustered tracing-proxy config is supposed to handle an individual trace. - -`types` contains a few type definitions that are used to hand data in between packages. +Tracing-Proxy uses bounded queues and circular buffers to manage allocating traces, so even under high volume memory use +shouldn't expand dramatically. However, given that traces are stored in a circular buffer, when the throughput of traces +exceeds the size of the buffer, things will start to go wrong. If you have statistics configured, a counter +named `collector_cache_buffer_overrun` will be incremented each time this happens. The symptoms of this will be that +traces will stop getting accumulated together, and instead spans that should be part of the same trace will be treated +as two separate traces. 
All traces will continue to be sent (and sampled) but the sampling decisions will be +inconsistent so you'll wind up with partial traces making it through the sampler and it will be very confusing. The size +of the circular buffer is a configuration option named `CacheCapacity`. To choose a good value, you should consider the +throughput of traces (e.g. traces / second started) and multiply that by the maximum duration of a trace (say, 3 +seconds), then multiply that by some large buffer (maybe 10x). This will give you good headroom. + +Determining the number of machines necessary in the cluster is not an exact science, and is best influenced by watching +for buffer overruns. But for a rough heuristic, count on a single machine using about 2G of memory to handle 5000 +incoming events and tracking 500 sub-second traces per second (for each full trace lasting less than a second and an +average size of 10 spans per trace). From d218fda979f49eddfc6c5b76b320e68457d2c01f Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Fri, 11 Aug 2023 09:17:22 +0530 Subject: [PATCH 343/351] hpe deployment files --- build/kubernetes/yaml/k8s-config-cm.yaml | 333 +++++++++-------- build/kubernetes/yaml/k8s-deployment.yaml | 21 +- deploy/app-brigade-manifest.json | 38 ++ deploy/tracing-proxy-config-cm.yml | 416 ++++++++++++++++++++++ deploy/tracing-proxy-deployment.yml | 64 ++++ deploy/tracing-proxy-ns.yml | 5 + deploy/tracing-proxy-rules-cm.yml | 221 ++++++++++++ deploy/tracing-proxy-svc.yml | 26 ++ 8 files changed, 955 insertions(+), 169 deletions(-) create mode 100644 deploy/app-brigade-manifest.json create mode 100644 deploy/tracing-proxy-config-cm.yml create mode 100644 deploy/tracing-proxy-deployment.yml create mode 100644 deploy/tracing-proxy-ns.yml create mode 100644 deploy/tracing-proxy-rules-cm.yml create mode 100644 deploy/tracing-proxy-svc.yml diff --git a/build/kubernetes/yaml/k8s-config-cm.yaml b/build/kubernetes/yaml/k8s-config-cm.yaml index 1c9dbc879d..ca4a809bb2 100644 
--- a/build/kubernetes/yaml/k8s-config-cm.yaml +++ b/build/kubernetes/yaml/k8s-config-cm.yaml @@ -125,7 +125,7 @@ data: # If a field is not present in the span, it will not be present in the error log. # Default is ["trace.span_id"]. AdditionalErrorFields: - - trace.span_id + - trace.span_id # AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate # the number of child spans on the trace at the time the sampling decision was made. @@ -147,48 +147,48 @@ data: ## Retry Configuration ## ######################### RetryConfiguration: - # InitialInterval the time to wait after the first failure before retrying. - InitialInterval: 500ms - # RandomizationFactor is a random factor used to calculate next backoff - # Randomized interval = RetryInterval * (1 ± RandomizationFactor) - RandomizationFactor: 0.5 - # Multiplier is the value multiplied by the backoff interval bounds - Multiplier: 1.5 - # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between - # consecutive retries will always be `MaxInterval`. - MaxInterval: 60s - # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request. - # Once this value is reached, the data is discarded. - MaxElapsedTime: 15m - + # InitialInterval the time to wait after the first failure before retrying. + InitialInterval: 500ms + # RandomizationFactor is a random factor used to calculate next backoff + # Randomized interval = RetryInterval * (1 ± RandomizationFactor) + RandomizationFactor: 0.5 + # Multiplier is the value multiplied by the backoff interval bounds + Multiplier: 1.5 + # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between + # consecutive retries will always be `MaxInterval`. + MaxInterval: 60s + # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request. + # Once this value is reached, the data is discarded. 
+ MaxElapsedTime: 15m + ######################### ## Proxy Configuration ## ######################### ProxyConfiguration: - # Protocol accepts http and https - Protocol: "http" - # Host takes the proxy server address - Host: "" - # Port takes the proxy server port - Port: 3128 - # UserName takes the proxy username - Username: "" - # Password takes the proxy password - Password: "" - + # Protocol accepts http and https + Protocol: "http" + # Host takes the proxy server address + Host: "" + # Port takes the proxy server port + Port: 3128 + # UserName takes the proxy username + Username: "" + # Password takes the proxy password + Password: "" + ################################## ## Authentication Configuration ## ################################## AuthConfiguration: - # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made - Endpoint: "" - # Key - authentication key provided in OpsRamp Portal - Key: "" - # Secret - authentication Secret provided in OpsRamp Portal - Secret: "" - # TenantId - tenant/client id to which the traces are to be posted - TenantId: "" - + # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made + Endpoint: "" + # Key - authentication key provided in OpsRamp Portal + Key: "" + # Secret - authentication Secret provided in OpsRamp Portal + Secret: "" + # TenantId - tenant/client id to which the traces are to be posted + TenantId: "" + ############################ ## Implementation Choices ## ############################ @@ -204,148 +204,145 @@ data: # InMemCollector brings together all the settings that are relevant to # collecting spans together to make traces. InMemCollector: - - # The collection cache is used to collect all spans into a trace as well as - # remember the sampling decision for any spans that might come in after the - # trace has been marked "complete" (either by timing out or seeing the root - # span). 
The number of traces in the cache should be many multiples (100x to - # 1000x) of the total number of concurrently active traces (trace throughput * - # trace duration). - CacheCapacity: 1000 - - # MaxAlloc is optional. If set, it must be an integer >= 0. - # If set to a non-zero value, once per tick (see SendTicker) the collector - # will compare total allocated bytes to this value. If allocation is too - # high, cache capacity will be reduced and an error will be logged. - # Useful values for this setting are generally in the range of 75%-90% of - # available system memory. Using 80% is the recommended. - # This value should be set in according to the resources.limits.memory - # By default that setting is 4GB, and this is set to 85% of that limit - # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,837 - # MaxAlloc: 3435973836 - MaxAlloc: 0 - + + # The collection cache is used to collect all spans into a trace as well as + # remember the sampling decision for any spans that might come in after the + # trace has been marked "complete" (either by timing out or seeing the root + # span). The number of traces in the cache should be many multiples (100x to + # 1000x) of the total number of concurrently active traces (trace throughput * + # trace duration). + CacheCapacity: 1000 + + # MaxAlloc is optional. If set, it must be an integer >= 0. + # If set to a non-zero value, once per tick (see SendTicker) the collector + # will compare total allocated bytes to this value. If allocation is too + # high, cache capacity will be reduced and an error will be logged. + # Useful values for this setting are generally in the range of 75%-90% of + # available system memory. Using 80% is the recommended. 
+ # This value should be set in according to the resources.limits.memory + # By default that setting is 4GB, and this is set to 85% of that limit + # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,837 + # MaxAlloc: 3435973836 + MaxAlloc: 0 + ##################### ## Peer Management ## ##################### - + # Configure how OpsRamp-Tracing-Proxy peers are discovered and managed PeerManagement: - # Strategy controls the way that traces are assigned to Trace Proxy nodes. - # The "legacy" strategy uses a simple algorithm that unfortunately causes - # 1/2 of the in-flight traces to be assigned to a different node whenever the - # number of nodes changes. - # The legacy strategy is deprecated and is intended to be removed in a future release. - # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the - # number of nodes) are disrupted when the node count changes. - # Not eligible for live reload. - Strategy: "hash" - - ########################################################### - ###### File (Suitable only for VM based deployments) ###### - ########################################################### - Type: "file" - - # Peers is the list of all servers participating in this proxy cluster. Events - # will be sharded evenly across all peers based on the Trace ID. Values here - # should be the base URL used to access the peer, and should include scheme, - # hostname (or ip address) and port. All servers in the cluster should be in - # this list, including this host. 
- Peers: [ - "http://127.0.0.1:8084", #only grpc peer listener used - # "http://127.0.0.1:8083", - # "http://10.1.2.3.4:8080", - # "http://tracing-proxy-1231:8080", - # "http://peer-3.fqdn" // assumes port 80 - ] - ########################################################### - - ########################################################### - ###### Redis (Suitable for all types of deployments) ###### - ########################################################### - # # The type should always be redis when deployed to Kubernetes environments - # Type: "redis" - # - # # RedisHost is used to connect to redis for peer cluster membership management. - # # Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes - # # precedence and this value is ignored. - # # Not eligible for live reload. - # # RedisHost will default to the name used for the release or name overrides depending on what is used, - # # but can be overriden to a specific value. - # RedisHost: localhost:6379 - # - # # RedisUsername is the username used to connect to redis for peer cluster membership management. - # # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes - # # precedence and this value is ignored. - # # Not eligible for live reload. - # RedisUsername: "" - # - # # RedisPassword is the password used to connect to redis for peer cluster membership management. - # # If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes - # # precedence and this value is ignored. - # # Not eligible for live reload. - # RedisPassword: "" - # - # # RedisPrefix is a string used as a prefix for the keys in redis while storing - # # the peer membership. It might be useful to set this in any situation where - # # multiple trace-proxy clusters or multiple applications want to share a single - # # Redis instance. It may not be blank. 
- # RedisPrefix: "tracing-proxy" - # - # # RedisDatabase is an integer from 0-15 indicating the database number to use - # # for the Redis instance storing the peer membership. It might be useful to set - # # this in any situation where multiple trace-proxy clusters or multiple - # # applications want to share a single Redis instance. - # RedisDatabase: 0 - # - # # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. - # # Not eligible for live reload. - # UseTLS: false - # - # # UseTLSInsecure disables certificate checks - # # Not eligible for live reload. - # UseTLSInsecure: false - # - # # IdentifierInterfaceName is optional. - # # Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. - # # When configured the pod's IP will be used in the peer list - # # IdentifierInterfaceName: eth0 - # - # # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first - # # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use - # # the first IPV6 unicast address found. - # UseIPV6Identifier: false - ########################################################### - + # Strategy controls the way that traces are assigned to Trace Proxy nodes. + # The "legacy" strategy uses a simple algorithm that unfortunately causes + # 1/2 of the in-flight traces to be assigned to a different node whenever the + # number of nodes changes. + # The legacy strategy is deprecated and is intended to be removed in a future release. + # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the + # number of nodes) are disrupted when the node count changes. + # Not eligible for live reload. 
+ Strategy: "hash" + + ########################################################### + ###### File (Suitable only for VM based deployments ###### + ###### and single replica k8s deployments) ###### + ########################################################### + Type: "file" + + # Peers is the list of all servers participating in this proxy cluster. Events + # will be sharded evenly across all peers based on the Trace ID. Values here + # should be the base URL used to access the peer, and should include scheme, + # hostname (or ip address) and port. All servers in the cluster should be in + # this list, including this host. + Peers: [ + "http://127.0.0.1:8084", #only grpc peer listener used + ] + ########################################################### + + ########################################################### + ###### Redis (Suitable for all types of deployments) ###### + ########################################################### + ## The type should always be redis when deployed to Kubernetes environments + #Type: "redis" + + ## RedisHost is used to connect to redis for peer cluster membership management. + ## Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes + ## precedence and this value is ignored. + ## Not eligible for live reload. + ## RedisHost will default to the name used for the release or name overrides depending on what is used, + ## but can be overriden to a specific value. + #RedisHost: '{{include "opsramp-tracing-proxy.redis.fullname" .}}:6379' + + ## RedisUsername is the username used to connect to redis for peer cluster membership management. + ## If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes + ## precedence and this value is ignored. + ## Not eligible for live reload. + #RedisUsername: "" + + ## RedisPassword is the password used to connect to redis for peer cluster membership management. 
+ ## If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes + ## precedence and this value is ignored. + ## Not eligible for live reload. + #RedisPassword: "" + + ## RedisPrefix is a string used as a prefix for the keys in redis while storing + ## the peer membership. It might be useful to set this in any situation where + ## multiple trace-proxy clusters or multiple applications want to share a single + ## Redis instance. It may not be blank. + #RedisPrefix: "tracing-proxy" + + ## RedisDatabase is an integer from 0-15 indicating the database number to use + ## for the Redis instance storing the peer membership. It might be useful to set + ## this in any situation where multiple trace-proxy clusters or multiple + ## applications want to share a single Redis instance. + #RedisDatabase: 0 + + ## UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. + ## Not eligible for live reload. + #UseTLS: false + + ## UseTLSInsecure disables certificate checks + ## Not eligible for live reload. + #UseTLSInsecure: false + + ## IdentifierInterfaceName is optional. + ## Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. + ## When configured the pod's IP will be used in the peer list + #IdentifierInterfaceName: eth0 + + ## UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first + ## IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use + ## the first IPV6 unicast address found. + #UseIPV6Identifier: false + ########################################################### + # LogrusLogger is a section of the config only used if you are using the # LogrusLogger to send all logs to STDOUT using the logrus package. LogrusLogger: - # LogFormatter specifies the log format. 
Accepted values are one of ["logfmt", "json"]
- LogFormatter: 'json'
- # LogOutput specifies where the logs are supposed to be written. Accepts one of ["stdout", "stderr", "file"]
- LogOutput: 'stdout'
-
+ # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"]
+ LogFormatter: 'json'
+ # LogOutput specifies where the logs are supposed to be written. Accepts one of ["stdout", "stderr"]
+ LogOutput: 'stdout'
+
MetricsConfig:
- # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp
- Enable: true
-
- # ListenAddr determines the interface and port on which Prometheus will
- # listen for requests for /metrics. Must be different from the main Trace Proxy
- # listener.
- ListenAddr: '0.0.0.0:2112'
-
- # OpsRampAPI is the URL for the upstream OpsRamp API.
- OpsRampAPI: ""
-
- # ReportingInterval is the frequency specified in seconds at which
- # the metrics are collected and sent to OpsRamp
- ReportingInterval: 10
-
- # MetricsList is a list of regular expressions which match the metric
- # names. Keep the list as small as possible since too many regular expressions can lead to bad performance.
- # Internally, all the items in the list are concatenated using '|' to make the computation faster.
- MetricsList: [ ".*" ]
-
+ # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp
+ Enable: true
+
+ # ListenAddr determines the interface and port on which Prometheus will
+ # listen for requests for /metrics. Must be different from the main Trace Proxy
+ # listener.
+ ListenAddr: '0.0.0.0:2112'
+
+ # OpsRampAPI is the URL for the upstream OpsRamp API.
+ OpsRampAPI: ""
+
+ # ReportingInterval is the frequency specified in seconds at which
+ # the metrics are collected and sent to OpsRamp
+ ReportingInterval: 10
+
+ # MetricsList is a list of regular expressions which match the metric
+ # names.
Keep the list as small as possible since too many regular expressions can lead to bad performance. + # Internally, all the items in the list are concatenated using '|' to make the computation faster. + MetricsList: [ ".*" ] + GRPCServerParameters: # MaxConnectionIdle is a duration for the amount of time after which an # idle connection would be closed by sending a GoAway. Idleness duration is diff --git a/build/kubernetes/yaml/k8s-deployment.yaml b/build/kubernetes/yaml/k8s-deployment.yaml index 7a7a3ebff6..95f5a91010 100644 --- a/build/kubernetes/yaml/k8s-deployment.yaml +++ b/build/kubernetes/yaml/k8s-deployment.yaml @@ -24,6 +24,12 @@ spec: - name: opsramp-tracing-proxy image: us-docker.pkg.dev/opsramp-registry/agent-images/trace-proxy imagePullPolicy: Always + command: + - "/usr/bin/tracing-proxy" + - "-c" + - "/etc/tracing-proxy/config.yaml" + - "-r" + - "/etc/tracing-proxy/rules.yaml" ports: - name: http containerPort: 8082 @@ -31,6 +37,11 @@ spec: - name: grpc containerPort: 9090 protocol: TCP + - name: peer + containerPort: 8083 + protocol: TCP + - containerPort: 8084 + name: grpc-peer resources: requests: memory: "2048Mi" @@ -72,4 +83,12 @@ spec: - protocol: TCP port: 8082 targetPort: 8082 - name: http \ No newline at end of file + name: http + - protocol: TCP + port: 8083 + targetPort: 8083 + name: peer + - protocol: TCP + port: 8084 + targetPort: 8084 + name: grpc-peer \ No newline at end of file diff --git a/deploy/app-brigade-manifest.json b/deploy/app-brigade-manifest.json new file mode 100644 index 0000000000..fa867f38bb --- /dev/null +++ b/deploy/app-brigade-manifest.json @@ -0,0 +1,38 @@ +{ + "payload": [ + { + "appid": "3e148737-ea6f-48e3-a62e-ae35cf135520", + "stages": [ + { + "stagename": "deployment", + "payload": [ + { + "filename": "tracing-proxy-ns.yml" + }, + { + "filename": "tracing-proxy-svc.yml" + }, + { + "filename": "tracing-proxy-config-cm.yml" + }, + { + "filename": "tracing-proxy-rules-cm.yml" + }, + { + "filename": 
"tracing-proxy-deployment.yml" + } + ] + } + ], + "Version": "${version}" + } + ], + "configmap": { + "name": "tracing-proxy", + "comment": "Please include configmap file paths in docker/Dockerfile as needed for tini", + "infra": [ + "elasticache" + ] + }, + "multi-region": "supported" +} \ No newline at end of file diff --git a/deploy/tracing-proxy-config-cm.yml b/deploy/tracing-proxy-config-cm.yml new file mode 100644 index 0000000000..222c11674c --- /dev/null +++ b/deploy/tracing-proxy-config-cm.yml @@ -0,0 +1,416 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: opsramp-tracing-proxy-config + labels: + name: opsramp-tracing-proxy-config + namespace: opsramp-tracing-proxy +data: + config.yaml: |- + ######################## + ## Trace Proxy Config ## + ######################## + + # ListenAddr is the IP and port on which to listen for incoming events. Incoming + # traffic is expected to be HTTP, so if using SSL put something like nginx in + # front to do the TLS Termination. + ListenAddr: 0.0.0.0:8082 + + # GRPCListenAddr is the IP and port on which to listen for incoming events over + # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put something like nginx in + # front to do the TLS Termination. + GRPCListenAddr: 0.0.0.0:9090 + + # PeerListenAddr is the IP and port on which to listen for traffic being + # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL + # put something like nginx in front to do the decryption. Must be different from + # ListenAddr + PeerListenAddr: 0.0.0.0:8083 + + GRPCPeerListenAddr: 0.0.0.0:8084 + + # CompressPeerCommunication determines whether to compress span data + # it forwards to peers. If it costs money to transmit data between different + # instances (e.g. they're spread across AWS availability zones), then you + # almost certainly want compression enabled to reduce your bill. 
The option to + # disable it is provided as an escape hatch for deployments that value lower CPU + # utilization over data transfer costs. + CompressPeerCommunication: true + + # OpsrampAPI is the URL for the upstream Opsramp API. + OpsrampAPI: "" + + # Dataset you want to use for sampling + Dataset: "ds" + + #Tls Options + UseTls: true + UseTlsInsecure: false + + # LoggingLevel valid options are "debug", "info", "error", and "panic". + LoggingLevel: error + + # SendDelay is a short timer that will be triggered when a trace is complete. + # Trace Proxy will wait for this duration before actually sending the trace. The + # reason for this short delay is to allow for small network delays or clock + # jitters to elapse and any final spans to arrive before actually sending the + # trace. This supports duration strings with supplied units. Set to 0 for + # immediate sends. + SendDelay: 2s + + # BatchTimeout dictates how frequently to send unfulfilled batches. By default + # this will use the DefaultBatchTimeout in libtrace as its value, which is 100ms. + # Eligible for live reload. + BatchTimeout: 1s + + # TraceTimeout is a long timer; it represents the outside boundary of how long + # to wait before sending an incomplete trace. Normally traces are sent when the + # root span arrives. Sometimes the root span never arrives (due to crashes or + # whatever), and this timer will send a trace even without having received the + # root span. If you have particularly long-lived traces you should increase this + # timer. This supports duration strings with supplied units. + TraceTimeout: 60s + + # MaxBatchSize is the number of events to be included in the batch for sending + MaxBatchSize: 500 + + # SendTicker is a short timer; it determines the duration to use to check for traces to send + SendTicker: 100ms + + # UpstreamBufferSize and PeerBufferSize control how large of an event queue to use + # when buffering events that will be forwarded to peers or the upstream API. 
+ UpstreamBufferSize: 1000 + PeerBufferSize: 1000 + + # AddHostMetadataToTrace determines whether to add information about + # the host that tracing proxy is running on to the spans that it processes. + # If enabled, information about the host will be added to each span with the + # key 'meta.local_hostname'. + AddHostMetadataToTrace: false + + # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics + # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} + # max number of additional keys supported is 5, if the limit exceeds then we considered the first 5 + # based on sorted order of keys + # "app" label is mandatory + AddAdditionalMetadata: { "app": "default" } + + # EnvironmentCacheTTL is the amount of time a cache entry will live that associates + # an API key with an environment name. + # Cache misses lookup the environment name using OpsRampAPI config value. + # Default is 1 hour ("1h"). + EnvironmentCacheTTL: "1h" + + # QueryAuthToken, if specified, provides a token that must be specified with + # the header "X-OpsRamp-Tracing-Proxy-Query" in order for a /query request to succeed. + # These /query requests are intended for debugging OpsRamp-Tracing-Proxy installations and + # are not typically needed in normal operation. + # Can be specified in the environment as TRACING_PROXY_QUERY_AUTH_TOKEN. + # If left unspecified, the /query endpoints are inaccessible. + # QueryAuthToken: "some-random-value" + + # AddRuleReasonToTrace causes traces that are sent to OpsRamp to include a field which + # contains text indicating which rule was evaluated that caused the trace to be included. + AddRuleReasonToTrace: true + + # AdditionalErrorFields should be a list of span fields that should be included when logging + # errors that happen during ingestion of events (for example, the span too large error). + # This is primarily useful in trying to track down misbehaving senders in a large installation. 
+ # The fields `dataset`, `apihost`, and `environment` are always included. + # If a field is not present in the span, it will not be present in the error log. + # Default is ["trace.span_id"]. + AdditionalErrorFields: + - trace.span_id + + # AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate + # the number of child spans on the trace at the time the sampling decision was made. + # This value is available to the rules-based sampler, making it possible to write rules that + # are dependent upon the number of spans in the trace. + # Default is false. + AddSpanCountToRoot: false + + # CacheOverrunStrategy controls the cache management behavior under memory pressure. + # "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, + # which is generally not helpful unless it occurs because of a permanent change in traffic patterns. + # In the "impact" strategy, the items having the most impact on the cache size are + # ejected from the cache earlier than normal but the cache is not resized. + # In all cases, it only applies if MaxAlloc is nonzero. + # Default is "resize" for compatibility but "impact" is recommended for most installations. + CacheOverrunStrategy: "impact" + + ######################### + ## Retry Configuration ## + ######################### + RetryConfiguration: + # InitialInterval the time to wait after the first failure before retrying. + InitialInterval: 500ms + # RandomizationFactor is a random factor used to calculate next backoff + # Randomized interval = RetryInterval * (1 ± RandomizationFactor) + RandomizationFactor: 0.5 + # Multiplier is the value multiplied by the backoff interval bounds + Multiplier: 1.5 + # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between + # consecutive retries will always be `MaxInterval`. 
+ MaxInterval: 60s + # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request. + # Once this value is reached, the data is discarded. + MaxElapsedTime: 15m + + ######################### + ## Proxy Configuration ## + ######################### + ProxyConfiguration: + # Protocol accepts http and https + Protocol: "http" + # Host takes the proxy server address + Host: "" + # Port takes the proxy server port + Port: 3128 + # UserName takes the proxy username + Username: "" + # Password takes the proxy password + Password: "" + + ################################## + ## Authentication Configuration ## + ################################## + AuthConfiguration: + # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made + Endpoint: "" + # Key - authentication key provided in OpsRamp Portal + Key: "" + # Secret - authentication Secret provided in OpsRamp Portal + Secret: "" + # TenantId - tenant/client id to which the traces are to be posted + TenantId: "" + + ############################ + ## Implementation Choices ## + ############################ + # Each of the config options below chooses an implementation of a Trace Proxy + # component to use. Depending on the choice, there may be more configuration + # required below in the section for that choice. Changing implementation choices + # requires a process restart. + # Collector describes which collector to use for collecting traces. The only + # current valid option is "InMemCollector". More can be added by adding + # implementations of the Collector interface. + Collector: "InMemCollector" + + # InMemCollector brings together all the settings that are relevant to + # collecting spans together to make traces. 
+ InMemCollector: + + # The collection cache is used to collect all spans into a trace as well as + # remember the sampling decision for any spans that might come in after the + # trace has been marked "complete" (either by timing out or seeing the root + # span). The number of traces in the cache should be many multiples (100x to + # 1000x) of the total number of concurrently active traces (trace throughput * + # trace duration). + CacheCapacity: 1000 + + # MaxAlloc is optional. If set, it must be an integer >= 0. + # If set to a non-zero value, once per tick (see SendTicker) the collector + # will compare total allocated bytes to this value. If allocation is too + # high, cache capacity will be reduced and an error will be logged. + # Useful values for this setting are generally in the range of 75%-90% of + # available system memory. Using 80% is the recommended. + # This value should be set in according to the resources.limits.memory + # By default that setting is 4GB, and this is set to 85% of that limit + # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,837 + # MaxAlloc: 3435973836 + MaxAlloc: 0 + + ##################### + ## Peer Management ## + ##################### + + # Configure how OpsRamp-Tracing-Proxy peers are discovered and managed + PeerManagement: + # Strategy controls the way that traces are assigned to Trace Proxy nodes. + # The "legacy" strategy uses a simple algorithm that unfortunately causes + # 1/2 of the in-flight traces to be assigned to a different node whenever the + # number of nodes changes. + # The legacy strategy is deprecated and is intended to be removed in a future release. + # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the + # number of nodes) are disrupted when the node count changes. + # Not eligible for live reload. 
+ Strategy: "hash" + + ########################################################### + ###### File (Suitable only for VM based deployments ###### + ###### and single replica k8s deployments) ###### + ########################################################### + Type: "file" + + # Peers is the list of all servers participating in this proxy cluster. Events + # will be sharded evenly across all peers based on the Trace ID. Values here + # should be the base URL used to access the peer, and should include scheme, + # hostname (or ip address) and port. All servers in the cluster should be in + # this list, including this host. + Peers: [ + "http://127.0.0.1:8084", #only grpc peer listener used + ] + ########################################################### + + ########################################################### + ###### Redis (Suitable for all types of deployments) ###### + ########################################################### + ## The type should always be redis when deployed to Kubernetes environments + #Type: "redis" + + ## RedisHost is used to connect to redis for peer cluster membership management. + ## Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes + ## precedence and this value is ignored. + ## Not eligible for live reload. + ## RedisHost will default to the name used for the release or name overrides depending on what is used, + ## but can be overridden to a specific value. + #RedisHost: '{{include "opsramp-tracing-proxy.redis.fullname" .}}:6379' + + ## RedisUsername is the username used to connect to redis for peer cluster membership management. + ## If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes + ## precedence and this value is ignored. + ## Not eligible for live reload. + #RedisUsername: "" + + ## RedisPassword is the password used to connect to redis for peer cluster membership management. 
+ ## If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes + ## precedence and this value is ignored. + ## Not eligible for live reload. + #RedisPassword: "" + + ## RedisPrefix is a string used as a prefix for the keys in redis while storing + ## the peer membership. It might be useful to set this in any situation where + ## multiple trace-proxy clusters or multiple applications want to share a single + ## Redis instance. It may not be blank. + #RedisPrefix: "tracing-proxy" + + ## RedisDatabase is an integer from 0-15 indicating the database number to use + ## for the Redis instance storing the peer membership. It might be useful to set + ## this in any situation where multiple trace-proxy clusters or multiple + ## applications want to share a single Redis instance. + #RedisDatabase: 0 + + ## UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. + ## Not eligible for live reload. + #UseTLS: false + + ## UseTLSInsecure disables certificate checks + ## Not eligible for live reload. + #UseTLSInsecure: false + + ## IdentifierInterfaceName is optional. + ## Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. + ## When configured the pod's IP will be used in the peer list + #IdentifierInterfaceName: eth0 + + ## UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first + ## IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use + ## the first IPV6 unicast address found. + #UseIPV6Identifier: false + ########################################################### + + # LogrusLogger is a section of the config only used if you are using the + # LogrusLogger to send all logs to STDOUT using the logrus package. + LogrusLogger: + # LogFormatter specifies the log format. 
Accepted values are one of ["logfmt", "json"] + LogFormatter: 'json' + # LogOutput specifies where the logs are supposed to be written. Accepts one of ["stdout", "stderr"] + LogOutput: 'stdout' + + MetricsConfig: + # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp + Enable: true + + # ListenAddr determines the interface and port on which Prometheus will + # listen for requests for /metrics. Must be different from the main Trace Proxy + # listener. + ListenAddr: '0.0.0.0:2112' + + # OpsRampAPI is the URL for the upstream OpsRamp API. + OpsRampAPI: "" + + # ReportingInterval is the frequency specified in seconds at which + # the metrics are collected and sent to OpsRamp + ReportingInterval: 10 + + # MetricsList is a list of regular expressions which match the metric + # names. Keep the list as small as possible since too many regular expressions can lead to bad performance. + # Internally, all the items in the list are concatenated using '|' to make the computation faster. + MetricsList: [ ".*" ] + + GRPCServerParameters: + # MaxConnectionIdle is a duration for the amount of time after which an + # idle connection would be closed by sending a GoAway. Idleness duration is + # defined since the most recent time the number of outstanding RPCs became + # zero or the connection establishment. + # 0s sets duration to infinity which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219 + # MaxConnectionIdle: "1m" + + # MaxConnectionAge is a duration for the maximum amount of time a + # connection may exist before it will be closed by sending a GoAway. A + # random jitter of +/-10% will be added to MaxConnectionAge to spread out + # connection storms. 
+ # 0s sets duration to infinity which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 + # MaxConnectionAge: "0s" + + # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + # which the connection will be forcibly closed. + # 0s sets duration to infinity which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 + # MaxConnectionAgeGrace: "0s" + + # After a duration of this time if the server doesn't see any activity it + # pings the client to see if the transport is still alive. + # If set below 1s, a minimum value of 1s will be used instead. + # 0s sets duration to 2 hours which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 + # Time: "10s" + + # After having pinged for keepalive check, the server waits for a duration + # of Timeout and if no activity is seen even after that the connection is + # closed. + # 0s sets duration to 20 seconds which is the default: + # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 + # Timeout: "2s" + + ################################ + ## Sample Cache Configuration ## + ################################ + + # Sample Cache Configuration controls the sample cache used to retain information about trace + # status after the sampling decision has been made. + SampleCacheConfig: + # Type controls the type of sample cache used. + # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is + # 5x the size of the trace cache. This is tracing proxy's original sample cache strategy. 
+ # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember + # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. + # It is also more configurable. The cuckoo filter is recommended for most installations. + # Default is "legacy". + # Type: "cuckoo" + + # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. + # tracing proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some + # statistical information. This is most useful in cases where the trace was sent before sending + # the root span, so that the root span can be decorated with accurate metadata. + # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). + # It Does not apply to the "legacy" type of cache. + # KeptSize: 10_000 + + # DroppedSize controls the size of the cuckoo dropped traces cache. + # This cache consumes 4-6 bytes per trace at a scale of millions of traces. + # Changing its size with live reload sets a future limit, but does not have an immediate effect. + # Default is 1_000_000 traces. + # It Does not apply to the "legacy" type of cache. + # DroppedSize: 1_000_000 + + # SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates + # the remaining capacity of its dropped traces cache and possibly cycles it. + # This cache is quite resilient so it doesn't need to happen very often, but the + # operation is also inexpensive. + # Default is 10 seconds. + # It Does not apply to the "legacy" type of cache. 
+ # SizeCheckInterval: "10s" diff --git a/deploy/tracing-proxy-deployment.yml b/deploy/tracing-proxy-deployment.yml new file mode 100644 index 0000000000..b010828bbf --- /dev/null +++ b/deploy/tracing-proxy-deployment.yml @@ -0,0 +1,64 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: opsramp-tracing-proxy + namespace: opsramp-tracing-proxy + labels: + app: opsramp-tracing-proxy +spec: + replicas: 1 + selector: + matchLabels: + app: opsramp-tracing-proxy + template: + metadata: + labels: + app: opsramp-tracing-proxy + spec: + containers: + - name: opsramp-tracing-proxy + image: us-docker.pkg.dev/opsramp-registry/agent-images/trace-proxy + imagePullPolicy: Always + command: + - "/usr/bin/tracing-proxy" + - "-c" + - "/etc/tracing-proxy/config.yaml" + - "-r" + - "/etc/tracing-proxy/rules.yaml" + ports: + - name: http + containerPort: 8082 + protocol: TCP + - name: grpc + containerPort: 9090 + protocol: TCP + - name: peer + containerPort: 8083 + protocol: TCP + - containerPort: 8084 + name: grpc-peer + resources: + requests: + memory: "2048Mi" + cpu: "2" + limits: + memory: "8096Mi" + cpu: "4" + volumeMounts: + - name: opsramp-tracing-rules + mountPath: /etc/tracing-proxy/rules.yaml + subPath: rules.yaml + readOnly: true + - name: opsramp-tracing-config + mountPath: /etc/tracing-proxy/config.yaml + subPath: config.yaml + readOnly: true + volumes: + - configMap: + name: opsramp-tracing-proxy-rules + name: opsramp-tracing-rules + - configMap: + name: opsramp-tracing-proxy-config + name: opsramp-tracing-config + diff --git a/deploy/tracing-proxy-ns.yml b/deploy/tracing-proxy-ns.yml new file mode 100644 index 0000000000..ddbf0e09cd --- /dev/null +++ b/deploy/tracing-proxy-ns.yml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: opsramp-tracing-proxy \ No newline at end of file diff --git a/deploy/tracing-proxy-rules-cm.yml b/deploy/tracing-proxy-rules-cm.yml new file mode 100644 index 0000000000..b3fdf2942d --- /dev/null +++ 
b/deploy/tracing-proxy-rules-cm.yml @@ -0,0 +1,221 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: opsramp-tracing-proxy-rules + labels: + name: opsramp-tracing-proxy-rules + namespace: opsramp-tracing-proxy +data: + rules.yaml: |- + ############################ + ## Sampling Rules Config ## + ############################ + + # DryRun - If enabled, marks traces that would be dropped given current sampling rules, + # and sends all traces regardless + DryRun: true + + # DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to trace_proxy_kept + DryRunFieldName: trace_proxy_kept + + # DeterministicSampler is a section of the config for manipulating the + # Deterministic Sampler implementation. This is the simplest sampling algorithm + # - it is a static sample rate, choosing traces randomly to either keep or send + # (at the appropriate rate). It is not influenced by the contents of the trace. + Sampler: DeterministicSampler + + # SampleRate is the rate at which to sample. It indicates a ratio, where one + # sample trace is kept for every n traces seen. For example, a SampleRate of 30 + # will keep 1 out of every 30 traces. The choice on whether to keep any specific + # trace is random, so the rate is approximate. + # Eligible for live reload. + SampleRate: 1 + + #dataset1: + # + # # Note: If your dataset name contains a space, you will have to escape the dataset name + # # using single quotes, such as ['dataset 1'] + # + # # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler + # # implementation. This sampler collects the values of a number of fields from a + # # trace and uses them to form a key. This key is handed to the standard dynamic + # # sampler algorithm which generates a sample rate based on the frequency with + # # which that key has appeared in the previous ClearFrequencySec seconds.This + # # sampler uses the AvgSampleRate algorithm from + # # that package. 
+ # Sampler: DynamicSampler + # + # # SampleRate is the goal rate at which to sample. It indicates a ratio, where + # # one sample trace is kept for every n traces seen. For example, a SampleRate of + # # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # # sampler, who assigns a sample rate for each trace based on the fields selected + # # from that trace. + # SampleRate: 2 + # + # # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler. + # # The combination of values from all of these fields should reflect how interesting the trace is compared to + # # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for + # # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent + # # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of + # # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is + # # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a + # # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can + # # become interesting when indicating an error) as a good set of fields since it will allow proper sampling + # # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint. + # # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of + # # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces. + # # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore + # # interesting traces, like traces that experienced a `500`, might not be sampled. 
+ # # Field names may come from any span in the trace. + # FieldList: + # - "" + # + # # UseTraceLength will add the number of spans in the trace in to the dynamic + # # sampler as part of the key. The number of spans is exact, so if there are + # # normally small variations in trace length you may want to leave this off. If + # # traces are consistent lengths and changes in trace length is a useful + # # indicator of traces you'd like to see in OpsRamp, set this to true. + # UseTraceLength: true + # + # # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # # to the root span of the trace containing the key used by the sampler to decide + # # the sample rate. This can be helpful in understanding why the sampler is + # # making certain decisions about sample rate and help you understand how to + # # better choose the sample rate key (aka the FieldList setting above) to use. + # AddSampleRateKeyToTrace: true + # + # # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # # when adding the sample rate key to the trace. This setting is only used when + # # AddSampleRateKeyToTrace is true. + # AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key + # + # # ClearFrequencySec is the name of the field the sampler will use to determine + # # the period over which it will calculate the sample rate. This setting defaults + # # to 30. + # ClearFrequencySec: 60 + #dataset2: + # + # # EMADynamicSampler is a section of the config for manipulating the Exponential + # # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, + # # it attempts to average a given sample rate, weighting rare traffic and frequent + # # traffic differently so as to end up with the correct average. + # # + # # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended + # # for most use cases. 
Based on the DynamicSampler implementation, EMADynamicSampler differs + # # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential + # # Moving Average of counts seen per key, and adjusts this average at regular intervals. + # # The weight applied to more recent intervals is defined by `weight`, a number between + # # (0, 1) - larger values weight the average more toward recent observations. In other words, + # # a larger weight will cause sample rates more quickly adapt to traffic patterns, + # # while a smaller weight will result in sample rates that are less sensitive to bursts or drops + # # in traffic and thus more consistent over time. + # # + # # Keys that are not found in the EMA will always have a sample + # # rate of 1. Keys that occur more frequently will be sampled on a logarithmic + # # curve. In other words, every key will be represented at least once in any + # # given window and more frequent keys will have their sample rate + # # increased proportionally to wind up with the goal sample rate. + # Sampler: EMADynamicSampler + # + # # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where + # # one sample trace is kept for every n traces seen. For example, a SampleRate of + # # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic + # # sampler, who assigns a sample rate for each trace based on the fields selected + # # from that trace. + # GoalSampleRate: 2 + # + # # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler. + # # The combination of values from all of these fields should reflect how interesting the trace is compared to + # # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for + # # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent + # # choice. 
Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of + # # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is + # # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a + # # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can + # # become interesting when indicating an error) as a good set of fields since it will allowing proper sampling + # # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint. + # # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of + # # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces. + # # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore + # # interesting traces, like traces that experienced a `500`, might not be sampled. + # # Field names may come from any span in the trace. + # FieldList: [] + # + # # UseTraceLength will add the number of spans in the trace in to the dynamic + # # sampler as part of the key. The number of spans is exact, so if there are + # # normally small variations in trace length you may want to leave this off. If + # # traces are consistent lengths and changes in trace length is a useful + # # indicator of traces you'd like to see in OpsRamp, set this to true. + # UseTraceLength: true + # + # # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field + # # to the root span of the trace containing the key used by the sampler to decide + # # the sample rate. 
This can be helpful in understanding why the sampler is + # # making certain decisions about sample rate and help you understand how to + # # better choose the sample rate key (aka the FieldList setting above) to use. + # AddSampleRateKeyToTrace: true + # + # # AddSampleRateKeyToTraceField is the name of the field the sampler will use + # # when adding the sample rate key to the trace. This setting is only used when + # # AddSampleRateKeyToTrace is true. + # AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key + # + # # AdjustmentInterval defines how often (in seconds) we adjust the moving average from + # # recent observations. Default 15s + # AdjustmentInterval: 15 + # + # # Weight is a value between (0, 1) indicating the weighting factor used to adjust + # # the EMA. With larger values, newer data will influence the average more, and older + # # values will be factored out more quickly. In mathematical literature concerning EMA, + # # this is referred to as the `alpha` constant. + # # Default is 0.5 + # Weight: 0.5 + # + # # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. + # # Once MaxKeys is reached, new keys will not be included in the sample rate map, but + # # existing keys will continue to be be counted. You can use this to keep the sample rate + # # map size under control. + # MaxKeys: 0 + # + # # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key + # # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to + # # decide what constitutes "zero". Keys with averages below this threshold will be removed + # # from the EMA. Default is the same as Weight, as this prevents a key with the smallest + # # integer value (1) from being aged out immediately. This value should generally be <= Weight, + # # unless you have very specific reasons to set it higher. 
+ # AgeOutValue: 0.5 + # + # # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define + # # the burst detection threshold. If total counts observed for a given interval exceed the threshold + # # EMA is updated immediately, rather than waiting on the AdjustmentInterval. + # # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, + # # burst detection will kick in. + # BurstMultiple: 2 + # + # # BurstDetectionDelay indicates the number of intervals to run after Start is called before + # # burst detection kicks in. + # # Defaults to 3 + # BurstDetectionDelay: 3 + #dataset3: + # Sampler: DeterministicSampler + # SampleRate: 10 + #dataset4: + # Sampler: RulesBasedSampler + # CheckNestedFields: false + # rule: + # # Rule name + # - name: "" + # # Drop Condition (examples: true, false) + # drop: + # condition: + # # Field Name (example: status_code) + # - field: "" + # # Operator Value (example: =) + # operator: "" + # # Field Value (example: 500) + # value: "" + #dataset5: + # Sampler: TotalThroughputSampler + # GoalThroughputPerSec: 100 + # FieldList: '' diff --git a/deploy/tracing-proxy-svc.yml b/deploy/tracing-proxy-svc.yml new file mode 100644 index 0000000000..c6427da19e --- /dev/null +++ b/deploy/tracing-proxy-svc.yml @@ -0,0 +1,26 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: opsramp-tracing-proxy + namespace: opsramp-tracing-proxy +spec: + selector: + app: opsramp-tracing-proxy + ports: + - protocol: TCP + port: 9090 + targetPort: 9090 + name: grpc + - protocol: TCP + port: 8082 + targetPort: 8082 + name: http + - protocol: TCP + port: 8083 + targetPort: 8083 + name: peer + - protocol: TCP + port: 8084 + targetPort: 8084 + name: grpc-peer \ No newline at end of file From 9f2352293f1d08a5378305636d7eabe71e0707ca Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 16 Aug 2023 19:17:18 +0530 Subject: [PATCH 344/351] adding configs for hpe deployments --- Dockerfile | 10 
++-- deploy/app-brigade-manifest.json | 5 +- deploy/tracing-proxy-config-cm.yml | 90 ++++++++++++++--------------- deploy/tracing-proxy-deployment.yml | 16 +++-- start.sh | 72 +++++++++++++++++++++++ 5 files changed, 137 insertions(+), 56 deletions(-) create mode 100755 start.sh diff --git a/Dockerfile b/Dockerfile index 82dab3cfe7..4b51ac69cd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:alpine as builder +FROM --platform=$BUILDPLATFORM golang:alpine as builder RUN apk update && apk add --no-cache git bash ca-certificates && update-ca-certificates @@ -20,13 +20,15 @@ RUN CGO_ENABLED=0 \ -o tracing-proxy \ ./cmd/tracing-proxy -FROM alpine:3.17 +FROM --platform=$BUILDPLATFORM alpine:3.17 -RUN apk update && apk add --no-cache bash ca-certificates && update-ca-certificates +RUN apk update && apk add --no-cache bash jq ca-certificates && update-ca-certificates COPY --from=builder /app/config_complete.yaml /etc/tracing-proxy/config.yaml COPY --from=builder /app/rules_complete.yaml /etc/tracing-proxy/rules.yaml COPY --from=builder /app/tracing-proxy /usr/bin/tracing-proxy -CMD ["/usr/bin/tracing-proxy", "--config", "/etc/tracing-proxy/config.yaml", "--rules_config", "/etc/tracing-proxy/rules.yaml"] \ No newline at end of file +COPY --from=builder /app/start.sh /usr/bin/start.sh + +CMD ["/usr/bin/start.sh"] \ No newline at end of file diff --git a/deploy/app-brigade-manifest.json b/deploy/app-brigade-manifest.json index fa867f38bb..027b0a5862 100644 --- a/deploy/app-brigade-manifest.json +++ b/deploy/app-brigade-manifest.json @@ -28,10 +28,13 @@ } ], "configmap": { - "name": "tracing-proxy", + "name": "tracing-proxy-cm", "comment": "Please include configmap file paths in docker/Dockerfile as needed for tini", "infra": [ "elasticache" + ], + "config": [ + "opsramp-tracing-proxy-creds" ] }, "multi-region": "supported" diff --git a/deploy/tracing-proxy-config-cm.yml b/deploy/tracing-proxy-config-cm.yml index 222c11674c..80055df7d7 100644 --- 
a/deploy/tracing-proxy-config-cm.yml +++ b/deploy/tracing-proxy-config-cm.yml @@ -39,7 +39,7 @@ data: CompressPeerCommunication: true # OpsrampAPI is the URL for the upstream Opsramp API. - OpsrampAPI: "" + OpsrampAPI: "" # Dataset you want to use for sampling Dataset: "ds" @@ -156,7 +156,7 @@ data: # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request. # Once this value is reached, the data is discarded. MaxElapsedTime: 15m - + ######################### ## Proxy Configuration ## ######################### @@ -171,20 +171,20 @@ data: Username: "" # Password takes the proxy password Password: "" - + ################################## ## Authentication Configuration ## ################################## AuthConfiguration: # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made - Endpoint: "" + Endpoint: "" # Key - authentication key provided in OpsRamp Portal - Key: "" + Key: "" # Secret - authentication Secret provided in OpsRamp Portal - Secret: "" + Secret: "" # TenantId - tenant/client id to which the traces are to be posted - TenantId: "" - + TenantId: "" + ############################ ## Implementation Choices ## ############################ @@ -200,7 +200,7 @@ data: # InMemCollector brings together all the settings that are relevant to # collecting spans together to make traces. InMemCollector: - + # The collection cache is used to collect all spans into a trace as well as # remember the sampling decision for any spans that might come in after the # trace has been marked "complete" (either by timing out or seeing the root @@ -208,7 +208,7 @@ data: # 1000x) of the total number of concurrently active traces (trace throughput * # trace duration). CacheCapacity: 1000 - + # MaxAlloc is optional. If set, it must be an integer >= 0. # If set to a non-zero value, once per tick (see SendTicker) the collector # will compare total allocated bytes to this value. 
If allocation is too @@ -220,11 +220,11 @@ data: # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,837 # MaxAlloc: 3435973836 MaxAlloc: 0 - + ##################### ## Peer Management ## ##################### - + # Configure how OpsRamp-Tracing-Proxy peers are discovered and managed PeerManagement: # Strategy controls the way that traces are assigned to Trace Proxy nodes. @@ -241,75 +241,75 @@ data: ###### File (Suitable only for VM based deployments ###### ###### and single replica k8s deployments) ###### ########################################################### - Type: "file" + #Type: "file" # Peers is the list of all servers participating in this proxy cluster. Events # will be sharded evenly across all peers based on the Trace ID. Values here # should be the base URL used to access the peer, and should include scheme, # hostname (or ip address) and port. All servers in the cluster should be in # this list, including this host. - Peers: [ - "http://127.0.0.1:8084", #only grpc peer listener used - ] + #Peers: [ + # "http://127.0.0.1:8084", #only grpc peer listener used + #] ########################################################### - + ########################################################### ###### Redis (Suitable for all types of deployments) ###### ########################################################### ## The type should always be redis when deployed to Kubernetes environments - #Type: "redis" - + Type: "redis" + ## RedisHost is used to connect to redis for peer cluster membership management. ## Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes ## precedence and this value is ignored. ## Not eligible for live reload. ## RedisHost will default to the name used for the release or name overrides depending on what is used, ## but can be overriden to a specific value. 
- #RedisHost: '{{include "opsramp-tracing-proxy.redis.fullname" .}}:6379' - + RedisHost: "" + ## RedisUsername is the username used to connect to redis for peer cluster membership management. ## If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes ## precedence and this value is ignored. ## Not eligible for live reload. - #RedisUsername: "" - + RedisUsername: "" + ## RedisPassword is the password used to connect to redis for peer cluster membership management. ## If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes ## precedence and this value is ignored. ## Not eligible for live reload. - #RedisPassword: "" - + RedisPassword: "" + ## RedisPrefix is a string used as a prefix for the keys in redis while storing ## the peer membership. It might be useful to set this in any situation where ## multiple trace-proxy clusters or multiple applications want to share a single ## Redis instance. It may not be blank. - #RedisPrefix: "tracing-proxy" - + RedisPrefix: "tracing-proxy" + ## RedisDatabase is an integer from 0-15 indicating the database number to use ## for the Redis instance storing the peer membership. It might be useful to set ## this in any situation where multiple trace-proxy clusters or multiple ## applications want to share a single Redis instance. - #RedisDatabase: 0 - + RedisDatabase: 0 + ## UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. ## Not eligible for live reload. - #UseTLS: false - + UseTLS: + ## UseTLSInsecure disables certificate checks ## Not eligible for live reload. - #UseTLSInsecure: false - + UseTLSInsecure: true + ## IdentifierInterfaceName is optional. ## Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. ## When configured the pod's IP will be used in the peer list - #IdentifierInterfaceName: eth0 - + IdentifierInterfaceName: eth0 + ## UseIPV6Identifier is optional. 
If using IdentifierInterfaceName, Trace Proxy will default to the first ## IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use ## the first IPV6 unicast address found. - #UseIPV6Identifier: false + UseIPV6Identifier: false ########################################################### - + # LogrusLogger is a section of the config only used if you are using the # LogrusLogger to send all logs to STDOUT using the logrus package. LogrusLogger: @@ -317,28 +317,28 @@ data: LogFormatter: 'json' # LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr"] LogOutput: 'stdout' - + MetricsConfig: # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp Enable: true - + # ListenAddr determines the interface and port on which Prometheus will # listen for requests for /metrics. Must be different from the main Trace Proxy # listener. ListenAddr: '0.0.0.0:2112' - + # OpsRampAPI is the URL for the upstream OpsRamp API. - OpsRampAPI: "" - + OpsRampAPI: "" + # ReportingInterval is the frequency specified in seconds at which # the metrics are collected and sent to OpsRamp ReportingInterval: 10 - + # MetricsList is a list of regular expressions which match the metric # names. Keep the list as small as possible since too many regular expressions can lead to bad performance. # Internally, all the items in the list are concatenated using '|' to make the computation faster. MetricsList: [ ".*" ] - + GRPCServerParameters: # MaxConnectionIdle is a duration for the amount of time after which an # idle connection would be closed by sending a GoAway. Idleness duration is @@ -413,4 +413,4 @@ data: # operation is also inexpensive. # Default is 10 seconds. # It Does not apply to the "legacy" type of cache. 
- # SizeCheckInterval: "10s" + # SizeCheckInterval: "10s" \ No newline at end of file diff --git a/deploy/tracing-proxy-deployment.yml b/deploy/tracing-proxy-deployment.yml index b010828bbf..8ad61bfda9 100644 --- a/deploy/tracing-proxy-deployment.yml +++ b/deploy/tracing-proxy-deployment.yml @@ -20,12 +20,6 @@ spec: - name: opsramp-tracing-proxy image: us-docker.pkg.dev/opsramp-registry/agent-images/trace-proxy imagePullPolicy: Always - command: - - "/usr/bin/tracing-proxy" - - "-c" - - "/etc/tracing-proxy/config.yaml" - - "-r" - - "/etc/tracing-proxy/rules.yaml" ports: - name: http containerPort: 8082 @@ -54,6 +48,10 @@ spec: mountPath: /etc/tracing-proxy/config.yaml subPath: config.yaml readOnly: true + - name: elastic-cache + mountPath: /config/data + - name: creds + mountPath: /config/data volumes: - configMap: name: opsramp-tracing-proxy-rules @@ -61,4 +59,10 @@ spec: - configMap: name: opsramp-tracing-proxy-config name: opsramp-tracing-config + - configMap: + name: tracing-proxy-cm + name: elastic-cache + - configMap: + name: opsramp-tracing-proxy-creds + name: creds diff --git a/start.sh b/start.sh new file mode 100755 index 0000000000..4f55c8a362 --- /dev/null +++ b/start.sh @@ -0,0 +1,72 @@ +#!/bin/bash + +ELASTICACHE_PATH='/config/data/infra_elasticache.json' + +# Sample Format for ${ELASTICACHE_PATH} +# { +# "elasticache": { +# "host": "master.testing-non-cluster.89rows.usw2.cache.amazonaws.com", +# "host_ro": "replica.testing-non-cluster.89rows.usw2.cache.amazonaws.com", +# "port": 6379, +# "username": "test_user", +# "password": "xxxxxx", +# "tls_mode": true, +# "cluster_mode": "false" +# } +# } + +OPSRAMP_CREDS_PATH='/config/data/opsramp_creds.json' + +# Sample Format for ${OPSRAMP_CREDS_PATH} +# { +# "traces_api": "test.opsramp.net", +# "metrics_api": "test.opsramp.net", +# "auth_api": "test.opsramp.net", +# "key": "sdjfnsakdflasdflksjdkfjsdklfjals", +# "secret": "***REMOVED***", +# "tenant_id": "123e-fsdf-4r234r-dfbfsdbg" +# } + + 
+TRACE_PROXY_CONFIG='/etc/tracing-proxy/final_config.yaml' + +# make copy of the config.yaml file +cp /etc/tracing-proxy/config.yaml ${TRACE_PROXY_CONFIG} + +if [ -r ${ELASTICACHE_PATH} ]; then + # check if the configuration is a object or array + TYPE=$(jq <${ELASTICACHE_PATH} -r .elasticache | jq 'if type=="array" then true else false end') + if [ "${TYPE}" = true ]; then + echo "implement me" + else + REDIS_HOST=$(jq <${ELASTICACHE_PATH} -r '(.elasticache.host)+":"+(.elasticache.port|tostring)') + REDIS_USERNAME=$(jq <${ELASTICACHE_PATH} -r .elasticache.username) + REDIS_PASSWORD=$(jq <${ELASTICACHE_PATH} -r .elasticache.password) + REDIS_TLS_MODE=$(jq <${ELASTICACHE_PATH} -r .elasticache.tls_mode | tr '[:upper:]' '[:lower:]') + + sed -i "s//${REDIS_HOST}/g" ${TRACE_PROXY_CONFIG} + sed -i "s//${REDIS_USERNAME}/g" ${TRACE_PROXY_CONFIG} + sed -i "s//${REDIS_PASSWORD}/g" ${TRACE_PROXY_CONFIG} + sed -i "s//${REDIS_TLS_MODE}/g" ${TRACE_PROXY_CONFIG} + fi +fi + +if [ -r ${OPSRAMP_CREDS_PATH} ]; then + + TRACES_API=$(jq <${OPSRAMP_CREDS_PATH} -r .traces_api) + METRICS_API=$(jq <${OPSRAMP_CREDS_PATH} -r .metrics_api) + AUTH_API=$(jq <${OPSRAMP_CREDS_PATH} -r .auth_api) + KEY=$(jq <${OPSRAMP_CREDS_PATH} -r .key) + SECRET=$(jq <${OPSRAMP_CREDS_PATH} -r .secret) + TENANT_ID=$(jq <${OPSRAMP_CREDS_PATH} -r .tenant_id) + + sed -i "s//${TRACES_API}/g" ${TRACE_PROXY_CONFIG} + sed -i "s//${METRICS_API}/g" ${TRACE_PROXY_CONFIG} + sed -i "s//${AUTH_API}/g" ${TRACE_PROXY_CONFIG} + sed -i "s//${KEY}/g" ${TRACE_PROXY_CONFIG} + sed -i "s//${SECRET}/g" ${TRACE_PROXY_CONFIG} + sed -i "s//${TENANT_ID}/g" ${TRACE_PROXY_CONFIG} +fi + +# start the application +exec /usr/bin/tracing-proxy -c /etc/tracing-proxy/final_config.yaml -r /etc/tracing-proxy/rules.yaml From 1250efb117cdacb459b1ec70cdb2e20cf379786d Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 16 Aug 2023 19:46:14 +0530 Subject: [PATCH 345/351] adding configs for hpe deployments --- deploy/app-brigade-manifest.json | 6 
------ deploy/tracing-proxy-deployment.yml | 23 ++--------------------- 2 files changed, 2 insertions(+), 27 deletions(-) diff --git a/deploy/app-brigade-manifest.json b/deploy/app-brigade-manifest.json index 027b0a5862..e9d389c5f2 100644 --- a/deploy/app-brigade-manifest.json +++ b/deploy/app-brigade-manifest.json @@ -12,12 +12,6 @@ { "filename": "tracing-proxy-svc.yml" }, - { - "filename": "tracing-proxy-config-cm.yml" - }, - { - "filename": "tracing-proxy-rules-cm.yml" - }, { "filename": "tracing-proxy-deployment.yml" } diff --git a/deploy/tracing-proxy-deployment.yml b/deploy/tracing-proxy-deployment.yml index 8ad61bfda9..fbb30dc3c1 100644 --- a/deploy/tracing-proxy-deployment.yml +++ b/deploy/tracing-proxy-deployment.yml @@ -40,29 +40,10 @@ spec: memory: "8096Mi" cpu: "4" volumeMounts: - - name: opsramp-tracing-rules - mountPath: /etc/tracing-proxy/rules.yaml - subPath: rules.yaml - readOnly: true - - name: opsramp-tracing-config - mountPath: /etc/tracing-proxy/config.yaml - subPath: config.yaml - readOnly: true - - name: elastic-cache - mountPath: /config/data - - name: creds + - name: tracing-configs mountPath: /config/data volumes: - - configMap: - name: opsramp-tracing-proxy-rules - name: opsramp-tracing-rules - - configMap: - name: opsramp-tracing-proxy-config - name: opsramp-tracing-config - configMap: name: tracing-proxy-cm - name: elastic-cache - - configMap: - name: opsramp-tracing-proxy-creds - name: creds + name: tracing-configs From d0616f6b4622e87ad98825b630d41a03416e5406 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Wed, 16 Aug 2023 20:46:02 +0530 Subject: [PATCH 346/351] changing configs in hpe deploy directory --- config_complete.yaml | 139 +++++++++++++++++++------------------------ start.sh | 34 ++++++----- 2 files changed, 81 insertions(+), 92 deletions(-) diff --git a/config_complete.yaml b/config_complete.yaml index 1a17e715c9..240c5eb3d7 100644 --- a/config_complete.yaml +++ b/config_complete.yaml @@ -29,7 +29,7 @@ 
GRPCPeerListenAddr: 0.0.0.0:8084 CompressPeerCommunication: true # OpsrampAPI is the URL for the upstream Opsramp API. -OpsrampAPI: "" +OpsrampAPI: "" # Dataset you want to use for sampling Dataset: "ds" @@ -167,13 +167,13 @@ ProxyConfiguration: ################################## AuthConfiguration: # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made - Endpoint: "" + Endpoint: "" # Key - authentication key provided in OpsRamp Portal - Key: "" + Key: "" # Secret - authentication Secret provided in OpsRamp Portal - Secret: "" + Secret: "" # TenantId - tenant/client id to which the traces are to be posted - TenantId: "" + TenantId: "" ############################ ## Implementation Choices ## @@ -228,78 +228,75 @@ PeerManagement: Strategy: "hash" ########################################################### - ###### File (Suitable only for VM based deployments) ###### + ###### File (Suitable only for VM based deployments ###### + ###### and single replica k8s deployments) ###### ########################################################### - # Type: "file" - # - # # Peers is the list of all servers participating in this proxy cluster. Events - # # will be sharded evenly across all peers based on the Trace ID. Values here - # # should be the base URL used to access the peer, and should include scheme, - # # hostname (or ip address) and port. All servers in the cluster should be in - # # this list, including this host. - # Peers: [ - # "http://127.0.0.1:8084", #only grpc peer listener used - # # "http://127.0.0.1:8083", - # # "http://10.1.2.3.4:8080", - # # "http://tracing proxy-1231:8080", - # # "http://peer-3.fqdn" // assumes port 80 - # ] + #Type: "file" + + # Peers is the list of all servers participating in this proxy cluster. Events + # will be sharded evenly across all peers based on the Trace ID. 
Values here + # should be the base URL used to access the peer, and should include scheme, + # hostname (or ip address) and port. All servers in the cluster should be in + # this list, including this host. + #Peers: [ + # "http://127.0.0.1:8084", #only grpc peer listener used + #] ########################################################### ########################################################### ###### Redis (Suitable for all types of deployments) ###### ########################################################### - # The type should always be redis when deployed to Kubernetes environments + ## The type should always be redis when deployed to Kubernetes environments Type: "redis" - # RedisHost is used to connect to redis for peer cluster membership management. - # Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes - # precedence and this value is ignored. - # Not eligible for live reload. - # RedisHost will default to the name used for the release or name overrides depending on what is used, - # but can be overriden to a specific value. - RedisHost: localhost:6379 - - # RedisUsername is the username used to connect to redis for peer cluster membership management. - # If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes - # precedence and this value is ignored. - # Not eligible for live reload. - RedisUsername: "" - - # RedisPassword is the password used to connect to redis for peer cluster membership management. - # If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes - # precedence and this value is ignored. - # Not eligible for live reload. - RedisPassword: "" - - # RedisPrefix is a string used as a prefix for the keys in redis while storing - # the peer membership. It might be useful to set this in any situation where - # multiple trace-proxy clusters or multiple applications want to share a single - # Redis instance. It may not be blank. 
+ ## RedisHost is used to connect to redis for peer cluster membership management. + ## Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes + ## precedence and this value is ignored. + ## Not eligible for live reload. + ## RedisHost will default to the name used for the release or name overrides depending on what is used, + ## but can be overriden to a specific value. + RedisHost: "" + + ## RedisUsername is the username used to connect to redis for peer cluster membership management. + ## If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes + ## precedence and this value is ignored. + ## Not eligible for live reload. + RedisUsername: "" + + ## RedisPassword is the password used to connect to redis for peer cluster membership management. + ## If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes + ## precedence and this value is ignored. + ## Not eligible for live reload. + RedisPassword: "" + + ## RedisPrefix is a string used as a prefix for the keys in redis while storing + ## the peer membership. It might be useful to set this in any situation where + ## multiple trace-proxy clusters or multiple applications want to share a single + ## Redis instance. It may not be blank. RedisPrefix: "tracing-proxy" - # RedisDatabase is an integer from 0-15 indicating the database number to use - # for the Redis instance storing the peer membership. It might be useful to set - # this in any situation where multiple trace-proxy clusters or multiple - # applications want to share a single Redis instance. + ## RedisDatabase is an integer from 0-15 indicating the database number to use + ## for the Redis instance storing the peer membership. It might be useful to set + ## this in any situation where multiple trace-proxy clusters or multiple + ## applications want to share a single Redis instance. 
RedisDatabase: 0 - # UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. - # Not eligible for live reload. - UseTLS: false + ## UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. + ## Not eligible for live reload. + UseTLS: - # UseTLSInsecure disables certificate checks - # Not eligible for live reload. - UseTLSInsecure: false + ## UseTLSInsecure disables certificate checks + ## Not eligible for live reload. + UseTLSInsecure: true - # IdentifierInterfaceName is optional. - # Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. - # When configured the pod's IP will be used in the peer list - # IdentifierInterfaceName: eth0 + ## IdentifierInterfaceName is optional. + ## Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. + ## When configured the pod's IP will be used in the peer list + IdentifierInterfaceName: eth0 - # UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first - # IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use - # the first IPV6 unicast address found. + ## UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first + ## IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use + ## the first IPV6 unicast address found. UseIPV6Identifier: false ########################################################### @@ -308,21 +305,9 @@ PeerManagement: LogrusLogger: # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] LogFormatter: 'json' - # LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr", "file"] + # LogOutput specifies where the logs are supposed to be written. 
Accpets one of ["stdout", "stderr"] LogOutput: 'stdout' - # specifies configs for logs when LogOutput is set to "file" - File: - # FileName specifies the location where the logs are supposed be stored - FileName: "/var/log/opsramp/tracing-proxy.log" - # MaxSize is the maximum size in megabytes of the log file before it gets rotated. - MaxSize: 1 - # MaxBackups is the maximum number of old log files to retain. - MaxBackups: 3 - # Compress determines if the rotated log files should be compressed - # using gzip. - Compress: true - MetricsConfig: # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp Enable: true @@ -333,7 +318,7 @@ MetricsConfig: ListenAddr: '0.0.0.0:2112' # OpsRampAPI is the URL for the upstream OpsRamp API. - OpsRampAPI: "" + OpsRampAPI: "" # ReportingInterval is the frequency specified in seconds at which # the metrics are collected and sent to OpsRamp @@ -418,4 +403,4 @@ SampleCacheConfig: # operation is also inexpensive. # Default is 10 seconds. # It Does not apply to the "legacy" type of cache. 
-# SizeCheckInterval: "10s" +# SizeCheckInterval: "10s" \ No newline at end of file diff --git a/start.sh b/start.sh index 4f55c8a362..f5194c9525 100755 --- a/start.sh +++ b/start.sh @@ -15,23 +15,27 @@ ELASTICACHE_PATH='/config/data/infra_elasticache.json' # } # } -OPSRAMP_CREDS_PATH='/config/data/opsramp_creds.json' +OPSRAMP_CREDS_PATH='/config/data/config_opsramp-tracing-proxy-creds.json' # Sample Format for ${OPSRAMP_CREDS_PATH} # { -# "traces_api": "test.opsramp.net", -# "metrics_api": "test.opsramp.net", -# "auth_api": "test.opsramp.net", -# "key": "sdjfnsakdflasdflksjdkfjsdklfjals", -# "secret": "***REMOVED***", -# "tenant_id": "123e-fsdf-4r234r-dfbfsdbg" +# "opsramp-tracing-proxy-creds": { +# "traces_api": "test.opsramp.net", +# "metrics_api": "test.opsramp.net", +# "auth_api": "test.opsramp.net", +# "key": "sdjfnsakdflasdflksjdkfjsdklfjals", +# "secret": "***REMOVED***", +# "tenant_id": "123e-fsdf-4r234r-dfbfsdbg" +# } # } TRACE_PROXY_CONFIG='/etc/tracing-proxy/final_config.yaml' +TRACE_PROXY_RULES='/etc/tracing-proxy/final_rules.yaml' -# make copy of the config.yaml file +# make copy of the config.yaml & rules.yaml to make sure it works if config maps are mounted cp /etc/tracing-proxy/config.yaml ${TRACE_PROXY_CONFIG} +cp /etc/tracing-proxy/rules.yaml ${TRACE_PROXY_RULES} if [ -r ${ELASTICACHE_PATH} ]; then # check if the configuration is a object or array @@ -53,12 +57,12 @@ fi if [ -r ${OPSRAMP_CREDS_PATH} ]; then - TRACES_API=$(jq <${OPSRAMP_CREDS_PATH} -r .traces_api) - METRICS_API=$(jq <${OPSRAMP_CREDS_PATH} -r .metrics_api) - AUTH_API=$(jq <${OPSRAMP_CREDS_PATH} -r .auth_api) - KEY=$(jq <${OPSRAMP_CREDS_PATH} -r .key) - SECRET=$(jq <${OPSRAMP_CREDS_PATH} -r .secret) - TENANT_ID=$(jq <${OPSRAMP_CREDS_PATH} -r .tenant_id) + TRACES_API=$(jq <${OPSRAMP_CREDS_PATH} -r .opsramp-tracing-proxy-creds.traces_api) + METRICS_API=$(jq <${OPSRAMP_CREDS_PATH} -r .opsramp-tracing-proxy-creds.metrics_api) + AUTH_API=$(jq <${OPSRAMP_CREDS_PATH} -r 
.opsramp-tracing-proxy-creds.auth_api) + KEY=$(jq <${OPSRAMP_CREDS_PATH} -r .opsramp-tracing-proxy-creds.key) + SECRET=$(jq <${OPSRAMP_CREDS_PATH} -r .opsramp-tracing-proxy-creds.secret) + TENANT_ID=$(jq <${OPSRAMP_CREDS_PATH} -r .opsramp-tracing-proxy-creds.tenant_id) sed -i "s//${TRACES_API}/g" ${TRACE_PROXY_CONFIG} sed -i "s//${METRICS_API}/g" ${TRACE_PROXY_CONFIG} @@ -69,4 +73,4 @@ if [ -r ${OPSRAMP_CREDS_PATH} ]; then fi # start the application -exec /usr/bin/tracing-proxy -c /etc/tracing-proxy/final_config.yaml -r /etc/tracing-proxy/rules.yaml +exec /usr/bin/tracing-proxy -c /etc/tracing-proxy/final_config.yaml -r /etc/tracing-proxy/final_rules.yaml From 70f23ed68d0dcafa1b0a5ea6fe18e194e57d2427 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Thu, 17 Aug 2023 11:54:03 +0530 Subject: [PATCH 347/351] updating hpe deployments to support tini and multiregion --- Dockerfile | 31 ++- deploy/app-brigade-manifest.json | 11 +- deploy/tracing-proxy-config-cm.yml | 416 ---------------------------- deploy/tracing-proxy-deployment.yml | 26 +- deploy/tracing-proxy-ns.yml | 5 - deploy/tracing-proxy-rules-cm.yml | 221 --------------- deploy/tracing-proxy-svc.yml | 1 - start.sh | 73 ++++- 8 files changed, 119 insertions(+), 665 deletions(-) delete mode 100644 deploy/tracing-proxy-config-cm.yml delete mode 100644 deploy/tracing-proxy-ns.yml delete mode 100644 deploy/tracing-proxy-rules-cm.yml diff --git a/Dockerfile b/Dockerfile index 4b51ac69cd..8d26e33c0c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,7 +22,7 @@ RUN CGO_ENABLED=0 \ FROM --platform=$BUILDPLATFORM alpine:3.17 -RUN apk update && apk add --no-cache bash jq ca-certificates && update-ca-certificates +RUN apk update && apk add --no-cache bash jq ca-certificates curl && update-ca-certificates COPY --from=builder /app/config_complete.yaml /etc/tracing-proxy/config.yaml COPY --from=builder /app/rules_complete.yaml /etc/tracing-proxy/rules.yaml @@ -31,4 +31,33 @@ COPY --from=builder /app/tracing-proxy 
/usr/bin/tracing-proxy COPY --from=builder /app/start.sh /usr/bin/start.sh +#Setting up tini +ENV TINI_URL_ARM="https://coreupdate.central.arubanetworks.com/packages/tini-arm64" +ENV TINI_ESUM_ARM="c3c8377b2b6bd62e8086be40ce967dd4a6910cec69b475992eff1800ec44b08e" +ENV TINI_ESUM_AMD="57a120ebc06d16b3fae6a60b6b16da5a20711db41f8934c2089dea0d3eaa4f70" +ENV TINI_URL_AMD="https://coreupdate.central.arubanetworks.com/packages/tini-amd64" + +RUN set -eux; \ + ARCH="$(uname -m)"; \ + case "${ARCH}" in \ + aarch64|arm64) \ + ESUM=$TINI_ESUM_ARM; \ + BINARY_URL=$TINI_URL_ARM; \ + ;; \ + amd64|x86_64) \ + ESUM=$TINI_ESUM_AMD; \ + BINARY_URL=$TINI_URL_AMD; \ + ;; \ + esac; \ + \ + curl -fL -o /usr/local/bin/tini "${BINARY_URL}"; \ + echo "${ESUM} /usr/local/bin/tini" | sha256sum -c -; \ + chmod +x /usr/local/bin/tini + +ENTRYPOINT ["tini", \ + "-F", "/config/data/infra_elasticache.json", \ + "-F", "/config/data/infra_clusterinfo.json", \ + "-F", "/config/data/config_tracing-proxy.json", \ + "--"] + CMD ["/usr/bin/start.sh"] \ No newline at end of file diff --git a/deploy/app-brigade-manifest.json b/deploy/app-brigade-manifest.json index e9d389c5f2..386f274deb 100644 --- a/deploy/app-brigade-manifest.json +++ b/deploy/app-brigade-manifest.json @@ -6,9 +6,6 @@ { "stagename": "deployment", "payload": [ - { - "filename": "tracing-proxy-ns.yml" - }, { "filename": "tracing-proxy-svc.yml" }, @@ -25,11 +22,13 @@ "name": "tracing-proxy-cm", "comment": "Please include configmap file paths in docker/Dockerfile as needed for tini", "infra": [ - "elasticache" + "elasticache", + "clusterinfo" ], "config": [ - "opsramp-tracing-proxy-creds" + "tracing-proxy" ] }, - "multi-region": "supported" + "multi-region": "supported", + "namespace": "opsramp-tracing-proxy" } \ No newline at end of file diff --git a/deploy/tracing-proxy-config-cm.yml b/deploy/tracing-proxy-config-cm.yml deleted file mode 100644 index 80055df7d7..0000000000 --- a/deploy/tracing-proxy-config-cm.yml +++ /dev/null @@ -1,416 
+0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: opsramp-tracing-proxy-config - labels: - name: opsramp-tracing-proxy-config - namespace: opsramp-tracing-proxy -data: - config.yaml: |- - ######################## - ## Trace Proxy Config ## - ######################## - - # ListenAddr is the IP and port on which to listen for incoming events. Incoming - # traffic is expected to be HTTP, so if using SSL put something like nginx in - # front to do the TLS Termination. - ListenAddr: 0.0.0.0:8082 - - # GRPCListenAddr is the IP and port on which to listen for incoming events over - # gRPC. Incoming traffic is expected to be unencrypted, so if using SSL put something like nginx in - # front to do the TLS Termination. - GRPCListenAddr: 0.0.0.0:9090 - - # PeerListenAddr is the IP and port on which to listen for traffic being - # rerouted from a peer. Peer traffic is expected to be HTTP, so if using SSL - # put something like nginx in front to do the decryption. Must be different from - # ListenAddr - PeerListenAddr: 0.0.0.0:8083 - - GRPCPeerListenAddr: 0.0.0.0:8084 - - # CompressPeerCommunication determines whether to compress span data - # it forwards to peers. If it costs money to transmit data between different - # instances (e.g. they're spread across AWS availability zones), then you - # almost certainly want compression enabled to reduce your bill. The option to - # disable it is provided as an escape hatch for deployments that value lower CPU - # utilization over data transfer costs. - CompressPeerCommunication: true - - # OpsrampAPI is the URL for the upstream Opsramp API. - OpsrampAPI: "" - - # Dataset you want to use for sampling - Dataset: "ds" - - #Tls Options - UseTls: true - UseTlsInsecure: false - - # LoggingLevel valid options are "debug", "info", "error", and "panic". - LoggingLevel: error - - # SendDelay is a short timer that will be triggered when a trace is complete. 
- # Trace Proxy will wait for this duration before actually sending the trace. The - # reason for this short delay is to allow for small network delays or clock - # jitters to elapse and any final spans to arrive before actually sending the - # trace. This supports duration strings with supplied units. Set to 0 for - # immediate sends. - SendDelay: 2s - - # BatchTimeout dictates how frequently to send unfulfilled batches. By default - # this will use the DefaultBatchTimeout in libtrace as its value, which is 100ms. - # Eligible for live reload. - BatchTimeout: 1s - - # TraceTimeout is a long timer; it represents the outside boundary of how long - # to wait before sending an incomplete trace. Normally traces are sent when the - # root span arrives. Sometimes the root span never arrives (due to crashes or - # whatever), and this timer will send a trace even without having received the - # root span. If you have particularly long-lived traces you should increase this - # timer. This supports duration strings with supplied units. - TraceTimeout: 60s - - # MaxBatchSize is the number of events to be included in the batch for sending - MaxBatchSize: 500 - - # SendTicker is a short timer; it determines the duration to use to check for traces to send - SendTicker: 100ms - - # UpstreamBufferSize and PeerBufferSize control how large of an event queue to use - # when buffering events that will be forwarded to peers or the upstream API. - UpstreamBufferSize: 1000 - PeerBufferSize: 1000 - - # AddHostMetadataToTrace determines whether to add information about - # the host that tracing proxy is running on to the spans that it processes. - # If enabled, information about the host will be added to each span with the - # key 'meta.local_hostname'. 
- AddHostMetadataToTrace: false - - # AddAdditionalMetadata adds all the specified key value pairs to the traces and metrics - # the values must be a valid json key value pair like eg: {"key_1":"value_1", "key_2":"value_2"} - # max number of additional keys supported is 5, if the limit exceeds then we considered the first 5 - # based on sorted order of keys - # "app" label is mandatory - AddAdditionalMetadata: { "app": "default" } - - # EnvironmentCacheTTL is the amount of time a cache entry will live that associates - # an API key with an environment name. - # Cache misses lookup the environment name using OpsRampAPI config value. - # Default is 1 hour ("1h"). - EnvironmentCacheTTL: "1h" - - # QueryAuthToken, if specified, provides a token that must be specified with - # the header "X-OpsRamp-Tracing-Proxy-Query" in order for a /query request to succeed. - # These /query requests are intended for debugging OpsRamp-Tracing-Proxy installations and - # are not typically needed in normal operation. - # Can be specified in the environment as TRACING_PROXY_QUERY_AUTH_TOKEN. - # If left unspecified, the /query endpoints are inaccessible. - # QueryAuthToken: "some-random-value" - - # AddRuleReasonToTrace causes traces that are sent to OpsRamp to include a field which - # contains text indicating which rule was evaluated that caused the trace to be included. - AddRuleReasonToTrace: true - - # AdditionalErrorFields should be a list of span fields that should be included when logging - # errors that happen during ingestion of events (for example, the span too large error). - # This is primarily useful in trying to track down misbehaving senders in a large installation. - # The fields `dataset`, `apihost`, and `environment` are always included. - # If a field is not present in the span, it will not be present in the error log. - # Default is ["trace.span_id"]. 
- AdditionalErrorFields: - - trace.span_id - - # AddSpanCountToRoot adds a new metadata field, `meta.span_count` to root spans to indicate - # the number of child spans on the trace at the time the sampling decision was made. - # This value is available to the rules-based sampler, making it possible to write rules that - # are dependent upon the number of spans in the trace. - # Default is false. - AddSpanCountToRoot: false - - # CacheOverrunStrategy controls the cache management behavior under memory pressure. - # "resize" means that when a cache overrun occurs, the cache is shrunk and never grows again, - # which is generally not helpful unless it occurs because of a permanent change in traffic patterns. - # In the "impact" strategy, the items having the most impact on the cache size are - # ejected from the cache earlier than normal but the cache is not resized. - # In all cases, it only applies if MaxAlloc is nonzero. - # Default is "resize" for compatibility but "impact" is recommended for most installations. - CacheOverrunStrategy: "impact" - - ######################### - ## Retry Configuration ## - ######################### - RetryConfiguration: - # InitialInterval the time to wait after the first failure before retrying. - InitialInterval: 500ms - # RandomizationFactor is a random factor used to calculate next backoff - # Randomized interval = RetryInterval * (1 ± RandomizationFactor) - RandomizationFactor: 0.5 - # Multiplier is the value multiplied by the backoff interval bounds - Multiplier: 1.5 - # MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between - # consecutive retries will always be `MaxInterval`. - MaxInterval: 60s - # MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request. - # Once this value is reached, the data is discarded. 
- MaxElapsedTime: 15m - - ######################### - ## Proxy Configuration ## - ######################### - ProxyConfiguration: - # Protocol accepts http and https - Protocol: "http" - # Host takes the proxy server address - Host: "" - # Port takes the proxy server port - Port: 3128 - # UserName takes the proxy username - Username: "" - # Password takes the proxy password - Password: "" - - ################################## - ## Authentication Configuration ## - ################################## - AuthConfiguration: - # Endpoint - the APIServer address provided in OpsRamp Portal to which auth token request is to be made - Endpoint: "" - # Key - authentication key provided in OpsRamp Portal - Key: "" - # Secret - authentication Secret provided in OpsRamp Portal - Secret: "" - # TenantId - tenant/client id to which the traces are to be posted - TenantId: "" - - ############################ - ## Implementation Choices ## - ############################ - # Each of the config options below chooses an implementation of a Trace Proxy - # component to use. Depending on the choice, there may be more configuration - # required below in the section for that choice. Changing implementation choices - # requires a process restart. - # Collector describes which collector to use for collecting traces. The only - # current valid option is "InMemCollector". More can be added by adding - # implementations of the Collector interface. - Collector: "InMemCollector" - - # InMemCollector brings together all the settings that are relevant to - # collecting spans together to make traces. - InMemCollector: - - # The collection cache is used to collect all spans into a trace as well as - # remember the sampling decision for any spans that might come in after the - # trace has been marked "complete" (either by timing out or seeing the root - # span). 
The number of traces in the cache should be many multiples (100x to - # 1000x) of the total number of concurrently active traces (trace throughput * - # trace duration). - CacheCapacity: 1000 - - # MaxAlloc is optional. If set, it must be an integer >= 0. - # If set to a non-zero value, once per tick (see SendTicker) the collector - # will compare total allocated bytes to this value. If allocation is too - # high, cache capacity will be reduced and an error will be logged. - # Useful values for this setting are generally in the range of 75%-90% of - # available system memory. Using 80% is the recommended. - # This value should be set in according to the resources.limits.memory - # By default that setting is 4GB, and this is set to 85% of that limit - # 4 * 1024 * 1024 * 1024 * 0.80 = 3,435,973,837 - # MaxAlloc: 3435973836 - MaxAlloc: 0 - - ##################### - ## Peer Management ## - ##################### - - # Configure how OpsRamp-Tracing-Proxy peers are discovered and managed - PeerManagement: - # Strategy controls the way that traces are assigned to Trace Proxy nodes. - # The "legacy" strategy uses a simple algorithm that unfortunately causes - # 1/2 of the in-flight traces to be assigned to a different node whenever the - # number of nodes changes. - # The legacy strategy is deprecated and is intended to be removed in a future release. - # The "hash" strategy is strongly recommended, as only 1/N traces (where N is the - # number of nodes) are disrupted when the node count changes. - # Not eligible for live reload. - Strategy: "hash" - - ########################################################### - ###### File (Suitable only for VM based deployments ###### - ###### and single replica k8s deployments) ###### - ########################################################### - #Type: "file" - - # Peers is the list of all servers participating in this proxy cluster. Events - # will be sharded evenly across all peers based on the Trace ID. 
Values here - # should be the base URL used to access the peer, and should include scheme, - # hostname (or ip address) and port. All servers in the cluster should be in - # this list, including this host. - #Peers: [ - # "http://127.0.0.1:8084", #only grpc peer listener used - #] - ########################################################### - - ########################################################### - ###### Redis (Suitable for all types of deployments) ###### - ########################################################### - ## The type should always be redis when deployed to Kubernetes environments - Type: "redis" - - ## RedisHost is used to connect to redis for peer cluster membership management. - ## Further, if the environment variable 'TRACING_PROXY_REDIS_HOST' is set it takes - ## precedence and this value is ignored. - ## Not eligible for live reload. - ## RedisHost will default to the name used for the release or name overrides depending on what is used, - ## but can be overriden to a specific value. - RedisHost: "" - - ## RedisUsername is the username used to connect to redis for peer cluster membership management. - ## If the environment variable 'TRACING_PROXY_REDIS_USERNAME' is set it takes - ## precedence and this value is ignored. - ## Not eligible for live reload. - RedisUsername: "" - - ## RedisPassword is the password used to connect to redis for peer cluster membership management. - ## If the environment variable 'TRACING_PROXY_REDIS_PASSWORD' is set it takes - ## precedence and this value is ignored. - ## Not eligible for live reload. - RedisPassword: "" - - ## RedisPrefix is a string used as a prefix for the keys in redis while storing - ## the peer membership. It might be useful to set this in any situation where - ## multiple trace-proxy clusters or multiple applications want to share a single - ## Redis instance. It may not be blank. 
- RedisPrefix: "tracing-proxy" - - ## RedisDatabase is an integer from 0-15 indicating the database number to use - ## for the Redis instance storing the peer membership. It might be useful to set - ## this in any situation where multiple trace-proxy clusters or multiple - ## applications want to share a single Redis instance. - RedisDatabase: 0 - - ## UseTLS enables TLS when connecting to redis for peer cluster membership management, and sets the MinVersion to 1.2. - ## Not eligible for live reload. - UseTLS: - - ## UseTLSInsecure disables certificate checks - ## Not eligible for live reload. - UseTLSInsecure: true - - ## IdentifierInterfaceName is optional. - ## Due to the nature of DNS in Kubernetes, it is recommended to set this value to the 'eth0' interface name. - ## When configured the pod's IP will be used in the peer list - IdentifierInterfaceName: eth0 - - ## UseIPV6Identifier is optional. If using IdentifierInterfaceName, Trace Proxy will default to the first - ## IPv4 unicast address it finds for the specified interface. If UseIPV6Identifier is used, will use - ## the first IPV6 unicast address found. - UseIPV6Identifier: false - ########################################################### - - # LogrusLogger is a section of the config only used if you are using the - # LogrusLogger to send all logs to STDOUT using the logrus package. - LogrusLogger: - # LogFormatter specifies the log format. Accepted values are one of ["logfmt", "json"] - LogFormatter: 'json' - # LogOutput specifies where the logs are supposed to be written. Accpets one of ["stdout", "stderr"] - LogOutput: 'stdout' - - MetricsConfig: - # Enable specifies whether the metrics are supposed to be collected and exported to OpsRamp - Enable: true - - # ListenAddr determines the interface and port on which Prometheus will - # listen for requests for /metrics. Must be different from the main Trace Proxy - # listener. 
- ListenAddr: '0.0.0.0:2112' - - # OpsRampAPI is the URL for the upstream OpsRamp API. - OpsRampAPI: "" - - # ReportingInterval is the frequency specified in seconds at which - # the metrics are collected and sent to OpsRamp - ReportingInterval: 10 - - # MetricsList is a list of regular expressions which match the metric - # names. Keep the list as small as possible since too many regular expressions can lead to bad performance. - # Internally, all the items in the list are concatenated using '|' to make the computation faster. - MetricsList: [ ".*" ] - - GRPCServerParameters: - # MaxConnectionIdle is a duration for the amount of time after which an - # idle connection would be closed by sending a GoAway. Idleness duration is - # defined since the most recent time the number of outstanding RPCs became - # zero or the connection establishment. - # 0s sets duration to infinity which is the default: - # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L217-L219 - # MaxConnectionIdle: "1m" - - # MaxConnectionAge is a duration for the maximum amount of time a - # connection may exist before it will be closed by sending a GoAway. A - # random jitter of +/-10% will be added to MaxConnectionAge to spread out - # connection storms. - # 0s sets duration to infinity which is the default: - # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L220-L222 - # MaxConnectionAge: "0s" - - # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after - # which the connection will be forcibly closed. 
- # 0s sets duration to infinity which is the default: - # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L225-L227 - # MaxConnectionAgeGrace: "0s" - - # After a duration of this time if the server doesn't see any activity it - # pings the client to see if the transport is still alive. - # If set below 1s, a minimum value of 1s will be used instead. - # 0s sets duration to 2 hours which is the default: - # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L228-L230 - # Time: "10s" - - # After having pinged for keepalive check, the server waits for a duration - # of Timeout and if no activity is seen even after that the connection is - # closed. - # 0s sets duration to 20 seconds which is the default: - # https://github.com/grpc/grpc-go/blob/60a3a7e969c401ca16dbcd0108ad544fb35aa61c/internal/transport/http2_server.go#L231-L233 - # Timeout: "2s" - - ################################ - ## Sample Cache Configuration ## - ################################ - - # Sample Cache Configuration controls the sample cache used to retain information about trace - # status after the sampling decision has been made. - SampleCacheConfig: - # Type controls the type of sample cache used. - # "legacy" is a strategy where both keep and drop decisions are stored in a circular buffer that is - # 5x the size of the trace cache. This is tracing proxy's original sample cache strategy. - # "cuckoo" is a strategy where dropped traces are preserved in a "Cuckoo Filter", which can remember - # a much larger number of dropped traces, leaving capacity to retain a much larger number of kept traces. - # It is also more configurable. The cuckoo filter is recommended for most installations. - # Default is "legacy". - # Type: "cuckoo" - - # KeptSize controls the number of traces preserved in the cuckoo kept traces cache. 
- # tracing proxy keeps a record of each trace that was kept and sent to OpsRamp, along with some - # statistical information. This is most useful in cases where the trace was sent before sending - # the root span, so that the root span can be decorated with accurate metadata. - # Default is 10_000 traces (each trace in this cache consumes roughly 200 bytes). - # It Does not apply to the "legacy" type of cache. - # KeptSize: 10_000 - - # DroppedSize controls the size of the cuckoo dropped traces cache. - # This cache consumes 4-6 bytes per trace at a scale of millions of traces. - # Changing its size with live reload sets a future limit, but does not have an immediate effect. - # Default is 1_000_000 traces. - # It Does not apply to the "legacy" type of cache. - # DroppedSize: 1_000_000 - - # SizeCheckInterval controls the duration of how often the cuckoo cache re-evaluates - # the remaining capacity of its dropped traces cache and possibly cycles it. - # This cache is quite resilient so it doesn't need to happen very often, but the - # operation is also inexpensive. - # Default is 10 seconds. - # It Does not apply to the "legacy" type of cache. 
- # SizeCheckInterval: "10s" \ No newline at end of file diff --git a/deploy/tracing-proxy-deployment.yml b/deploy/tracing-proxy-deployment.yml index fbb30dc3c1..089c875c60 100644 --- a/deploy/tracing-proxy-deployment.yml +++ b/deploy/tracing-proxy-deployment.yml @@ -3,11 +3,12 @@ apiVersion: apps/v1 kind: Deployment metadata: name: opsramp-tracing-proxy - namespace: opsramp-tracing-proxy labels: app: opsramp-tracing-proxy + appid: "${appid}" + version: "${version}" spec: - replicas: 1 + replicas: 3 selector: matchLabels: app: opsramp-tracing-proxy @@ -15,11 +16,28 @@ spec: metadata: labels: app: opsramp-tracing-proxy + name: opsramp-tracing-proxy + version: "${version}" spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: name + operator: In + values: + - opsramp-tracing-proxy + topologyKey: kubernetes.io/hostname + weight: 100 + imagePullSecrets: + - name: quay.io + restartPolicy: Always containers: - name: opsramp-tracing-proxy - image: us-docker.pkg.dev/opsramp-registry/agent-images/trace-proxy - imagePullPolicy: Always + image: ${docker_image} + imagePullPolicy: IfNotPresent ports: - name: http containerPort: 8082 diff --git a/deploy/tracing-proxy-ns.yml b/deploy/tracing-proxy-ns.yml deleted file mode 100644 index ddbf0e09cd..0000000000 --- a/deploy/tracing-proxy-ns.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - name: opsramp-tracing-proxy \ No newline at end of file diff --git a/deploy/tracing-proxy-rules-cm.yml b/deploy/tracing-proxy-rules-cm.yml deleted file mode 100644 index b3fdf2942d..0000000000 --- a/deploy/tracing-proxy-rules-cm.yml +++ /dev/null @@ -1,221 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: opsramp-tracing-proxy-rules - labels: - name: opsramp-tracing-proxy-rules - namespace: opsramp-tracing-proxy -data: - rules.yaml: |- - ############################ - ## Sampling Rules Config ## - 
############################ - - # DryRun - If enabled, marks traces that would be dropped given current sampling rules, - # and sends all traces regardless - DryRun: true - - # DryRunFieldName - the key to add to use to add to event data when using DryRun mode above, defaults to trace_proxy_kept - DryRunFieldName: trace_proxy_kept - - # DeterministicSampler is a section of the config for manipulating the - # Deterministic Sampler implementation. This is the simplest sampling algorithm - # - it is a static sample rate, choosing traces randomly to either keep or send - # (at the appropriate rate). It is not influenced by the contents of the trace. - Sampler: DeterministicSampler - - # SampleRate is the rate at which to sample. It indicates a ratio, where one - # sample trace is kept for every n traces seen. For example, a SampleRate of 30 - # will keep 1 out of every 30 traces. The choice on whether to keep any specific - # trace is random, so the rate is approximate. - # Eligible for live reload. - SampleRate: 1 - - #dataset1: - # - # # Note: If your dataset name contains a space, you will have to escape the dataset name - # # using single quotes, such as ['dataset 1'] - # - # # DynamicSampler is a section of the config for manipulating the simple Dynamic Sampler - # # implementation. This sampler collects the values of a number of fields from a - # # trace and uses them to form a key. This key is handed to the standard dynamic - # # sampler algorithm which generates a sample rate based on the frequency with - # # which that key has appeared in the previous ClearFrequencySec seconds.This - # # sampler uses the AvgSampleRate algorithm from - # # that package. - # Sampler: DynamicSampler - # - # # SampleRate is the goal rate at which to sample. It indicates a ratio, where - # # one sample trace is kept for every n traces seen. For example, a SampleRate of - # # 30 will keep 1 out of every 30 traces. 
This rate is handed to the dynamic - # # sampler, who assigns a sample rate for each trace based on the fields selected - # # from that trace. - # SampleRate: 2 - # - # # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler. - # # The combination of values from all of these fields should reflect how interesting the trace is compared to - # # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for - # # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent - # # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of - # # fields essentially makes them unique, the dynamic sampler will sample everything. If the combination of fields is - # # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a - # # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can - # # become interesting when indicating an error) as a good set of fields since it will allowing proper sampling - # # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint. - # # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of - # # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces. - # # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore - # # interesting traces, like traces that experienced a `500`, might not be sampled. - # # Field names may come from any span in the trace. - # FieldList: - # - "" - # - # # UseTraceLength will add the number of spans in the trace in to the dynamic - # # sampler as part of the key. 
The number of spans is exact, so if there are - # # normally small variations in trace length you may want to leave this off. If - # # traces are consistent lengths and changes in trace length is a useful - # # indicator of traces you'd like to see in OpsRamp, set this to true. - # UseTraceLength: true - # - # # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field - # # to the root span of the trace containing the key used by the sampler to decide - # # the sample rate. This can be helpful in understanding why the sampler is - # # making certain decisions about sample rate and help you understand how to - # # better choose the sample rate key (aka the FieldList setting above) to use. - # AddSampleRateKeyToTrace: true - # - # # AddSampleRateKeyToTraceField is the name of the field the sampler will use - # # when adding the sample rate key to the trace. This setting is only used when - # # AddSampleRateKeyToTrace is true. - # AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key - # - # # ClearFrequencySec is the name of the field the sampler will use to determine - # # the period over which it will calculate the sample rate. This setting defaults - # # to 30. - # ClearFrequencySec: 60 - #dataset2: - # - # # EMADynamicSampler is a section of the config for manipulating the Exponential - # # Moving Average (EMA) Dynamic Sampler implementation. Like the simple DynamicSampler, - # # it attempts to average a given sample rate, weighting rare traffic and frequent - # # traffic differently so as to end up with the correct average. - # # - # # EMADynamicSampler is an improvement upon the simple DynamicSampler and is recommended - # # for most use cases. Based on the DynamicSampler implementation, EMADynamicSampler differs - # # in that rather than compute rate based on a periodic sample of traffic, it maintains an Exponential - # # Moving Average of counts seen per key, and adjusts this average at regular intervals. 
- # # The weight applied to more recent intervals is defined by `weight`, a number between - # # (0, 1) - larger values weight the average more toward recent observations. In other words, - # # a larger weight will cause sample rates more quickly adapt to traffic patterns, - # # while a smaller weight will result in sample rates that are less sensitive to bursts or drops - # # in traffic and thus more consistent over time. - # # - # # Keys that are not found in the EMA will always have a sample - # # rate of 1. Keys that occur more frequently will be sampled on a logarithmic - # # curve. In other words, every key will be represented at least once in any - # # given window and more frequent keys will have their sample rate - # # increased proportionally to wind up with the goal sample rate. - # Sampler: EMADynamicSampler - # - # # GoalSampleRate is the goal rate at which to sample. It indicates a ratio, where - # # one sample trace is kept for every n traces seen. For example, a SampleRate of - # # 30 will keep 1 out of every 30 traces. This rate is handed to the dynamic - # # sampler, who assigns a sample rate for each trace based on the fields selected - # # from that trace. - # GoalSampleRate: 2 - # - # # FieldList is a list of all the field names to use to form the key that will be handed to the dynamic sampler. - # # The combination of values from all of these fields should reflect how interesting the trace is compared to - # # another. A good field selection has consistent values for high-frequency, boring traffic, and unique values for - # # outliers and interesting traffic. Including an error field (or something like HTTP status code) is an excellent - # # choice. Using fields with very high cardinality (like `k8s.pod.id`), is a bad choice. If the combination of - # # fields essentially makes them unique, the dynamic sampler will sample everything. 
If the combination of fields is - # # not unique enough, you will not be guaranteed samples of the most interesting traces. As an example, consider a - # # combination of HTTP endpoint (high-frequency and boring), HTTP method, and status code (normally boring but can - # # become interesting when indicating an error) as a good set of fields since it will allow proper sampling - # # of all endpoints under normal traffic and call out when there is failing traffic to any endpoint. - # # For example, in contrast, consider a combination of HTTP endpoint, status code, and pod id as a bad set of - # # fields, since it would result in keys that are all unique, and therefore results in sampling 100% of traces. - # # Using only the HTTP endpoint field would be a **bad** choice, as it is not unique enough and therefore - # # interesting traces, like traces that experienced a `500`, might not be sampled. - # # Field names may come from any span in the trace. - # FieldList: [] - # - # # UseTraceLength will add the number of spans in the trace into the dynamic - # # sampler as part of the key. The number of spans is exact, so if there are - # # normally small variations in trace length you may want to leave this off. If - # # traces are consistent lengths and changes in trace length is a useful - # # indicator of traces you'd like to see in OpsRamp, set this to true. - # UseTraceLength: true - # - # # AddSampleRateKeyToTrace when this is set to true, the sampler will add a field - # # to the root span of the trace containing the key used by the sampler to decide - # # the sample rate. This can be helpful in understanding why the sampler is - # # making certain decisions about sample rate and help you understand how to - # # better choose the sample rate key (aka the FieldList setting above) to use. - # AddSampleRateKeyToTrace: true - # - # # AddSampleRateKeyToTraceField is the name of the field the sampler will use - # # when adding the sample rate key to the trace. 
This setting is only used when - # # AddSampleRateKeyToTrace is true. - # AddSampleRateKeyToTraceField: meta.tracing-proxy.dynsampler_key - # - # # AdjustmentInterval defines how often (in seconds) we adjust the moving average from - # # recent observations. Default 15s - # AdjustmentInterval: 15 - # - # # Weight is a value between (0, 1) indicating the weighting factor used to adjust - # # the EMA. With larger values, newer data will influence the average more, and older - # # values will be factored out more quickly. In mathematical literature concerning EMA, - # # this is referred to as the `alpha` constant. - # # Default is 0.5 - # Weight: 0.5 - # - # # MaxKeys, if greater than 0, limits the number of distinct keys tracked in EMA. - # # Once MaxKeys is reached, new keys will not be included in the sample rate map, but - # # existing keys will continue to be counted. You can use this to keep the sample rate - # # map size under control. - # MaxKeys: 0 - # - # # AgeOutValue indicates the threshold for removing keys from the EMA. The EMA of any key - # # will approach 0 if it is not repeatedly observed, but will never truly reach it, so we have to - # # decide what constitutes "zero". Keys with averages below this threshold will be removed - # # from the EMA. Default is the same as Weight, as this prevents a key with the smallest - # # integer value (1) from being aged out immediately. This value should generally be <= Weight, - # # unless you have very specific reasons to set it higher. - # AgeOutValue: 0.5 - # - # # BurstMultiple, if set, is multiplied by the sum of the running average of counts to define - # # the burst detection threshold. If total counts observed for a given interval exceed the threshold - # # EMA is updated immediately, rather than waiting on the AdjustmentInterval. - # # Defaults to 2; negative value disables. With a default of 2, if your traffic suddenly doubles, - # # burst detection will kick in. 
- # BurstMultiple: 2 - # - # # BurstDetectionDelay indicates the number of intervals to run after Start is called before - # # burst detection kicks in. - # # Defaults to 3 - # BurstDetectionDelay: 3 - #dataset3: - # Sampler: DeterministicSampler - # SampleRate: 10 - #dataset4: - # Sampler: RulesBasedSampler - # CheckNestedFields: false - # rule: - # # Rule name - # - name: "" - # # Drop Condition (examples: true, false) - # drop: - # condition: - # # Field Name (example: status_code) - # - field: "" - # # Operator Value (example: =) - # operator: "" - # # Field Value (example: 500) - # value: "" - #dataset5: - # Sampler: TotalThroughputSampler - # GoalThroughputPerSec: 100 - # FieldList: '' diff --git a/deploy/tracing-proxy-svc.yml b/deploy/tracing-proxy-svc.yml index c6427da19e..9a5e241500 100644 --- a/deploy/tracing-proxy-svc.yml +++ b/deploy/tracing-proxy-svc.yml @@ -3,7 +3,6 @@ apiVersion: v1 kind: Service metadata: name: opsramp-tracing-proxy - namespace: opsramp-tracing-proxy spec: selector: app: opsramp-tracing-proxy diff --git a/start.sh b/start.sh index f5194c9525..ef179b81a4 100755 --- a/start.sh +++ b/start.sh @@ -1,12 +1,13 @@ #!/bin/bash +CLUSTERINFO_PATH='/config/data/infra_clusterinfo.json' ELASTICACHE_PATH='/config/data/infra_elasticache.json' # Sample Format for ${ELASTICACHE_PATH} # { # "elasticache": { -# "host": "master.testing-non-cluster.89rows.usw2.cache.amazonaws.com", -# "host_ro": "replica.testing-non-cluster.89rows.usw2.cache.amazonaws.com", +# "host": "some_url", +# "host_ro": "some_url", # "port": 6379, # "username": "test_user", # "password": "xxxxxx", @@ -14,12 +15,35 @@ ELASTICACHE_PATH='/config/data/infra_elasticache.json' # "cluster_mode": "false" # } # } +# Sample Format for ${ELASTICACHE_PATH} in case of multi region +# { +# "elasticache": [{ +# "us-west-2": { +# "host": "some_url", +# "host_ro": "some_url", +# "port": 6379, +# "username": "", +# "password": "xxxxxx", +# "tls_mode": true, +# "cluster_mode": "false" +# }, +# 
"us-east-2": { +# "host": "some_url", +# "host_ro": "some_url", +# "port": 6379, +# "username": "", +# "password": "xxxxxx", +# "tls_mode": true, +# "cluster_mode": "false" +# } +# }] +# } -OPSRAMP_CREDS_PATH='/config/data/config_opsramp-tracing-proxy-creds.json' +OPSRAMP_CREDS_PATH='/config/data/config_tracing-proxy.json' # Sample Format for ${OPSRAMP_CREDS_PATH} # { -# "opsramp-tracing-proxy-creds": { +# "tracing-proxy": { # "traces_api": "test.opsramp.net", # "metrics_api": "test.opsramp.net", # "auth_api": "test.opsramp.net", @@ -37,11 +61,38 @@ TRACE_PROXY_RULES='/etc/tracing-proxy/final_rules.yaml' cp /etc/tracing-proxy/config.yaml ${TRACE_PROXY_CONFIG} cp /etc/tracing-proxy/rules.yaml ${TRACE_PROXY_RULES} +if [ -r ${CLUSTERINFO_PATH} ]; then + + CURRENT_REGION=$(jq <${CLUSTERINFO_PATH} -r .clusterinfo.CURRENT_REGION) + READ_WRITE_REGION=$(jq <${CLUSTERINFO_PATH} -r .clusterinfo.READ_WRITE_REGION) + + while [ "${CURRENT_REGION}" != "${READ_WRITE_REGION}" ]; do sleep 30; done +fi + if [ -r ${ELASTICACHE_PATH} ]; then # check if the configuration is a object or array TYPE=$(jq <${ELASTICACHE_PATH} -r .elasticache | jq 'if type=="array" then true else false end') if [ "${TYPE}" = true ]; then - echo "implement me" + + if [ -r ${CLUSTERINFO_PATH} ]; then + + CURRENT_REGION=$(jq <${CLUSTERINFO_PATH} -r .clusterinfo.CURRENT_REGION) + + CREDS=$(jq <${ELASTICACHE_PATH} -r .elasticache[0].\""${CURRENT_REGION}"\") + + REDIS_HOST=$(echo "${CREDS}" | jq -r '(.host)+":"+(.port|tostring)') + REDIS_USERNAME=$(echo "${CREDS}" | jq -r .username) + REDIS_PASSWORD=$(echo "${CREDS}" | jq -r .password) + REDIS_TLS_MODE=$(echo "${CREDS}" | jq -r .tls_mode | tr '[:upper:]' '[:lower:]') + + sed -i "s//${REDIS_HOST}/g" ${TRACE_PROXY_CONFIG} + sed -i "s//${REDIS_USERNAME}/g" ${TRACE_PROXY_CONFIG} + sed -i "s//${REDIS_PASSWORD}/g" ${TRACE_PROXY_CONFIG} + sed -i "s//${REDIS_TLS_MODE}/g" ${TRACE_PROXY_CONFIG} + + fi + + else REDIS_HOST=$(jq <${ELASTICACHE_PATH} -r 
'(.elasticache.host)+":"+(.elasticache.port|tostring)') REDIS_USERNAME=$(jq <${ELASTICACHE_PATH} -r .elasticache.username) @@ -57,12 +108,12 @@ fi if [ -r ${OPSRAMP_CREDS_PATH} ]; then - TRACES_API=$(jq <${OPSRAMP_CREDS_PATH} -r .opsramp-tracing-proxy-creds.traces_api) - METRICS_API=$(jq <${OPSRAMP_CREDS_PATH} -r .opsramp-tracing-proxy-creds.metrics_api) - AUTH_API=$(jq <${OPSRAMP_CREDS_PATH} -r .opsramp-tracing-proxy-creds.auth_api) - KEY=$(jq <${OPSRAMP_CREDS_PATH} -r .opsramp-tracing-proxy-creds.key) - SECRET=$(jq <${OPSRAMP_CREDS_PATH} -r .opsramp-tracing-proxy-creds.secret) - TENANT_ID=$(jq <${OPSRAMP_CREDS_PATH} -r .opsramp-tracing-proxy-creds.tenant_id) + TRACES_API=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".traces_api') + METRICS_API=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".metrics_api') + AUTH_API=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".auth_api') + KEY=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".key') + SECRET=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".secret') + TENANT_ID=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".tenant_id') sed -i "s//${TRACES_API}/g" ${TRACE_PROXY_CONFIG} sed -i "s//${METRICS_API}/g" ${TRACE_PROXY_CONFIG} From 6cbdae9a87ff9bddcb8f73a535575c4bc5e9b807 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Thu, 17 Aug 2023 12:40:40 +0530 Subject: [PATCH 348/351] Fix in Dockerfile --- Dockerfile | 55 +++++++++++++++++++++++++++--------------------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/Dockerfile b/Dockerfile index 8d26e33c0c..1b8b7ddeed 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,32 @@ FROM --platform=$BUILDPLATFORM golang:alpine as builder +ARG TARGETOS +ARG TARGETARCH -RUN apk update && apk add --no-cache git bash ca-certificates && update-ca-certificates +RUN apk update && apk add --no-cache git bash ca-certificates curl && update-ca-certificates -ARG BUILD_ID="15.0.0" +#Setting up tini +ENV 
TINI_URL_ARM="https://coreupdate.central.arubanetworks.com/packages/tini-arm64" +ENV TINI_ESUM_ARM="c3c8377b2b6bd62e8086be40ce967dd4a6910cec69b475992eff1800ec44b08e" +ENV TINI_ESUM_AMD="57a120ebc06d16b3fae6a60b6b16da5a20711db41f8934c2089dea0d3eaa4f70" +ENV TINI_URL_AMD="https://coreupdate.central.arubanetworks.com/packages/tini-amd64" +RUN set -eux; \ + case "${TARGETARCH}" in \ + aarch64|arm64) \ + ESUM=$TINI_ESUM_ARM; \ + BINARY_URL=$TINI_URL_ARM; \ + ;; \ + amd64|x86_64) \ + ESUM=$TINI_ESUM_AMD; \ + BINARY_URL=$TINI_URL_AMD; \ + ;; \ + esac; \ + \ + curl -fL -o /usr/local/bin/tini "${BINARY_URL}"; \ + echo "${ESUM} /usr/local/bin/tini" | sha256sum -c -; \ + chmod +x /usr/local/bin/tini + +ARG BUILD_ID="15.0.0" WORKDIR /app ADD go.mod go.sum ./ @@ -20,40 +43,18 @@ RUN CGO_ENABLED=0 \ -o tracing-proxy \ ./cmd/tracing-proxy -FROM --platform=$BUILDPLATFORM alpine:3.17 +FROM --platform=$BUILDPLATFORM alpine:3.18 -RUN apk update && apk add --no-cache bash jq ca-certificates curl && update-ca-certificates +RUN apk update && apk add --no-cache bash jq ca-certificates && update-ca-certificates COPY --from=builder /app/config_complete.yaml /etc/tracing-proxy/config.yaml COPY --from=builder /app/rules_complete.yaml /etc/tracing-proxy/rules.yaml COPY --from=builder /app/tracing-proxy /usr/bin/tracing-proxy +COPY --from=builder /usr/local/bin/tini /usr/local/bin/tini COPY --from=builder /app/start.sh /usr/bin/start.sh -#Setting up tini -ENV TINI_URL_ARM="https://coreupdate.central.arubanetworks.com/packages/tini-arm64" -ENV TINI_ESUM_ARM="c3c8377b2b6bd62e8086be40ce967dd4a6910cec69b475992eff1800ec44b08e" -ENV TINI_ESUM_AMD="57a120ebc06d16b3fae6a60b6b16da5a20711db41f8934c2089dea0d3eaa4f70" -ENV TINI_URL_AMD="https://coreupdate.central.arubanetworks.com/packages/tini-amd64" - -RUN set -eux; \ - ARCH="$(uname -m)"; \ - case "${ARCH}" in \ - aarch64|arm64) \ - ESUM=$TINI_ESUM_ARM; \ - BINARY_URL=$TINI_URL_ARM; \ - ;; \ - amd64|x86_64) \ - ESUM=$TINI_ESUM_AMD; \ - 
BINARY_URL=$TINI_URL_AMD; \ - ;; \ - esac; \ - \ - curl -fL -o /usr/local/bin/tini "${BINARY_URL}"; \ - echo "${ESUM} /usr/local/bin/tini" | sha256sum -c -; \ - chmod +x /usr/local/bin/tini - ENTRYPOINT ["tini", \ "-F", "/config/data/infra_elasticache.json", \ "-F", "/config/data/infra_clusterinfo.json", \ From 51c6db3de005ad30c3dbc3574707c68612798a10 Mon Sep 17 00:00:00 2001 From: "imran.syed" Date: Thu, 17 Aug 2023 12:55:11 +0530 Subject: [PATCH 349/351] Refined dockerfile --- Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1b8b7ddeed..53a6559350 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM --platform=$BUILDPLATFORM golang:alpine as builder ARG TARGETOS ARG TARGETARCH -RUN apk update && apk add --no-cache git bash ca-certificates curl && update-ca-certificates +RUN apk update && apk add --no-cache git ca-certificates curl && update-ca-certificates #Setting up tini ENV TINI_URL_ARM="https://coreupdate.central.arubanetworks.com/packages/tini-arm64" @@ -37,8 +37,8 @@ RUN go mod verify ADD . . 
RUN CGO_ENABLED=0 \ - GOOS=linux \ - GOARCH=amd64 \ + GOOS=${TARGETOS} \ + GOARCH=${TARGETARCH} \ go build -ldflags "-X main.BuildID=${BUILD_ID}" \ -o tracing-proxy \ ./cmd/tracing-proxy From 61d48b3995e3bb66458a8d469e98a242c8965a24 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Thu, 17 Aug 2023 21:49:38 +0530 Subject: [PATCH 350/351] updating start.sh and dockerfile --- Dockerfile | 10 +++++----- start.sh | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index 53a6559350..23ca560256 100644 --- a/Dockerfile +++ b/Dockerfile @@ -55,10 +55,10 @@ COPY --from=builder /usr/local/bin/tini /usr/local/bin/tini COPY --from=builder /app/start.sh /usr/bin/start.sh -ENTRYPOINT ["tini", \ - "-F", "/config/data/infra_elasticache.json", \ - "-F", "/config/data/infra_clusterinfo.json", \ - "-F", "/config/data/config_tracing-proxy.json", \ - "--"] +#ENTRYPOINT ["tini", \ +# "-F", "/config/data/infra_elasticache.json", \ +# "-F", "/config/data/infra_clusterinfo.json", \ +# "-F", "/config/data/config_tracing-proxy.json", \ +# "--"] CMD ["/usr/bin/start.sh"] \ No newline at end of file diff --git a/start.sh b/start.sh index ef179b81a4..9dc43a9b7b 100755 --- a/start.sh +++ b/start.sh @@ -115,9 +115,9 @@ if [ -r ${OPSRAMP_CREDS_PATH} ]; then SECRET=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".secret') TENANT_ID=$(jq <${OPSRAMP_CREDS_PATH} -r '."tracing-proxy".tenant_id') - sed -i "s//${TRACES_API}/g" ${TRACE_PROXY_CONFIG} - sed -i "s//${METRICS_API}/g" ${TRACE_PROXY_CONFIG} - sed -i "s//${AUTH_API}/g" ${TRACE_PROXY_CONFIG} + sed -i "s**${TRACES_API}*g" ${TRACE_PROXY_CONFIG} + sed -i "s**${METRICS_API}*g" ${TRACE_PROXY_CONFIG} + sed -i "s**${AUTH_API}*g" ${TRACE_PROXY_CONFIG} sed -i "s//${KEY}/g" ${TRACE_PROXY_CONFIG} sed -i "s//${SECRET}/g" ${TRACE_PROXY_CONFIG} sed -i "s//${TENANT_ID}/g" ${TRACE_PROXY_CONFIG} From 8a4b34b280a671a8e5f5dcc6f55b98e30ffbac57 Mon Sep 17 00:00:00 2001 From: "lokesh.balla" Date: Mon, 21 Aug 2023 09:42:17 
+0530 Subject: [PATCH 351/351] adding debian docker file with tini --- Dockerfile_debian | 64 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 Dockerfile_debian diff --git a/Dockerfile_debian b/Dockerfile_debian new file mode 100644 index 0000000000..b47e5d07dc --- /dev/null +++ b/Dockerfile_debian @@ -0,0 +1,64 @@ +FROM --platform=$BUILDPLATFORM golang:1.20.7-bullseye as builder +ARG TARGETOS +ARG TARGETARCH + +RUN apt update -y && apt install git ca-certificates curl -y && update-ca-certificates + +#Setting up tini +ENV TINI_URL_ARM="https://coreupdate.central.arubanetworks.com/packages/tini-arm64" +ENV TINI_ESUM_ARM="c3c8377b2b6bd62e8086be40ce967dd4a6910cec69b475992eff1800ec44b08e" +ENV TINI_ESUM_AMD="57a120ebc06d16b3fae6a60b6b16da5a20711db41f8934c2089dea0d3eaa4f70" +ENV TINI_URL_AMD="https://coreupdate.central.arubanetworks.com/packages/tini-amd64" + +RUN set -eux; \ + case "${TARGETARCH}" in \ + aarch64|arm64) \ + ESUM=$TINI_ESUM_ARM; \ + BINARY_URL=$TINI_URL_ARM; \ + ;; \ + amd64|x86_64) \ + ESUM=$TINI_ESUM_AMD; \ + BINARY_URL=$TINI_URL_AMD; \ + ;; \ + esac; \ + \ + curl -fL -o /usr/local/bin/tini "${BINARY_URL}"; \ + echo "${ESUM} /usr/local/bin/tini" | sha256sum -c -; \ + chmod +x /usr/local/bin/tini + +ARG BUILD_ID="15.0.0" +WORKDIR /app + +ADD go.mod go.sum ./ + +RUN go mod download +RUN go mod verify + +ADD . . 
+ +RUN CGO_ENABLED=0 \ + GOOS=${TARGETOS} \ + GOARCH=${TARGETARCH} \ + go build -ldflags "-X main.BuildID=${BUILD_ID}" \ + -o tracing-proxy \ + ./cmd/tracing-proxy + +FROM --platform=$BUILDPLATFORM debian:bullseye-slim + +RUN apt update -y && apt install bash jq ca-certificates -y && update-ca-certificates + +COPY --from=builder /app/config_complete.yaml /etc/tracing-proxy/config.yaml +COPY --from=builder /app/rules_complete.yaml /etc/tracing-proxy/rules.yaml + +COPY --from=builder /app/tracing-proxy /usr/bin/tracing-proxy +COPY --from=builder /usr/local/bin/tini /usr/local/bin/tini + +COPY --from=builder /app/start.sh /usr/bin/start.sh + +ENTRYPOINT ["tini", \ + "-F", "/config/data/infra_elasticache.json", \ + "-F", "/config/data/infra_clusterinfo.json", \ + "-F", "/config/data/config_tracing-proxy.json", \ + "--"] + +CMD ["/usr/bin/start.sh"] \ No newline at end of file